return archX86(&x86.Link386)
case "amd64":
return archX86(&x86.Linkamd64)
- case "amd64p32":
- return archX86(&x86.Linkamd64p32)
case "arm":
return archArm()
case "arm64":
import (
"cmd/compile/internal/gc"
"cmd/internal/obj/x86"
- "cmd/internal/objabi"
)
var leaptr = x86.ALEAQ
func Init(arch *gc.Arch) {
arch.LinkArch = &x86.Linkamd64
- if objabi.GOARCH == "amd64p32" {
- arch.LinkArch = &x86.Linkamd64p32
- leaptr = x86.ALEAL
- }
arch.REGSP = x86.REGSP
arch.MAXWIDTH = 1 << 50
c.FPReg = framepointerRegAMD64
c.LinkReg = linkRegAMD64
c.hasGReg = false
- case "amd64p32":
- c.PtrSize = 4
- c.RegSize = 8
- c.lowerBlock = rewriteBlockAMD64
- c.lowerValue = rewriteValueAMD64
- c.splitLoad = rewriteValueAMD64splitload
- c.registers = registersAMD64[:]
- c.gpRegMask = gpRegMaskAMD64
- c.fpRegMask = fpRegMaskAMD64
- c.FPReg = framepointerRegAMD64
- c.LinkReg = linkRegAMD64
- c.hasGReg = false
- c.noDuffDevice = true
case "386":
c.PtrSize = 4
c.RegSize = 4
// lowers them, so we only perform this optimization on platforms that we know to
// have fast Move ops.
switch c.arch {
- case "amd64", "amd64p32":
+ case "amd64":
return sz <= 16 || (sz < 1024 && disjoint(dst, sz, src, sz))
case "386", "ppc64", "ppc64le", "arm64":
return sz <= 8
// for sizes < 32-bit. This is used to decide whether to promote some rotations.
func hasSmallRotate(c *Config) bool {
switch c.arch {
- case "amd64", "amd64p32", "386":
+ case "amd64", "386":
return true
default:
return false
var archInits = map[string]func(*gc.Arch){
"386": x86.Init,
"amd64": amd64.Init,
- "amd64p32": amd64.Init,
"arm": arm.Init,
"arm64": arm64.Init,
"mips": mips.Init,
var okgoarch = []string{
"386",
"amd64",
- "amd64p32",
"arm",
"arm64",
"mips",
"android",
"solaris",
"freebsd",
+ "nacl", // keep;
"netbsd",
"openbsd",
"plan9",
"illumos": true,
"js": true,
"linux": true,
- "nacl": true,
+ "nacl": true, // legacy; don't remove
"netbsd": true,
"openbsd": true,
"plan9": true,
var KnownArch = map[string]bool{
"386": true,
"amd64": true,
- "amd64p32": true,
+ "amd64p32": true, // legacy; don't remove
"arm": true,
"armbe": true,
"arm64": true,
switch cfg.Goarch {
case "386":
return []string{"-m32"}
- case "amd64", "amd64p32":
+ case "amd64":
return []string{"-m64"}
case "arm":
return []string{"-marm"} // not thumb
DWARFRegisters: AMD64DWARFRegisters,
}
-var Linkamd64p32 = obj.LinkArch{
- Arch: sys.ArchAMD64P32,
- Init: instinit,
- Preprocess: preprocess,
- Assemble: span6,
- Progedit: progedit,
- UnaryDst: unaryDst,
- DWARFRegisters: AMD64DWARFRegisters,
-}
-
var Link386 = obj.LinkArch{
Arch: sys.Arch386,
Init: instinit,
fmt.Fprintf(tw, " %s:%d\t%#x\t", base(file), line, pc)
}
- if size%4 != 0 || d.goarch == "386" || d.goarch == "amd64" || d.goarch == "amd64p32" {
+ if size%4 != 0 || d.goarch == "386" || d.goarch == "amd64" {
// Print instruction as bytes.
fmt.Fprintf(tw, "%x", code[i:i+size])
} else {
}
var disasms = map[string]disasmFunc{
- "386": disasm_386,
- "amd64": disasm_amd64,
- "amd64p32": disasm_amd64,
- "arm": disasm_arm,
- "arm64": disasm_arm64,
- "ppc64": disasm_ppc64,
- "ppc64le": disasm_ppc64,
+ "386": disasm_386,
+ "amd64": disasm_amd64,
+ "arm": disasm_arm,
+ "arm64": disasm_arm64,
+ "ppc64": disasm_ppc64,
+ "ppc64le": disasm_ppc64,
}
var byteOrders = map[string]binary.ByteOrder{
- "386": binary.LittleEndian,
- "amd64": binary.LittleEndian,
- "amd64p32": binary.LittleEndian,
- "arm": binary.LittleEndian,
- "arm64": binary.LittleEndian,
- "ppc64": binary.BigEndian,
- "ppc64le": binary.LittleEndian,
- "s390x": binary.BigEndian,
+ "386": binary.LittleEndian,
+ "amd64": binary.LittleEndian,
+ "arm": binary.LittleEndian,
+ "arm64": binary.LittleEndian,
+ "ppc64": binary.BigEndian,
+ "ppc64le": binary.LittleEndian,
+ "s390x": binary.BigEndian,
}
type Liner interface {
import "encoding/binary"
// ArchFamily represents a family of one or more related architectures.
-// For example, amd64 and amd64p32 are both members of the AMD64 family,
-// and ppc64 and ppc64le are both members of the PPC64 family.
+// For example, ppc64 and ppc64le are both members of the PPC64 family.
type ArchFamily byte
const (
MinLC: 1,
}
-var ArchAMD64P32 = &Arch{
- Name: "amd64p32",
- Family: AMD64,
- ByteOrder: binary.LittleEndian,
- PtrSize: 4,
- RegSize: 8,
- MinLC: 1,
-}
-
var ArchARM = &Arch{
Name: "arm",
Family: ARM,
var Archs = [...]*Arch{
Arch386,
ArchAMD64,
- ArchAMD64P32,
ArchARM,
ArchARM64,
ArchMIPS,
func Init() (*sys.Arch, ld.Arch) {
arch := sys.ArchAMD64
- if objabi.GOARCH == "amd64p32" {
- arch = sys.ArchAMD64P32
- }
theArch := ld.Arch{
Funcalign: funcAlign,
os.Exit(2)
case "386":
arch, theArch = x86.Init()
- case "amd64", "amd64p32":
+ case "amd64":
arch, theArch = amd64.Init()
case "arm":
arch, theArch = arm.Init()
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build amd64 amd64p32 386 arm ppc64le ppc64 s390x arm64
+// +build amd64 386 arm ppc64le ppc64 s390x arm64
package md5
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build !amd64,!amd64p32,!386,!arm,!ppc64le,!ppc64,!s390x,!arm64
+// +build !amd64,!386,!arm,!ppc64le,!ppc64,!s390x,!arm64
package md5
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build amd64p32 arm 386 s390x
+// +build arm 386 s390x
package sha1
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build !amd64,!amd64p32,!386,!arm,!s390x,!arm64
+// +build !amd64,!386,!arm,!s390x,!arm64
package sha1
package build
+// List of past, present, and future known GOOS and GOARCH values.
+// Do not remove from these lists, as they are used for go/build filename matching.
+
const goosList = "aix android darwin dragonfly freebsd hurd illumos js linux nacl netbsd openbsd plan9 solaris windows zos "
const goarchList = "386 amd64 amd64p32 arm armbe arm64 arm64be ppc64 ppc64le mips mipsle mips64 mips64le mips64p32 mips64p32le ppc riscv riscv64 s390 s390x sparc sparc64 wasm "
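These lists feed go/build's filename matching: a suffix such as _amd64p32 is treated as an implicit build constraint only while "amd64p32" stays in goarchList, so deleting the entry would make old amd64p32-specific files silently compile everywhere. A minimal sketch of that behavior, using a hypothetical file name:

	package main

	import (
		"fmt"
		"go/build"
	)

	func main() {
		ctxt := build.Default
		ctxt.GOOS, ctxt.GOARCH = "linux", "amd64"
		// The name x_amd64p32.go is recognized as GOARCH-constrained only
		// because "amd64p32" is a known architecture; the name alone
		// excludes it from a linux/amd64 build, without opening the file.
		match, err := ctxt.MatchFile(".", "x_amd64p32.go")
		fmt.Println(match, err) // false <nil>
	}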
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build !amd64,!amd64p32,!s390x,!ppc64le,!arm64
+// +build !amd64,!s390x,!ppc64le,!arm64
package crc32
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build !386,!amd64,!amd64p32,!s390x,!arm,!arm64,!ppc64,!ppc64le,!mips,!mipsle,!wasm,!mips64,!mips64le
+// +build !386,!amd64,!s390x,!arm,!arm64,!ppc64,!ppc64le,!mips,!mipsle,!wasm,!mips64,!mips64le
package bytealg
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build 386 amd64 amd64p32 s390x arm arm64 ppc64 ppc64le mips mipsle wasm mips64 mips64le
+// +build 386 amd64 s390x arm arm64 ppc64 ppc64le mips mipsle wasm mips64 mips64le
package bytealg
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build !386,!amd64,!amd64p32,!s390x,!arm,!arm64,!ppc64,!ppc64le,!mips,!mipsle,!mips64,!mips64le,!wasm
+// +build !386,!amd64,!s390x,!arm,!arm64,!ppc64,!ppc64le,!mips,!mipsle,!mips64,!mips64le,!wasm
package bytealg
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build 386 amd64 amd64p32 s390x arm arm64 ppc64 ppc64le mips mipsle mips64 mips64le wasm
+// +build 386 amd64 s390x arm arm64 ppc64 ppc64le mips mipsle mips64 mips64le wasm
package bytealg
// +build !386
// +build !amd64
-// +build !amd64p32
// +build !arm
// +build !arm64
// +build !ppc64
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build 386 amd64 amd64p32
+// +build 386 amd64
package cpu
{Name: "sse42", Feature: &X86.HasSSE42},
{Name: "ssse3", Feature: &X86.HasSSSE3},
- // These capabilities should always be enabled on amd64(p32):
- {Name: "sse2", Feature: &X86.HasSSE2, Required: GOARCH == "amd64" || GOARCH == "amd64p32"},
+ // These capabilities should always be enabled on amd64:
+ {Name: "sse2", Feature: &X86.HasSSE2, Required: GOARCH == "amd64"},
}
maxID, _, _, _ := cpuid(0, 0)
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build 386 amd64 amd64p32
+// +build 386 amd64
#include "textflag.h"
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build 386 amd64 amd64p32
+// +build 386 amd64
package cpu_test
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build amd64 amd64p32
+// +build amd64
package math
func init() {
var argAlign uintptr = PtrSize
- if runtime.GOARCH == "amd64p32" {
- argAlign = 2 * PtrSize
- }
roundup := func(x uintptr, a uintptr) uintptr {
return (x + a - 1) / a * a
}
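For reference, the roundup closure rounds x up to the next multiple of a, and works for any positive a. For example:

	roundup(13, 8) // (13+8-1)/8*8 == 16
	roundup(16, 8) // (16+8-1)/8*8 == 16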
package reflect
import (
- "runtime"
"strconv"
"sync"
"unicode"
offset += arg.size
}
argSize = offset
- if runtime.GOARCH == "amd64p32" {
- offset += -offset & (8 - 1)
- }
offset += -offset & (ptrSize - 1)
retOffset = offset
for _, res := range t.out() {
size: offset,
ptrdata: uintptr(ptrmap.n) * ptrSize,
}
- if runtime.GOARCH == "amd64p32" {
- x.align = 8
- }
if ptrmap.n > 0 {
x.gcdata = &ptrmap.data[0]
}
// Copy results back into argument frame.
if numOut > 0 {
off += -off & (ptrSize - 1)
- if runtime.GOARCH == "amd64p32" {
- off = align(off, 8)
- }
for i, typ := range ftyp.out() {
v := out[i]
if v.typ == nil {
// Copy in receiver and rest of args.
storeRcvr(rcvr, scratch)
- // Align the first arg. Only on amd64p32 the alignment can be
- // larger than ptrSize.
+ // Align the first arg. The alignment can't be larger than ptrSize.
argOffset := uintptr(ptrSize)
if len(t.in()) > 0 {
argOffset = align(argOffset, uintptr(t.in()[0].align))
// and then copies the results back into scratch.
call(frametype, fn, scratch, uint32(frametype.size), uint32(retOffset))
- // Copy return values. On amd64p32, the beginning of return values
- // is 64-bit aligned, so the caller's frame layout (which doesn't have
- // a receiver) is different from the layout of the fn call, which has
- // a receiver.
+ // Copy return values.
// Ignore any changes to args and just copy return values.
// Avoid constructing out-of-bounds pointers if there are no return values.
if frametype.size-retOffset > 0 {
callerRetOffset := retOffset - argOffset
- if runtime.GOARCH == "amd64p32" {
- callerRetOffset = align(argSize-argOffset, 8)
- }
// This copies to the stack. Write barriers are not needed.
memmove(add(frame, callerRetOffset, "frametype.size > retOffset"),
add(scratch, retOffset, "frametype.size > retOffset"),
#ifdef GOARCH_386
#define SKIP4 BYTE $0x90; BYTE $0x90; BYTE $0x90; BYTE $0x90
#endif
-#ifdef GOARCH_amd64p32
-#define SKIP4 BYTE $0x90; BYTE $0x90; BYTE $0x90; BYTE $0x90
-#endif
#ifdef GOARCH_wasm
#define SKIP4 UNDEF; UNDEF; UNDEF; UNDEF
#endif
typeScalar, typeScalar, typeScalar, // t int; y uint16; u uint64
typePointer, typeScalar, // i string
}
- case "amd64p32":
- return []byte{
- typePointer, // q *int
- typeScalar, typeScalar, typeScalar, typeScalar, typeScalar, // w byte; e [17]byte
- typePointer, typeScalar, typeScalar, // r []byte
- typeScalar, typeScalar, typeScalar, typeScalar, typeScalar, // t int; y uint16; u uint64
- typePointer, typeScalar, // i string
- }
default:
panic("unknown arch")
}
#define g(r) 0(r)(TLS*1)
#endif
-#ifdef GOARCH_amd64p32
-#define get_tls(r) MOVL TLS, r
-#define g(r) 0(r)(TLS*1)
-#endif
-
#ifdef GOARCH_386
#define get_tls(r) MOVL TLS, r
#define g(r) 0(r)(TLS*1)
// xxhash: https://code.google.com/p/xxhash/
// cityhash: https://code.google.com/p/cityhash/
-// +build amd64 amd64p32 arm64 mips64 mips64le ppc64 ppc64le s390x wasm
+// +build amd64 arm64 mips64 mips64le ppc64 ppc64le s390x wasm
package runtime
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build amd64 amd64p32
-
package atomic
import "unsafe"
// a continual source of pain. Test that on 32-bit systems they crash
// instead of failing silently.
- switch runtime.GOARCH {
- default:
- if unsafe.Sizeof(int(0)) != 4 {
- t.Skip("test only runs on 32-bit systems")
- }
- case "amd64p32":
- // amd64p32 can handle unaligned atomics.
- t.Skipf("test not needed on %v", runtime.GOARCH)
+ if unsafe.Sizeof(int(0)) != 4 {
+ t.Skip("test only runs on 32-bit systems")
}
x := make([]uint32, 4)
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build 386 amd64p32 arm mips mipsle
+// +build 386 arm mips mipsle
package runtime
// Information about what cpu features are available.
// Packages outside the runtime should not use these
// as they are not an external api.
- // Set on startup in asm_{386,amd64,amd64p32}.s
+ // Set on startup in asm_{386,amd64}.s
processorVersionInfo uint32
isIntel bool
lfenceBeforeRdtsc bool
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build amd64 amd64p32
+// +build amd64
// +build darwin dragonfly freebsd linux netbsd openbsd solaris
package runtime
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build 386 arm amd64p32 mips mipsle
+// +build 386 arm mips mipsle
package runtime
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build amd64 amd64p32
-
package runtime
// stackcheck checks that SP is in range [g->stack.lo, g->stack.hi).
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build amd64 amd64p32 386
+// +build amd64 386
package runtime
// and ppc64le.
// Tracing won't work reliably for architectures where cputicks is emulated
// by nanotime, so the value doesn't matter for those architectures.
- traceTickDiv = 16 + 48*(sys.Goarch386|sys.GoarchAmd64|sys.GoarchAmd64p32)
+ traceTickDiv = 16 + 48*(sys.Goarch386|sys.GoarchAmd64)
// Maximum number of PCs in a single stack trace.
// Since events contain only stack id rather than whole stack trace,
// we can allow quite large values here.
// takes up only 4 bytes on the stack, while on 64-bit systems it takes up 8 bytes.
// Typically this is ptrSize.
//
-// As an exception, amd64p32 has ptrSize == 4 but the CALL instruction still
-// stores an 8-byte return PC onto the stack. To accommodate this, we use regSize
+// As an exception, amd64p32 had ptrSize == 4 but the CALL instruction still
+// stored an 8-byte return PC onto the stack. To accommodate this, we used regSize
// as the size of the architecture-pushed return PC.
//
// usesLR is defined below in terms of minFrameSize, which is defined in
// Unaligned 64-bit atomics on 32-bit systems are
// a continual source of pain. Test that on 32-bit systems they crash
// instead of failing silently.
-
- switch runtime.GOARCH {
- default:
- if !arch32 {
- t.Skip("test only runs on 32-bit systems")
- }
- case "amd64p32":
- // amd64p32 can handle unaligned atomics.
- t.Skipf("test not needed on %v", runtime.GOARCH)
+ if !arch32 {
+ t.Skip("test only runs on 32-bit systems")
}
x := make([]uint32, 4)
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//
-// +build 386 amd64 amd64p32 arm arm64 ppc64le mips64le mipsle wasm
+// +build 386 amd64 arm arm64 ppc64le mips64le mipsle wasm
package syscall