case obj.TYPE_CONST:
switch sz {
case 1, 2, 4, 8:
- nameAddr.Sym.WriteInt(p.ctxt, nameAddr.Offset, int(sz), valueAddr.Offset)
+ nameAddr.Sym.WriteInt(p.ctxt, nameAddr.Offset, sz, valueAddr.Offset)
default:
p.errorf("bad int size for DATA argument: %d", sz)
}
p.errorf("bad float size for DATA argument: %d", sz)
}
case obj.TYPE_SCONST:
- nameAddr.Sym.WriteString(p.ctxt, nameAddr.Offset, int(sz), valueAddr.Val.(string))
+ nameAddr.Sym.WriteString(p.ctxt, nameAddr.Offset, sz, valueAddr.Val.(string))
case obj.TYPE_ADDR:
if sz == p.arch.PtrSize {
- nameAddr.Sym.WriteAddr(p.ctxt, nameAddr.Offset, int(sz), valueAddr.Sym, valueAddr.Offset)
+ nameAddr.Sym.WriteAddr(p.ctxt, nameAddr.Offset, sz, valueAddr.Sym, valueAddr.Offset)
} else {
p.errorf("bad addr size for DATA argument: %d", sz)
}
for _, s := range f.Symbols {
switch {
case isDebugInts(s.Name):
- if i := int(s.SectionNumber) - 1; 0 <= i && i < len(f.Sections) {
+ if i := s.SectionNumber - 1; 0 <= i && i < len(f.Sections) {
sect := f.Sections[i]
if s.Value < sect.Size {
if sdat, err := sect.Data(); err == nil {
}
}
case isDebugFloats(s.Name):
- if i := int(s.SectionNumber) - 1; 0 <= i && i < len(f.Sections) {
+ if i := s.SectionNumber - 1; 0 <= i && i < len(f.Sections) {
sect := f.Sections[i]
if s.Value < sect.Size {
if sdat, err := sect.Data(); err == nil {
}
default:
if n := indexOfDebugStr(s.Name); n != -1 {
- if i := int(s.SectionNumber) - 1; 0 <= i && i < len(f.Sections) {
+ if i := s.SectionNumber - 1; 0 <= i && i < len(f.Sections) {
sect := f.Sections[i]
if s.Value < sect.Size {
if sdat, err := sect.Data(); err == nil {
break
}
if n := indexOfDebugStrlen(s.Name); n != -1 {
- if i := int(s.SectionNumber) - 1; 0 <= i && i < len(f.Sections) {
+ if i := s.SectionNumber - 1; 0 <= i && i < len(f.Sections) {
sect := f.Sections[i]
if s.Value < sect.Size {
if sdat, err := sect.Data(); err == nil {
if len(args) == 0 {
t.Skip("no C++ compiler")
}
- testenv.MustHaveExecPath(t, string(args[0]))
+ testenv.MustHaveExecPath(t, args[0])
}
var (
bin = cmdToRun("./testp")
ccOut := goEnv("CC")
- cc = []string{string(ccOut)}
+ cc = []string{ccOut}
out := goEnv("GOGCCFLAGS")
quote := '\000'
start := 0
lastSpace := true
backslash := false
- s := string(out)
+ s := out
for i, c := range s {
if quote == '\000' && unicode.IsSpace(c) {
if !lastSpace {
start := 0
lastSpace := true
backslash := false
- s := string(out)
+ s := out
for i, c := range s {
if quote == '\000' && unicode.IsSpace(c) {
if !lastSpace {
start := 0
lastSpace := true
backslash := false
- s := string(out)
+ s := out
for i, c := range s {
if quote == '\000' && unicode.IsSpace(c) {
if !lastSpace {
}
var hv [16]byte
for i := 0; i < 16; i++ {
- nib := string(mhash[i*2 : i*2+2])
+ nib := mhash[i*2 : i*2+2]
x, err := strconv.ParseInt(nib, 16, 32)
if err != nil {
base.Fatalf("metahash bad byte %q", nib)
// function is inlinable.
func noteInlinableFunc(n *ir.Name, fn *ir.Func, cost int32) {
if base.Flag.LowerM > 1 {
- fmt.Printf("%v: can inline %v with cost %d as: %v { %v }\n", ir.Line(fn), n, cost, fn.Type(), ir.Nodes(fn.Body))
+ fmt.Printf("%v: can inline %v with cost %d as: %v { %v }\n", ir.Line(fn), n, cost, fn.Type(), fn.Body)
} else if base.Flag.LowerM != 0 {
fmt.Printf("%v: can inline %v\n", ir.Line(fn), n)
}
// Clear old bits.
*(*uint8)(f) &^= 3 << shift
// Set new bits.
- *(*uint8)(f) |= uint8(b&3) << shift
+ *(*uint8)(f) |= (b & 3) << shift
}
type bitset16 uint16
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
p.AddRestSourceArgs([]obj.Addr{
- {Type: obj.TYPE_CONST, Offset: int64((v.AuxInt >> 5) & 0x1fffffffff)},
- {Type: obj.TYPE_CONST, Offset: int64((v.AuxInt >> 0) & 0x1f)},
+ {Type: obj.TYPE_CONST, Offset: (v.AuxInt >> 5) & 0x1fffffffff},
+ {Type: obj.TYPE_CONST, Offset: (v.AuxInt >> 0) & 0x1f},
})
case ssa.OpLOONG64ADDshiftLLV:
edit(r.curfn)
})
- body := ir.Nodes(r.curfn.Body)
+ body := r.curfn.Body
// Reparent any declarations into the caller function.
for _, name := range r.curfn.Dcl {
p := s.Prog(v.Op.Asm())
p.To = obj.Addr{Type: obj.TYPE_REG, Reg: v.Reg()}
p.Reg = v.Args[0].Reg()
- p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: int64(sh)}
+ p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: sh}
p.AddRestSourceArgs([]obj.Addr{{Type: obj.TYPE_CONST, Offset: mb}, {Type: obj.TYPE_CONST, Offset: me}})
// Auxint holds mask
if s == nil || s.s == nil {
return 0
}
- return s.s.cap() + int(s.first)
+ return s.s.cap() + s.first
}
// size returns the number of entries stored in s
}
if ctxt.UseBASEntries {
- listSym.WriteInt(ctxt, listSym.Size, ctxt.Arch.PtrSize, int64(begin))
- listSym.WriteInt(ctxt, listSym.Size, ctxt.Arch.PtrSize, int64(end))
+ listSym.WriteInt(ctxt, listSym.Size, ctxt.Arch.PtrSize, begin)
+ listSym.WriteInt(ctxt, listSym.Size, ctxt.Arch.PtrSize, end)
} else {
- listSym.WriteCURelativeAddr(ctxt, listSym.Size, startPC, int64(begin))
- listSym.WriteCURelativeAddr(ctxt, listSym.Size, startPC, int64(end))
+ listSym.WriteCURelativeAddr(ctxt, listSym.Size, startPC, begin)
+ listSym.WriteCURelativeAddr(ctxt, listSym.Size, startPC, end)
}
i += 2 * ctxt.Arch.PtrSize
min := -int64(1) << (n - 1)
max := int64(1) << (n - 1)
for c := int64(1); c < max; c++ {
- if !smagicOK(n, int64(c)) {
+ if !smagicOK(n, c) {
continue
}
m := int64(smagic(n, c).m)
if c>>(n-1) != 0 {
continue // not appropriate for the given n.
}
- if !smagicOK(n, int64(c)) {
+ if !smagicOK(n, c) {
t.Errorf("expected n=%d c=%d to pass\n", n, c)
}
- m := smagic(n, int64(c)).m
- s := smagic(n, int64(c)).s
+ m := smagic(n, c).m
+ s := smagic(n, c).s
C := new(big.Int).SetInt64(c)
M := new(big.Int).SetUint64(m)
minI := -int64(1) << (n - 1)
maxI := int64(1) << (n - 1)
for c := int64(1); c < maxI; c++ {
- if !sdivisibleOK(n, int64(c)) {
+ if !sdivisibleOK(n, c) {
continue
}
- k := sdivisible(n, int64(c)).k
- m := sdivisible(n, int64(c)).m
- a := sdivisible(n, int64(c)).a
- max := sdivisible(n, int64(c)).max
+ k := sdivisible(n, c).k
+ m := sdivisible(n, c).m
+ a := sdivisible(n, c).a
+ max := sdivisible(n, c).max
mask := ^uint64(0) >> (64 - n)
for i := minI; i < maxI; i++ {
want := i%c == 0
if c>>(n-1) != 0 {
continue // not appropriate for the given n.
}
- if !sdivisibleOK(n, int64(c)) {
+ if !sdivisibleOK(n, c) {
t.Errorf("expected n=%d c=%d to pass\n", n, c)
}
- k := sdivisible(n, int64(c)).k
- m := sdivisible(n, int64(c)).m
- a := sdivisible(n, int64(c)).a
- max := sdivisible(n, int64(c)).max
+ k := sdivisible(n, c).k
+ m := sdivisible(n, c).m
+ a := sdivisible(n, c).a
+ max := sdivisible(n, c).max
mask := ^uint64(0) >> (64 - n)
C := new(big.Int).SetInt64(c)
if isLittleEndian && shift0 != 0 {
sv = rightShift(root.Block, root.Pos, sv, shift0)
}
- shiftedSize = int64(aTotalSize - a[0].size)
+ shiftedSize = aTotalSize - a[0].size
if isBigEndian && shift0-shiftedSize*8 != 0 {
sv = rightShift(root.Block, root.Pos, sv, shift0-shiftedSize*8)
}
return int64(i)
}
func int64ToAuxInt(i int64) int64 {
- return int64(i)
+ return i
}
func uint8ToAuxInt(i uint8) int64 {
return int64(int8(i))
mb, me = men, mbn
}
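// The encoding packs one field per byte: me in bits 0-7, mb in bits 8-15,
// rotate in bits 16-23 and nbits in bits 24-31. Since << binds tighter
// than | in Go, no extra parentheses are needed around the shifted fields.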
- return int64(me) | int64(mb<<8) | int64(rotate<<16) | int64(nbits<<24)
+ return int64(me) | int64(mb<<8) | rotate<<16 | nbits<<24
}
// Merge (RLDICL [encoded] (SRDconst [s] x)) into (RLDICL [new_encoded] x)
func mergePPC64ClrlsldiSrw(sld, srw int64) int64 {
mask_1 := uint64(0xFFFFFFFF >> uint(srw))
// for CLRLSLDI, it's more convenient to think of it as a mask left bits then rotate left.
- mask_2 := uint64(0xFFFFFFFFFFFFFFFF) >> uint(GetPPC64Shiftmb(int64(sld)))
+ mask_2 := uint64(0xFFFFFFFFFFFFFFFF) >> uint(GetPPC64Shiftmb(sld))
// Rewrite mask to apply after the final left shift.
mask_3 := (mask_1 & mask_2) << uint(GetPPC64Shiftsh(sld))
if uint64(uint32(mask_3)) != mask_3 || mask_3 == 0 {
return 0
}
- return encodePPC64RotateMask(int64(r_3), int64(mask_3), 32)
+ return encodePPC64RotateMask(r_3, int64(mask_3), 32)
}
// Test if a doubleword shift right feeding into a CLRLSLDI can be merged into RLWINM.
func mergePPC64ClrlsldiSrd(sld, srd int64) int64 {
mask_1 := uint64(0xFFFFFFFFFFFFFFFF) >> uint(srd)
// for CLRLSLDI, it's more convenient to think of it as a mask left bits then rotate left.
- mask_2 := uint64(0xFFFFFFFFFFFFFFFF) >> uint(GetPPC64Shiftmb(int64(sld)))
+ mask_2 := uint64(0xFFFFFFFFFFFFFFFF) >> uint(GetPPC64Shiftmb(sld))
// Rewrite mask to apply after the final left shift.
mask_3 := (mask_1 & mask_2) << uint(GetPPC64Shiftsh(sld))
if v1&mask_3 != 0 {
return 0
}
- return encodePPC64RotateMask(int64(r_3&31), int64(mask_3), 32)
+ return encodePPC64RotateMask(r_3&31, int64(mask_3), 32)
}
// Test if a RLWINM feeding into a CLRLSLDI can be merged into RLWINM. Return
switch f.Sym.Name {
case "Size_":
v.reset(ptrSizedOpConst)
- v.AuxInt = int64(t.Size())
+ v.AuxInt = t.Size()
return v
case "PtrBytes":
v.reset(ptrSizedOpConst)
- v.AuxInt = int64(types.PtrDataSize(t))
+ v.AuxInt = types.PtrDataSize(t)
return v
case "Hash":
v.reset(OpConst32)
// re-visit all uses of value if its lattice is changed
newLt := t.getLatticeCell(val)
if !equals(newLt, oldLt) {
- if int8(oldLt.tag) > int8(newLt.tag) {
+ if oldLt.tag > newLt.tag {
t.f.Fatalf("Must lower lattice\n")
}
t.addUses(val)
x := uint64(123456789123456789)
for i := 0; i < b.N; i++ {
x += x << 4
- u64res = uint64(x) / 3
+ u64res = x / 3
}
})
b.Run("5", func(b *testing.B) {
x := uint64(123456789123456789)
for i := 0; i < b.N; i++ {
x += x << 4
- u64res = uint64(x) / 5
+ u64res = x / 5
}
})
b.Run("37", func(b *testing.B) {
x := uint64(123456789123456789)
for i := 0; i < b.N; i++ {
x += x << 4
- u64res = uint64(x) / 37
+ u64res = x / 37
}
})
b.Run("1234567", func(b *testing.B) {
x := uint64(123456789123456789)
for i := 0; i < b.N; i++ {
x += x << 4
- u64res = uint64(x) / 1234567
+ u64res = x / 1234567
}
})
}
}
var (
bSlashSlash = []byte(slashSlash)
bStarSlash  = []byte(starSlash)
bSlashStar  = []byte(slashStar)
bPlusBuild = []byte("+build")
goBuildComment = []byte("//go:build")
}
rev := buf[:i]
- secs, err := strconv.ParseInt(string(buf[i+1:]), 10, 64)
+ secs, err := strconv.ParseInt(buf[i+1:], 10, 64)
if err != nil {
return "", time.Time{}, fmt.Errorf("unrecognized VCS tool output: %v", err)
}
// unitchecker emits a JSON map of the form:
// output maps Package ID -> Analyzer.Name -> (error | []Diagnostic);
var tree jsonTree
- if err := json.Unmarshal([]byte(stdout), &tree); err != nil {
+ if err := json.Unmarshal(stdout, &tree); err != nil {
return fmt.Errorf("parsing JSON: %v", err)
}
for _, units := range tree {
if cmd.Cmd == imacho.LC_UUID {
// The UUID is the data in the LC_UUID load command,
// skipping over the 8-byte command header.
- return int64(reader.Offset() + 8), int64(cmd.Len - 8), true
+ return reader.Offset() + 8, int64(cmd.Len - 8), true
}
}
return 0, 0, false
// SymOff returns the offset of the i-th symbol.
func (r *Reader) SymOff(i uint32) uint32 {
- return r.h.Offsets[BlkSymdef] + uint32(i*SymSize)
+ return r.h.Offsets[BlkSymdef] + i*SymSize
}
// Sym returns a pointer to the i-th symbol.
// Note: here i is the index of short hashed symbols, not all symbols
// (unlike other accessors).
func (r *Reader) Hash64(i uint32) uint64 {
- off := r.h.Offsets[BlkHash64] + uint32(i*Hash64Size)
+ off := r.h.Offsets[BlkHash64] + i*Hash64Size
return r.uint64At(off)
}
// Note: here i is the index of hashed symbols, not all symbols
// (unlike other accessors).
func (r *Reader) Hash(i uint32) *HashType {
- off := r.h.Offsets[BlkHash] + uint32(i*HashSize)
+ off := r.h.Offsets[BlkHash] + i*HashSize
return (*HashType)(unsafe.Pointer(&r.b[off]))
}
// NReloc returns the number of relocations of the i-th symbol.
func (r *Reader) NReloc(i uint32) int {
- relocIdxOff := r.h.Offsets[BlkRelocIdx] + uint32(i*4)
+ relocIdxOff := r.h.Offsets[BlkRelocIdx] + i*4
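// BlkRelocIdx holds a running total of relocation counts, so the count for
// symbol i is the difference between entries i+1 and i.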
return int(r.uint32At(relocIdxOff+4) - r.uint32At(relocIdxOff))
}
// RelocOff returns the offset of the j-th relocation of the i-th symbol.
func (r *Reader) RelocOff(i uint32, j int) uint32 {
- relocIdxOff := r.h.Offsets[BlkRelocIdx] + uint32(i*4)
+ relocIdxOff := r.h.Offsets[BlkRelocIdx] + i*4
relocIdx := r.uint32At(relocIdxOff)
return r.h.Offsets[BlkReloc] + (relocIdx+uint32(j))*uint32(RelocSize)
}
func init() {
// f assigns dwarfregisters[from:to] = (base):(step*(to-from)+base)
f := func(from, to, base, step int16) {
- for r := int16(from); r <= to; r++ {
+ for r := from; r <= to; r++ {
ARMDWARFRegisters[r] = step*(r-from) + base
}
}
switch p.As {
case obj.APCALIGN, obj.APCALIGNMAX:
v := obj.AlignmentPaddingLength(int32(p.Pc), p, c.ctxt)
- for i := 0; i < int(v/4); i++ {
+ for i := 0; i < v/4; i++ {
// emit ANOOP instructions to fill the padding
buf.emit(OP_NOOP)
}
// Handle smaller unaligned and negative offsets via addition or subtraction.
if v >= -4095 && v <= 4095 {
- o1 = c.oaddi12(p, v, REGTMP, int16(rt))
+ o1 = c.oaddi12(p, v, REGTMP, rt)
o2 = c.olsr12u(p, c.opstr(p, p.As), 0, REGTMP, rf)
break
}
// Handle smaller unaligned and negative offsets via addition or subtraction.
if v >= -4095 && v <= 4095 {
- o1 = c.oaddi12(p, v, REGTMP, int16(rf))
+ o1 = c.oaddi12(p, v, REGTMP, rf)
o2 = c.olsr12u(p, c.opldr(p, p.As), 0, REGTMP, rt)
break
}
if p.Pool != nil {
c.ctxt.Diag("%v: unused constant in pool (%v)\n", p, v)
}
- o1 = c.oaddi(p, AADD, lo, REGTMP, int16(rf))
+ o1 = c.oaddi(p, AADD, lo, REGTMP, rf)
o2 = c.oaddi(p, AADD, hi, REGTMP, REGTMP)
o3 = c.opldpstp(p, o, 0, REGTMP, rt1, rt2, 1)
break
if p.Pool != nil {
c.ctxt.Diag("%v: unused constant in pool (%v)\n", p, v)
}
- o1 = c.oaddi(p, AADD, lo, REGTMP, int16(rt))
+ o1 = c.oaddi(p, AADD, lo, REGTMP, rt)
o2 = c.oaddi(p, AADD, hi, REGTMP, REGTMP)
o3 = c.opldpstp(p, o, 0, REGTMP, rf1, rf2, 0)
break
}
o1 = c.opirr(p, p.As)
- o1 |= (uint32(r&31) << 5) | (uint32((imm>>3)&0xfff) << 10) | (uint32(v & 31))
+ o1 |= (uint32(r&31) << 5) | (((imm >> 3) & 0xfff) << 10) | (v & 31)
case 92: /* vmov Vn.<T>[index], Vd.<T>[index] */
rf := int(p.From.Reg)
out[3] = o4
out[4] = o5
- return int(o.size(c.ctxt, p) / 4)
+ return o.size(c.ctxt, p) / 4
}
func (c *ctxt7) addrRelocType(p *obj.Prog) objabi.RelocType {
// pack returns the encoding of the "Q" field and two arrangement specifiers.
func pack(q uint32, arngA, arngB uint8) uint32 {
- return uint32(q)<<16 | uint32(arngA)<<8 | uint32(arngB)
+ return q<<16 | uint32(arngA)<<8 | uint32(arngB)
}
// ARM64RegisterExtension constructs an ARM64 register with extension or arrangement.
func ARM64RegisterExtension(a *obj.Addr, ext string, reg, num int16, isAmount, isIndex bool) error {
- Rnum := (reg & 31) + int16(num<<5)
+ Rnum := (reg & 31) + num<<5
if isAmount {
if num < 0 || num > 7 {
return errors.New("index shift amount is out of range")
c.AddInt(s, 2, int64(i))
}
func (c dwCtxt) AddUint8(s dwarf.Sym, i uint8) {
- b := []byte{byte(i)}
+ b := []byte{i}
c.AddBytes(s, b)
}
func (c dwCtxt) AddBytes(s dwarf.Sym, b []byte) {
ft.Params = make([]WasmField, readUint32())
for i := range ft.Params {
ft.Params[i].Type = WasmFieldType(readByte())
- ft.Params[i].Offset = int64(readInt64())
+ ft.Params[i].Offset = readInt64()
}
ft.Results = make([]WasmField, readUint32())
for i := range ft.Results {
ft.Results[i].Type = WasmFieldType(readByte())
- ft.Results[i].Offset = int64(readInt64())
+ ft.Results[i].Offset = readInt64()
}
}
func init() {
// f assigns dwarfregisters[from:to] = (base):(to-from+base)
f := func(from, to, base int16) {
- for r := int16(from); r <= to; r++ {
+ for r := from; r <= to; r++ {
LOONG64DWARFRegisters[r] = (r - from) + base
}
}
func init() {
// f assigns dwarfregisters[from:to] = (base):(to-from+base)
f := func(from, to, base int16) {
- for r := int16(from); r <= to; r++ {
+ for r := from; r <= to; r++ {
MIPSDWARFRegisters[r] = (r - from) + base
}
}
w.Uint32(uint32(dataOff))
dataOff += int64(len(s.P))
if file := s.File(); file != nil {
- dataOff += int64(file.Size)
+ dataOff += file.Size
}
}
}
func init() {
// f assigns dwarfregister[from:to] = (base):(to-from+base)
f := func(from, to, base int16) {
- for r := int16(from); r <= to; r++ {
+ for r := from; r <= to; r++ {
PPC64DWARFRegisters[r] = r - from + base
}
}
case 9: /* RLDC Ra, $sh, $mb, Rb */
sh := uint32(p.RestArgs[0].Addr.Offset) & 0x3F
mb := uint32(p.RestArgs[1].Addr.Offset) & 0x3F
- o1 = AOP_RRR(c.opirr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), (uint32(sh) & 0x1F))
+ o1 = AOP_RRR(c.opirr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), (sh & 0x1F))
o1 |= (sh & 0x20) >> 4 // sh[5] is placed in bit 1.
o1 |= (mb & 0x1F) << 6 // mb[0:4] is placed in bits 6-10.
o1 |= (mb & 0x20) // mb[5] is placed in bit 5
if n > b || b > 63 {
c.ctxt.Diag("Invalid n or b for CLRLSLDI: %x %x\n%v", n, b, p)
}
- o1 = AOP_MD(OP_RLDIC, uint32(p.To.Reg), uint32(r), uint32(n), uint32(b)-uint32(n))
+ o1 = AOP_MD(OP_RLDIC, uint32(p.To.Reg), r, uint32(n), uint32(b)-uint32(n))
default:
c.ctxt.Diag("unexpected op in rldc case\n%v", p)
c.ctxt.Diag("%v is not supported", p)
}
if o.ispfx {
- o1, o2 = pfxadd(int16(p.To.Reg), int16(r), PFX_R_ABS, d)
+ o1, o2 = pfxadd(p.To.Reg, int16(r), PFX_R_ABS, d)
} else if o.size == 8 {
o1 = LOP_IRR(OP_ORI, REGTMP, REGZERO, uint32(int32(d))) // tmp = uint16(d)
o2 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r)) // to = tmp + from
} else {
// For backwards compatibility with GOPPC64 < 10, generate 34b constants in register.
o1 = LOP_IRR(OP_ADDIS, REGZERO, REGTMP, uint32(d>>32)) // tmp = sign_extend((d>>32)&0xFFFF0000)
- o2 = loadl16(REGTMP, int64(d>>16)) // tmp |= (d>>16)&0xFFFF
+ o2 = loadl16(REGTMP, d>>16) // tmp |= (d>>16)&0xFFFF
o3 = AOP_MD(OP_RLDICR, REGTMP, REGTMP, 16, 63-16) // tmp <<= 16
o4 = loadl16(REGTMP, int64(uint16(d))) // tmp |= d&0xFFFF
o5 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
if o.ispfx {
if rel == nil {
- o1, o2 = pfxadd(int16(p.To.Reg), int16(r), PFX_R_ABS, v)
+ o1, o2 = pfxadd(p.To.Reg, int16(r), PFX_R_ABS, v)
} else {
- o1, o2 = pfxadd(int16(p.To.Reg), REG_R0, PFX_R_PCREL, 0)
+ o1, o2 = pfxadd(p.To.Reg, REG_R0, PFX_R_PCREL, 0)
rel.Type = objabi.R_ADDRPOWER_PCREL34
}
}
v |= 1 << 8
}
- o1 = AOP_RRR(OP_MTCRF, uint32(p.From.Reg), 0, 0) | uint32(v)<<12
+ o1 = AOP_RRR(OP_MTCRF, uint32(p.From.Reg), 0, 0) | v<<12
case 70: /* cmp* r,r,cr or cmp*i r,i,cr or fcmp f,f,cr or cmpeqb r,r */
r := uint32(p.Reg&7) << 2
to_done = p
} else {
// large stack: SP-framesize < stackguard-StackSmall
- offset := int64(framesize) - abi.StackSmall
+ offset := framesize - abi.StackSmall
if framesize > abi.StackBig {
// Such a large stack we need to protect against underflow.
// The runtime guarantees SP > objabi.StackBig, but
if err != nil {
p.Ctxt.Diag("%v: %v", p, err)
}
- ins.imm = int64(vtype)
+ ins.imm = vtype
if ins.as == AVSETIVLI {
if p.From.Type != obj.TYPE_CONST {
p.Ctxt.Diag("%v: expected immediate value", p)
func init() {
// f assigns dwarfregisters[from:to by step] = (base):((to-from)/step+base)
f := func(from, step, to, base int16) {
- for r := int16(from); r <= to; r += step {
+ for r := from; r <= to; r += step {
S390XDWARFRegisters[r] = (r-from)/step + base
}
}
case ARISBLG, ARISBLGZ:
opcode = op_RISBLG
}
- zRIE(_f, uint32(opcode), uint32(r1), uint32(r2), 0, uint32(i3), uint32(i4), 0, uint32(i5), asm)
+ zRIE(_f, opcode, uint32(r1), uint32(r2), 0, uint32(i3), uint32(i4), 0, uint32(i5), asm)
case 15: // br/bl (reg)
r := p.To.Reg
}
switch p.As {
case ASUB:
- zRIL(_a, op_LGFI, uint32(regtmp(p)), uint32(v), asm)
- zRRF(op_SLGRK, uint32(regtmp(p)), 0, uint32(p.To.Reg), uint32(r), asm)
+ zRIL(_a, op_LGFI, regtmp(p), uint32(v), asm)
+ zRRF(op_SLGRK, regtmp(p), 0, uint32(p.To.Reg), uint32(r), asm)
case ASUBC:
if r != p.To.Reg {
zRRE(op_LGR, uint32(p.To.Reg), uint32(r), asm)
if opcode == op_MVI {
opcode = op_MVIY
} else {
- zRXY(op_LAY, uint32(regtmp(p)), 0, uint32(r), uint32(d), asm)
+ zRXY(op_LAY, regtmp(p), 0, uint32(r), uint32(d), asm)
r = int16(regtmp(p))
d = 0
}
// OutMask provides a mask representing the selected bits.
func (r RotateParams) OutMask() uint64 {
// Note: z must be unsigned for bootstrap compiler
- z := uint8(63-r.End+r.Start) & 63 // number of zero bits in mask
+ z := (63 - r.End + r.Start) & 63 // number of zero bits in mask
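// Start and End follow the s390x convention where bit 0 is the MSB. For a
// wraparound range such as Start=48, End=15: z = (63-15+48)&63 = 32, and
// rotating ^uint64(0)<<32 right by 48 yields the mask 0xffff00000000ffff.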
return bits.RotateLeft64(^uint64(0)<<z, -int(r.Start))
}
v := obj.AlignmentPadding(c, p, ctxt, s)
if v > 0 {
s.Grow(int64(c) + int64(v))
- fillnop(s.P[c:], int(v))
+ fillnop(s.P[c:], v)
}
p.Pc = int64(c)
c += int32(v)
// Literal Z cases usually have "Zlit" in their name (Zlit, Zlitr_m, Zlitm_r).
func (ab *AsmBuf) PutOpBytesLit(offset int, op *opBytes) {
for int(op[offset]) != 0 {
- ab.Put1(byte(op[offset]))
+ ab.Put1(op[offset])
offset++
}
}
have := regIndex(int16(reg))
want := index
if have != want {
- regName := rconv(int(reg))
+ regName := rconv(reg)
t.Errorf("regIndex(%s):\nhave: %d\nwant: %d",
regName, have, want)
}
// We don't need the data for non-hashed symbols, yet.
panic("not supported")
}
- i := uint32(s.SymIdx + uint32(r.NSym()+r.NHashed64def()))
+ i := s.SymIdx + uint32(r.NSym()+r.NHashed64def())
return r.BytesAt(r.DataOff(i), r.DataSize(i))
}
// We treat the whole object file as the text section.
func (f *goobjFile) text() (textStart uint64, text []byte, err error) {
text = make([]byte, f.goobj.Size)
- _, err = f.f.ReadAt(text, int64(f.goobj.Offset))
+ _, err = f.f.ReadAt(text, f.goobj.Offset)
return
}
case N_DEBUG:
sym.Code = '?'
default:
- if s.SectionNumber < 0 || len(f.xcoff.Sections) < int(s.SectionNumber) {
+ if s.SectionNumber < 0 || len(f.xcoff.Sections) < s.SectionNumber {
return nil, fmt.Errorf("invalid section number in symbol table")
}
sect := f.xcoff.Sections[s.SectionNumber-1]
if s.SectionNumber <= 0 {
return nil, fmt.Errorf("symbol %s: invalid section number %d", name, s.SectionNumber)
}
- if len(f.Sections) < int(s.SectionNumber) {
+ if len(f.Sections) < s.SectionNumber {
return nil, fmt.Errorf("symbol %s: section number %d is larger than max %d", name, s.SectionNumber, len(f.Sections))
}
return s, nil
} else {
ldr.Errorf(s, "unexpected relocation for dynamic symbol %s", ldr.SymName(targ))
}
- rela.AddAddrPlus(target.Arch, targ, int64(r.Add()))
+ rela.AddAddrPlus(target.Arch, targ, r.Add())
// Don't mark r done here. That way we still apply it statically,
// so in the file content we'll also have the right offset
// to the relocation target. So it can be examined statically
for i := 0; ; i++ {
oName := ldr.SymName(rs)
name := oName + fmt.Sprintf("%+d-tramp%d", offset, i)
- tramp = ldr.LookupOrCreateSym(name, int(ldr.SymVersion(rs)))
+ tramp = ldr.LookupOrCreateSym(name, ldr.SymVersion(rs))
ldr.SetAttrReachable(tramp, true)
if ldr.SymType(tramp) == sym.SDYNIMPORT {
// don't reuse trampoline defined in other module
} else {
ldr.Errorf(s, "unexpected relocation for dynamic symbol %s", ldr.SymName(targ))
}
- rela.AddAddrPlus(target.Arch, targ, int64(r.Add()))
+ rela.AddAddrPlus(target.Arch, targ, r.Add())
// Don't mark r done here. That way we still apply it statically,
// so in the file content we'll also have the right offset
// to the relocation target. So it can be examined statically
if r.Siz() == 8 {
val = r.Add()
} else if target.IsBigEndian() {
- val = int64(uint32(val)) | int64(r.Add())<<32
+ val = int64(uint32(val)) | r.Add()<<32
} else {
val = val>>32<<32 | int64(uint32(r.Add()))
}
// R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21
// turn ADRP to MOVZ
- o0 = 0xd2a00000 | uint32(o0&0x1f) | (uint32((v>>16)&0xffff) << 5)
+ o0 = 0xd2a00000 | o0&0x1f | (uint32((v>>16)&0xffff) << 5)
// R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC
// turn LD64 to MOVK
if v&3 != 0 {
ldr.Errorf(s, "invalid address: %x for relocation type: R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC", v)
}
- o1 = 0xf2800000 | uint32(o1&0x1f) | (uint32(v&0xffff) << 5)
+ o1 = 0xf2800000 | o1&0x1f | (uint32(v&0xffff) << 5)
// when laid out, the instruction order must always be o0, o1.
if target.IsBigEndian() {
for i := 0; ; i++ {
oName := ldr.SymName(rs)
name := oName + fmt.Sprintf("%+x-tramp%d", r.Add(), i)
- tramp = ldr.LookupOrCreateSym(name, int(ldr.SymVersion(rs)))
+ tramp = ldr.LookupOrCreateSym(name, ldr.SymVersion(rs))
ldr.SetAttrReachable(tramp, true)
if ldr.SymType(tramp) == sym.SDYNIMPORT {
// don't reuse trampoline defined in other module
}
case objabi.R_DWTXTADDR_U1, objabi.R_DWTXTADDR_U2, objabi.R_DWTXTADDR_U3, objabi.R_DWTXTADDR_U4:
unit := ldr.SymUnit(rs)
- if idx, ok := unit.Addrs[sym.LoaderSym(rs)]; ok {
+ if idx, ok := unit.Addrs[rs]; ok {
o = int64(idx)
} else {
st.err.Errorf(s, "missing .debug_addr index relocation target %s", ldr.SymName(rs))
case objabi.R_ADDRCUOFF:
// debug_range and debug_loc elements use this relocation type to get an
// offset from the start of the compile unit.
- o = ldr.SymValue(rs) + r.Add() - ldr.SymValue(loader.Sym(ldr.SymUnit(rs).Textp[0]))
+ o = ldr.SymValue(rs) + r.Add() - ldr.SymValue(ldr.SymUnit(rs).Textp[0])
// r.Sym() can be 0 when CALL $(constant) is transformed from absolute PC to relative PC call.
case objabi.R_GOTPCREL:
if rst != sym.SHOSTOBJ {
o += int64(uint64(ldr.SymValue(rs)) - ldr.SymSect(rs).Vaddr)
}
o -= int64(off) // relative to section offset, not symbol
}
} else {
o += int64(siz)
return rr, false
}
rs := r.Sym()
- rr.Xsym = loader.Sym(ldr.SymSect(rs).Sym)
+ rr.Xsym = ldr.SymSect(rs).Sym
rr.Xadd = r.Add() + ldr.SymValue(rs) - int64(ldr.SymSect(rs).Vaddr)
// r.Sym() can be 0 when CALL $(constant) is transformed from absolute PC to relative PC call.
s := dwarfp[i].secSym()
sect := state.allocateNamedDataSection(&Segdwarf, ldr.SymName(s), []sym.SymKind{}, 04)
ldr.SetSymSect(s, sect)
- sect.Sym = sym.LoaderSym(s)
+ sect.Sym = s
curType := ldr.SymType(s)
state.setSymType(s, sym.SRODATA)
ldr.SetSymValue(s, int64(uint64(state.datsize)-sect.Vaddr))
func (d *dwctxt) calcCompUnitRanges() {
var prevUnit *sym.CompilationUnit
for _, s := range d.linkctxt.Textp {
- sym := loader.Sym(s)
+ sym := s
fi := d.ldr.FuncInfo(sym)
if !fi.Valid() {
// only create boundaries between symbols from
// different units.
sval := d.ldr.SymValue(sym)
- u0val := d.ldr.SymValue(loader.Sym(unit.Textp[0]))
+ u0val := d.ldr.SymValue(unit.Textp[0])
if prevUnit != unit {
unit.PCs = append(unit.PCs, dwarf.Range{Start: sval - u0val})
prevUnit = unit
// Output the state machine for each function remaining.
for _, s := range unit.Textp {
- fnSym := loader.Sym(s)
+ fnSym := s
_, _, _, lines := d.ldr.GetFuncDwarfAuxSyms(fnSym)
// Chain the line symbol onto the list.
// Collect up the ranges for functions in the unit.
rsize := uint64(rsu.Size())
for _, ls := range unit.RangeSyms {
- s := loader.Sym(ls)
+ s := ls
syms = append(syms, s)
rsize += uint64(d.ldr.SymSize(s))
}
var deltaBuf []byte
pcsp := obj.NewPCIter(uint32(d.arch.MinLC))
for _, s := range d.linkctxt.Textp {
- fn := loader.Sym(s)
+ fn := s
fi := d.ldr.FuncInfo(fn)
if !fi.Valid() {
continue
cu = append(cu, u.AbsFnDIEs...)
cu = append(cu, u.FuncDIEs...)
if u.Consts != 0 {
- cu = append(cu, loader.Sym(u.Consts))
+ cu = append(cu, u.Consts)
}
cu = append(cu, u.VarDIEs...)
var cusize int64
if unit.Addrs == nil {
unit.Addrs = make(map[sym.LoaderSym]uint32)
}
- if _, ok := unit.Addrs[sym.LoaderSym(rsym)]; ok {
+ if _, ok := unit.Addrs[rsym]; ok {
// already present, no work needed
} else {
sl := len(unit.Addrs)
if sl > lim {
log.Fatalf("internal error: %s relocation overflow on infosym for %s", rt.String(), d.ldr.SymName(fnsym))
}
- unit.Addrs[sym.LoaderSym(rsym)] = uint32(sl)
+ unit.Addrs[rsym] = uint32(sl)
sb.AddAddrPlus(d.arch, rsym, 0)
data := sb.Data()
if d.arch.PtrSize == 4 {
}
d.ldr.SetAttrNotInSymbolTable(infosym, true)
d.ldr.SetAttrReachable(infosym, true)
- unit.FuncDIEs = append(unit.FuncDIEs, sym.LoaderSym(infosym))
+ unit.FuncDIEs = append(unit.FuncDIEs, infosym)
if rangesym != 0 {
d.ldr.SetAttrNotInSymbolTable(rangesym, true)
d.ldr.SetAttrReachable(rangesym, true)
- unit.RangeSyms = append(unit.RangeSyms, sym.LoaderSym(rangesym))
+ unit.RangeSyms = append(unit.RangeSyms, rangesym)
}
// Walk the relocations of the subprogram DIE symbol to discover
if !d.ldr.AttrOnList(rsym) {
// abstract function
d.ldr.SetAttrOnList(rsym, true)
- unit.AbsFnDIEs = append(unit.AbsFnDIEs, sym.LoaderSym(rsym))
+ unit.AbsFnDIEs = append(unit.AbsFnDIEs, rsym)
d.importInfoSymbol(rsym)
}
continue
for _, unit := range lib.Units {
// We drop the constants into the first CU.
if consts != 0 {
- unit.Consts = sym.LoaderSym(consts)
+ unit.Consts = consts
d.importInfoSymbol(consts)
consts = 0
}
// abstract functions, visit range symbols. Note that
// Textp has been dead-code-eliminated already.
for _, s := range unit.Textp {
- d.dwarfVisitFunction(loader.Sym(s), unit)
+ d.dwarfVisitFunction(s, unit)
}
}
}
if varDIE != 0 {
unit := d.ldr.SymUnit(idx)
d.defgotype(gt)
- unit.VarDIEs = append(unit.VarDIEs, sym.LoaderSym(varDIE))
+ unit.VarDIEs = append(unit.VarDIEs, varDIE)
}
}
func (d *dwctxt) dwUnitPortion(u *sym.CompilationUnit, abbrevsym loader.Sym, us *dwUnitSyms) {
if u.DWInfo.Abbrev != dwarf.DW_ABRV_COMPUNIT_TEXTLESS {
us.linesyms = d.writelines(u, us.lineProlog)
- base := loader.Sym(u.Textp[0])
+ base := u.Textp[0]
if buildcfg.Experiment.Dwarf5 {
d.writedebugaddr(u, us.addrsym)
}
var dsyms []loader.Sym
for _, s := range unit.Textp {
- fnSym := loader.Sym(s)
+ fnSym := s
// NB: this looks at SDWARFFCN; it will need to also look
// at range and loc when they get there.
infosym, locsym, rangessym, _ := d.ldr.GetFuncDwarfAuxSyms(fnSym)
len += uint64(d.ldr.SymSize(hdrsym))
su := d.ldr.MakeSymbolUpdater(hdrsym)
if isDwarf64(d.linkctxt) {
- len -= 12 // sub size of length field
- su.SetUint(d.arch, 4, uint64(len)) // 4 because of 0XFFFFFFFF
+ len -= 12 // sub size of length field
+ su.SetUint(d.arch, 4, len) // 4 because of 0XFFFFFFFF
} else {
len -= 4 // subtract size of length field
su.SetUint32(d.arch, 0, uint32(len))
func (d *dwctxt) collectUnitLocs(u *sym.CompilationUnit) []loader.Sym {
syms := []loader.Sym{}
for _, fn := range u.FuncDIEs {
- relocs := d.ldr.Relocs(loader.Sym(fn))
+ relocs := d.ldr.Relocs(fn)
for i := 0; i < relocs.Count(); i++ {
reloc := relocs.At(i)
if reloc.Type() != objabi.R_DWARFSECREF {
ldr.SetSymValue(s, int64(pos))
sect := ldr.SymSect(s)
if sect != prevSect {
- sect.Vaddr = uint64(pos)
+ sect.Vaddr = pos
prevSect = sect
}
if ldr.SubSym(s) != 0 {
func elf64shdr(out *OutBuf, e *ElfShdr) {
out.Write32(e.Name)
- out.Write32(uint32(e.Type))
- out.Write64(uint64(e.Flags))
+ out.Write32(e.Type)
+ out.Write64(e.Flags)
out.Write64(e.Addr)
out.Write64(e.Off)
out.Write64(e.Size)
func elf32shdr(out *OutBuf, e *ElfShdr) {
out.Write32(e.Name)
- out.Write32(uint32(e.Type))
+ out.Write32(e.Type)
out.Write32(uint32(e.Flags))
out.Write32(uint32(e.Addr))
out.Write32(uint32(e.Off))
func elf64writehdr(out *OutBuf) uint32 {
out.Write(ehdr.Ident[:])
- out.Write16(uint16(ehdr.Type))
- out.Write16(uint16(ehdr.Machine))
- out.Write32(uint32(ehdr.Version))
+ out.Write16(ehdr.Type)
+ out.Write16(ehdr.Machine)
+ out.Write32(ehdr.Version)
out.Write64(ehdr.Entry)
out.Write64(ehdr.Phoff)
out.Write64(ehdr.Shoff)
func elf32writehdr(out *OutBuf) uint32 {
out.Write(ehdr.Ident[:])
- out.Write16(uint16(ehdr.Type))
- out.Write16(uint16(ehdr.Machine))
- out.Write32(uint32(ehdr.Version))
+ out.Write16(ehdr.Type)
+ out.Write16(ehdr.Machine)
+ out.Write32(ehdr.Version)
out.Write32(uint32(ehdr.Entry))
out.Write32(uint32(ehdr.Phoff))
out.Write32(uint32(ehdr.Shoff))
for i := 0; i < len(Segdwarf.Sections); i++ {
sect := Segdwarf.Sections[i]
si := dwarfp[i]
- if si.secSym() != loader.Sym(sect.Sym) ||
+ if si.secSym() != sect.Sym ||
ctxt.loader.SymSect(si.secSym()) != sect {
panic("inconsistency between dwarfp and Segdwarf")
}
shstrtabAddstring := func(s string) {
off := addshstr(s)
- elfsetstring(ctxt, 0, s, int(off))
+ elfsetstring(ctxt, 0, s, off)
}
shstrtabAddstring("")
for i := 0; i < len(Segdwarf.Sections); i++ {
sect := Segdwarf.Sections[i]
si := dwarfp[i]
- if si.secSym() != loader.Sym(sect.Sym) ||
+ if si.secSym() != sect.Sym ||
ctxt.loader.SymSect(si.secSym()) != sect {
panic("inconsistency between dwarfp and Segdwarf")
}
// Update the __LINKEDIT segment.
segSz := sigOff + sz - int64(linkeditSeg.Offset)
mf.ByteOrder.PutUint64(tmp[:8], uint64(segSz))
- _, err = f.WriteAt(tmp[:8], int64(linkeditOff)+int64(unsafe.Offsetof(macho.Segment64{}.Memsz)))
+ _, err = f.WriteAt(tmp[:8], linkeditOff+int64(unsafe.Offsetof(macho.Segment64{}.Memsz)))
if err != nil {
return err
}
- _, err = f.WriteAt(tmp[:8], int64(linkeditOff)+int64(unsafe.Offsetof(macho.Segment64{}.Filesz)))
+ _, err = f.WriteAt(tmp[:8], linkeditOff+int64(unsafe.Offsetof(macho.Segment64{}.Filesz)))
if err != nil {
return err
}
const size = 16
inlTreeSym.SetUint8(arch, int64(i*size+0), uint8(funcID))
// Bytes 1-3 are unused.
- inlTreeSym.SetUint32(arch, int64(i*size+4), uint32(nameOff))
+ inlTreeSym.SetUint32(arch, int64(i*size+4), nameOff)
inlTreeSym.SetUint32(arch, int64(i*size+8), uint32(call.ParentPC))
inlTreeSym.SetUint32(arch, int64(i*size+12), uint32(startLine))
}
off := int64(startLocations[i])
// entryOff uint32 (offset of func entry PC from textStart)
entryOff := textOff(ctxt, s, textStart)
- off = sb.SetUint32(ctxt.Arch, off, uint32(entryOff))
+ off = sb.SetUint32(ctxt.Arch, off, entryOff)
// nameOff int32
nameOff, ok := nameOffsets[s]
if !ok {
panic("couldn't find function name offset")
}
- off = sb.SetUint32(ctxt.Arch, off, uint32(nameOff))
+ off = sb.SetUint32(ctxt.Arch, off, nameOff)
// args int32
// TODO: Move into funcinfo.
} else {
off += 12
}
- off = sb.SetUint32(ctxt.Arch, off, uint32(numPCData(ldr, s, fi)))
+ off = sb.SetUint32(ctxt.Arch, off, numPCData(ldr, s, fi))
// Store the offset to compilation unit's file table.
cuIdx := ^uint32(0)
for i := 0; i < len(Segdwarf.Sections); i++ {
sect := Segdwarf.Sections[i]
si := dwarfp[i]
- if si.secSym() != loader.Sym(sect.Sym) ||
+ if si.secSym() != sect.Sym ||
ldr.SymSect(si.secSym()) != sect {
panic("inconsistency between dwarfp and Segdwarf")
}
const pageSize = 0x1000
const pageMask = pageSize - 1
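// PE base relocations are grouped by 4 KiB page: each entry records the
// page-aligned RVA of the page plus a 12-bit offset within it.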
- addr := ldr.SymValue(s) + int64(r.Off()) - int64(PEBASE)
+ addr := ldr.SymValue(s) + int64(r.Off()) - PEBASE
page := uint32(addr &^ pageMask)
off := uint32(addr & pageMask)
dwsize = getDwsectCUSize(sect.Name, name)
// .debug_abbrev is common to all packages and not found with the previous function
if sect.Name == ".debug_abbrev" {
- dwsize = uint64(ldr.SymSize(loader.Sym(sect.Sym)))
+ dwsize = uint64(ldr.SymSize(sect.Sym))
}
} else {
// Dwarf relocations need the symbol number of .dw* symbols.
// It doesn't need to know it for each package, one is enough.
// currSymSrcFile.csectAux == nil means first package.
- ldr.SetSymDynid(loader.Sym(sect.Sym), int32(f.symbolCount))
+ ldr.SetSymDynid(sect.Sym, int32(f.symbolCount))
if sect.Name == ".debug_frame" && ctxt.LinkMode != LinkExternal {
// CIE size must be added to the first package.
Xsmtyp: XTY_LD, // label definition (based on C)
Xauxtype: _AUX_CSECT,
}
- a4.Xsmtyp |= uint8(xcoffAlign(ldr, x, TextSym) << 3)
+ a4.Xsmtyp |= xcoffAlign(ldr, x, TextSym) << 3
syms = append(syms, a4)
return syms
Xsmclas: XMC_PR,
Xsmtyp: XTY_SD,
}
- a4.Xsmtyp |= uint8(xcoffAlign(ldr, x, TextSym) << 3)
+ a4.Xsmtyp |= xcoffAlign(ldr, x, TextSym) << 3
syms = append(syms, a4)
}
a4.Xsmtyp |= XTY_CM
}
- a4.Xsmtyp |= uint8(xcoffAlign(ldr, x, t) << 3)
+ a4.Xsmtyp |= xcoffAlign(ldr, x, t) << 3
syms = append(syms, a4)
/* Symbol table */
for _, s := range f.loaderSymbols {
lds := &XcoffLdSym64{
- Loffset: uint32(stlen + 2),
+ Loffset: stlen + 2,
Lsmtype: s.smtype,
Lsmclas: s.smclas,
}
func asmbXcoff(ctxt *Link) {
ctxt.Out.SeekSet(0)
fileoff := int64(Segdwarf.Fileoff + Segdwarf.Filelen)
- fileoff = int64(Rnd(int64(fileoff), *FlagRound))
+ fileoff = Rnd(fileoff, *FlagRound)
xfile.sectNameToScnum = make(map[string]int16)
if !ldr.AttrReachable(s) {
continue
}
- if ldr.SymValue(s) >= int64(eaddr) {
+ if ldr.SymValue(s) >= eaddr {
break
}
for i := 0; i < len(Segdwarf.Sections); i++ {
sect := Segdwarf.Sections[i]
si := dwarfp[i]
- if si.secSym() != loader.Sym(sect.Sym) ||
+ if si.secSym() != sect.Sym ||
ldr.SymSect(si.secSym()) != sect {
panic("inconsistency between dwarfp and Segdwarf")
}
rSym = 0
} else {
var elfsym ElfSym
- if err := readelfsym(l, arch, elfobj, int(symIdx), &elfsym, 0, 0); err != nil {
+ if err := readelfsym(l, arch, elfobj, symIdx, &elfsym, 0, 0); err != nil {
return errorf("malformed elf file: %v", err)
}
elfsym.sym = symbols[symIdx]
if elfsym.sym == 0 {
- return errorf("malformed elf file: %s#%d: reloc of invalid sym #%d %s shndx=%d type=%d", l.SymName(sect.sym), j, int(symIdx), elfsym.name, elfsym.shndx, elfsym.type_)
+ return errorf("malformed elf file: %s#%d: reloc of invalid sym #%d %s shndx=%d type=%d", l.SymName(sect.sym), j, symIdx, elfsym.name, elfsym.shndx, elfsym.type_)
}
rSym = elfsym.sym
}
elfobj.f.MustSeek(int64(uint64(elfobj.base)+sect.off), 0)
- sect.base, sect.readOnlyMem, err = elfobj.f.Slice(uint64(sect.size))
+ sect.base, sect.readOnlyMem, err = elfobj.f.Slice(sect.size)
if err != nil {
return fmt.Errorf("short read: %v", err)
}
return pp.ver
}
r, li := l.toLocal(i)
- return int(abiToVer(r.Sym(li).ABI(), r.version))
+ return abiToVer(r.Sym(li).ABI(), r.version)
}
func (l *Loader) IsFileLocal(i Sym) bool {
// We still need to record its presence in the current
// package, as the trampoline pass expects packages
// to be laid out in dependency order.
- lib.DupTextSyms = append(lib.DupTextSyms, sym.LoaderSym(gi))
+ lib.DupTextSyms = append(lib.DupTextSyms, gi)
continue // symbol in different object
}
if dupok {
- lib.DupTextSyms = append(lib.DupTextSyms, sym.LoaderSym(gi))
+ lib.DupTextSyms = append(lib.DupTextSyms, gi)
continue
}
- lib.Textp = append(lib.Textp, sym.LoaderSym(gi))
+ lib.Textp = append(lib.Textp, gi)
}
}
lists := [2][]sym.LoaderSym{lib.Textp, lib.DupTextSyms}
for i, list := range lists {
for _, s := range list {
- sym := Sym(s)
+ sym := s
if !assignedToUnit.Has(sym) {
textp = append(textp, sym)
unit := l.SymUnit(sym)
} else {
ldr.Errorf(s, "unexpected relocation for dynamic symbol %s", ldr.SymName(targ))
}
- rela.AddAddrPlus(target.Arch, targ, int64(r.Add()))
+ rela.AddAddrPlus(target.Arch, targ, r.Add())
return true
}
pc := ldr.SymValue(s) + int64(r.Off())
t := calculatePCAlignedReloc(r.Type(), ldr.SymAddr(rs)+r.Add(), pc)
if r.Type() == objabi.R_LOONG64_ADDR_LO {
- return int64(val&0xffc003ff | (t << 10)), noExtReloc, isOk
+ return val&0xffc003ff | (t << 10), noExtReloc, isOk
}
- return int64(val&0xfe00001f | (t << 5)), noExtReloc, isOk
+ return val&0xfe00001f | (t << 5), noExtReloc, isOk
case objabi.R_LOONG64_TLS_LE_HI,
objabi.R_LOONG64_TLS_LE_LO:
t := ldr.SymAddr(rs) + r.Add()
if r.Type() == objabi.R_LOONG64_TLS_LE_LO {
- return int64(val&0xffc003ff | ((t & 0xfff) << 10)), noExtReloc, isOk
+ return val&0xffc003ff | ((t & 0xfff) << 10), noExtReloc, isOk
}
- return int64(val&0xfe00001f | (((t) >> 12 << 5) & 0x1ffffe0)), noExtReloc, isOk
+ return val&0xfe00001f | (((t) >> 12 << 5) & 0x1ffffe0), noExtReloc, isOk
case objabi.R_CALLLOONG64,
objabi.R_JMPLOONG64:
pc := ldr.SymValue(s) + int64(r.Off())
t := ldr.SymAddr(rs) + r.Add() - pc
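// B and BL on loong64 split the 26-bit word offset across two fields:
// offs[15:0] lands in instruction bits 25:10 and offs[25:16] in bits 9:0.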
- return int64(val&0xfc000000 | (((t >> 2) & 0xffff) << 10) | (((t >> 2) & 0x3ff0000) >> 16)), noExtReloc, isOk
+ return val&0xfc000000 | (((t >> 2) & 0xffff) << 10) | (((t >> 2) & 0x3ff0000) >> 16), noExtReloc, isOk
case objabi.R_JMP16LOONG64,
objabi.R_JMP21LOONG64:
pc := ldr.SymValue(s) + int64(r.Off())
t := ldr.SymAddr(rs) + r.Add() - pc
if r.Type() == objabi.R_JMP16LOONG64 {
- return int64(val&0xfc0003ff | (((t >> 2) & 0xffff) << 10)), noExtReloc, isOk
+ return val&0xfc0003ff | (((t >> 2) & 0xffff) << 10), noExtReloc, isOk
}
- return int64(val&0xfc0003e0 | (((t >> 2) & 0xffff) << 10) | (((t >> 2) & 0x1f0000) >> 16)), noExtReloc, isOk
+ return val&0xfc0003e0 | (((t >> 2) & 0xffff) << 10) | (((t >> 2) & 0x1f0000) >> 16), noExtReloc, isOk
case objabi.R_LOONG64_TLS_IE_HI,
objabi.R_LOONG64_TLS_IE_LO:
case objabi.R_LOONG64_ADD64, objabi.R_LOONG64_SUB64:
if r.Type() == objabi.R_LOONG64_ADD64 {
- return int64(val + ldr.SymAddr(rs) + r.Add()), noExtReloc, isOk
+ return val + ldr.SymAddr(rs) + r.Add(), noExtReloc, isOk
}
- return int64(val - (ldr.SymAddr(rs) + r.Add())), noExtReloc, isOk
+ return val - (ldr.SymAddr(rs) + r.Add()), noExtReloc, isOk
}
return val, 0, false
for i := 0; ; i++ {
oName := ldr.SymName(rs)
name := oName + fmt.Sprintf("%+x-tramp%d", r.Add(), i)
- tramp = ldr.LookupOrCreateSym(name, int(ldr.SymVersion(rs)))
+ tramp = ldr.LookupOrCreateSym(name, ldr.SymVersion(rs))
ldr.SetAttrReachable(tramp, true)
if ldr.SymType(tramp) == sym.SDYNIMPORT {
// don't reuse trampoline defined in other module
objabi.R_ADDRMIPSU:
t := ldr.SymValue(rs) + r.Add()
if r.Type() == objabi.R_ADDRMIPS {
- return int64(val&0xffff0000 | t&0xffff), noExtReloc, isOk
+ return val&0xffff0000 | t&0xffff, noExtReloc, isOk
}
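// For the upper half, adding 1<<15 before shifting rounds the value so that
// the sign-extended low half (applied via R_ADDRMIPS) reproduces t exactly.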
- return int64(val&0xffff0000 | ((t+1<<15)>>16)&0xffff), noExtReloc, isOk
+ return val&0xffff0000 | ((t+1<<15)>>16)&0xffff, noExtReloc, isOk
case objabi.R_ADDRMIPSTLS:
// thread pointer is at 0x7000 offset from the start of TLS data area
t := ldr.SymValue(rs) + r.Add() - 0x7000
if t < -32768 || t >= 32768 {
ldr.Errorf(s, "TLS offset out of range %d", t)
}
- return int64(val&0xffff0000 | t&0xffff), noExtReloc, isOk
+ return val&0xffff0000 | t&0xffff, noExtReloc, isOk
case objabi.R_CALLMIPS,
objabi.R_JMPMIPS:
// Low 26 bits = (S + A) >> 2
t := ldr.SymValue(rs) + r.Add()
- return int64(val&0xfc000000 | (t>>2)&^0xfc000000), noExtReloc, isOk
+ return val&0xfc000000 | (t>>2)&^0xfc000000, noExtReloc, isOk
}
return val, 0, false
} else {
ldr.Errorf(s, "unexpected relocation for dynamic symbol %s", ldr.SymName(targ))
}
- rela.AddAddrPlus(target.Arch, targ, int64(r.Add()))
+ rela.AddAddrPlus(target.Arch, targ, r.Add())
// Don't mark r done here. That way we still apply it statically,
// so in the file content we'll also have the right offset
// Look up the trampoline in case it already exists
- tramp = ldr.LookupOrCreateSym(name, int(ldr.SymVersion(rs)))
+ tramp = ldr.LookupOrCreateSym(name, ldr.SymVersion(rs))
if oName == "runtime.deferreturn" {
ldr.SetIsDeferReturnTramp(tramp, true)
}
}
immMask := int64(riscv.JTypeImmMask)
- val = (val &^ immMask) | int64(imm)
+ val = (val &^ immMask) | imm
return val, 0, true
if r.Add() != 0 {
name = fmt.Sprintf("%s%+x-tramp%d", oName, r.Add(), i)
}
- tramp = ldr.LookupOrCreateSym(name, int(ldr.SymVersion(rs)))
+ tramp = ldr.LookupOrCreateSym(name, ldr.SymVersion(rs))
ldr.SetAttrReachable(tramp, true)
if ldr.SymType(tramp) == sym.SDYNIMPORT {
// Do not reuse trampoline defined in other module.
if ldr.SymType(tramp) == 0 {
trampb := ldr.MakeSymbolUpdater(tramp)
ctxt.AddTramp(trampb, ldr.SymType(s))
- genCallTramp(ctxt.Arch, ctxt.LinkMode, ldr, trampb, rs, int64(r.Add()))
+ genCallTramp(ctxt.Arch, ctxt.LinkMode, ldr, trampb, rs, r.Add())
}
sb := ldr.MakeSymbolUpdater(s)
if ldr.SymValue(rs) == 0 {
ctxt.Out.WriteByte(0x60) // functype
writeUleb128(ctxt.Out, uint64(len(t.Params)))
for _, v := range t.Params {
- ctxt.Out.WriteByte(byte(v))
+ ctxt.Out.WriteByte(v)
}
writeUleb128(ctxt.Out, uint64(len(t.Results)))
for _, v := range t.Results {
- ctxt.Out.WriteByte(byte(v))
+ ctxt.Out.WriteByte(v)
}
}
Name: gs.startCause.name,
Start: ctx.elapsed(gs.startCause.time),
End: ctx.elapsed(ts),
- FromResource: uint64(gs.startCause.resource),
+ FromResource: gs.startCause.resource,
ToResource: uint64(resource),
FromStack: ctx.Stack(viewerFrames(gs.startCause.stack)),
})
}
func cgoNameinfoPTR(b []byte, sa *syscall.RawSockaddr, salen int) (int, error) {
- gerrno, err := unix.Getnameinfo(sa, salen, &b[0], len(b), nil, 0, unix.NI_NAMEREQD)
- return int(gerrno), err
+ return unix.Getnameinfo(sa, salen, &b[0], len(b), nil, 0, unix.NI_NAMEREQD)
}
func cgoSockaddrInet4(ip IP) *syscall.RawSockaddr {
deps: makeStatDepSet(schedStatsDep),
compute: func(in *statAggregate, out *metricValue) {
out.kind = metricKindUint64
- out.scalar = uint64(in.schedStats.gTotal)
+ out.scalar = in.schedStats.gTotal
},
},
"/sched/goroutines/not-in-go:goroutines": {
deps: makeStatDepSet(schedStatsDep),
compute: func(in *statAggregate, out *metricValue) {
out.kind = metricKindUint64
- out.scalar = uint64(in.schedStats.gNonGo)
+ out.scalar = in.schedStats.gNonGo
},
},
"/sched/goroutines/running:goroutines": {
deps: makeStatDepSet(schedStatsDep),
compute: func(in *statAggregate, out *metricValue) {
out.kind = metricKindUint64
- out.scalar = uint64(in.schedStats.gRunning)
+ out.scalar = in.schedStats.gRunning
},
},
"/sched/goroutines/runnable:goroutines": {
deps: makeStatDepSet(schedStatsDep),
compute: func(in *statAggregate, out *metricValue) {
out.kind = metricKindUint64
- out.scalar = uint64(in.schedStats.gRunnable)
+ out.scalar = in.schedStats.gRunnable
},
},
"/sched/goroutines/waiting:goroutines": {
deps: makeStatDepSet(schedStatsDep),
compute: func(in *statAggregate, out *metricValue) {
out.kind = metricKindUint64
- out.scalar = uint64(in.schedStats.gWaiting)
+ out.scalar = in.schedStats.gWaiting
},
},
"/sched/goroutines-created:goroutines": {
deps: makeStatDepSet(schedStatsDep),
compute: func(in *statAggregate, out *metricValue) {
out.kind = metricKindUint64
- out.scalar = uint64(in.schedStats.gCreated)
+ out.scalar = in.schedStats.gCreated
},
},
"/sched/latencies:seconds": {
deps: makeStatDepSet(schedStatsDep),
compute: func(in *statAggregate, out *metricValue) {
out.kind = metricKindUint64
- out.scalar = uint64(in.schedStats.threads)
+ out.scalar = in.schedStats.threads
},
},
"/sync/mutex/wait/total:seconds": {