adduint64(d, 0); // value
else
addaddr(d, s);
- } else {
+ } else if(HEADTYPE != Hwindows) {
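+ // PE (Hwindows) needs no dynamic symbol table entry here;
+ // any other unhandled format is an error.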
diag("adddynsym: unsupported binary format");
}
}
#include "../ld/pe.h"
void dynreloc(void);
+static vlong addaddrplus4(Sym *s, Sym *t, int32 add);
/*
* divide-and-conquer list-link
r->add = targ->plt;
// jmp *addr
- adduint8(rel, 0xff);
- adduint8(rel, 0x25);
- addaddr(rel, targ);
- adduint8(rel, 0x90);
- adduint8(rel, 0x90);
+ if(thechar == '8') {
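+ // 386: jmp *addr is ff 25 imm32 (6 bytes); pad to 8 bytes with two nops.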
+ adduint8(rel, 0xff);
+ adduint8(rel, 0x25);
+ addaddr(rel, targ);
+ adduint8(rel, 0x90);
+ adduint8(rel, 0x90);
+ } else {
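+ // amd64: jmp *addr through a 32-bit absolute address needs a SIB byte:
+ // ff 24 25 imm32 (7 bytes); pad to 8 bytes with one nop.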
+ adduint8(rel, 0xff);
+ adduint8(rel, 0x24);
+ adduint8(rel, 0x25);
+ addaddrplus4(rel, targ, 0);
+ adduint8(rel, 0x90);
+ }
} else if(r->sym->plt >= 0) {
r->sym = rel;
r->add = targ->plt;
return i;
}
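+// addaddrplus4 appends a 4-byte D_ADDR relocation against t+add to the
+// end of s and returns the offset at which it was placed.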
+vlong
+addaddrplus4(Sym *s, Sym *t, int32 add)
+{
+ vlong i;
+ Reloc *r;
+
+ if(s->type == 0)
+ s->type = SDATA;
+ s->reachable = 1;
+ i = s->size;
+ s->size += 4;
+ symgrow(s, s->size);
+ r = addrel(s);
+ r->sym = t;
+ r->off = i;
+ r->siz = 4;
+ r->type = D_ADDR;
+ r->add = add;
+ return i;
+}
+
vlong
addpcrelplus(Sym *s, Sym *t, int32 add)
{
#define IMAGE_REL_I386_SECREL7 0x000D
#define IMAGE_REL_I386_REL32 0x0014
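+
+// x86-64 relocation types, as defined in the PE/COFF specification.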
+#define IMAGE_REL_AMD64_ABSOLUTE 0x0000
+#define IMAGE_REL_AMD64_ADDR64 0x0001 // R_X86_64_64
+#define IMAGE_REL_AMD64_ADDR32 0x0002 // R_X86_64_PC32
+#define IMAGE_REL_AMD64_ADDR32NB 0x0003
+#define IMAGE_REL_AMD64_REL32 0x0004
+#define IMAGE_REL_AMD64_REL32_1 0x0005
+#define IMAGE_REL_AMD64_REL32_2 0x0006
+#define IMAGE_REL_AMD64_REL32_3 0x0007
+#define IMAGE_REL_AMD64_REL32_4 0x0008
+#define IMAGE_REL_AMD64_REL32_5 0x0009
+#define IMAGE_REL_AMD64_SECTION 0x000A
+#define IMAGE_REL_AMD64_SECREL 0x000B
+#define IMAGE_REL_AMD64_SECREL7 0x000C
+#define IMAGE_REL_AMD64_TOKEN 0x000D
+#define IMAGE_REL_AMD64_SREL32 0x000E
+#define IMAGE_REL_AMD64_PAIR 0x000F
+#define IMAGE_REL_AMD64_SSPAN32 0x0010
+
typedef struct PeSym PeSym;
typedef struct PeSect PeSect;
typedef struct PeObj PeObj;
default:
diag("%s: unknown relocation type %d;", pn, type);
case IMAGE_REL_I386_REL32:
+ case IMAGE_REL_AMD64_REL32:
rp->type = D_PCREL;
rp->add = 0;
break;
// load addend from image
rp->add = le32(rsect->base+rp->off);
break;
+ case IMAGE_REL_AMD64_ADDR32: // R_X86_64_PC32
+ rp->type = D_PCREL;
+ rp->add += 4;
+ break;
+ case IMAGE_REL_AMD64_ADDR64: // R_X86_64_64
+ rp->siz = 8;
+ rp->type = D_ADDR;
+ // load addend from image
+ rp->add = le64(rsect->base+rp->off);
+ break;
}
}
qsort(r, rsect->sh.NumberOfRelocations, sizeof r[0], rbyoff);
TESTQ AX, AX
JZ needtls
CALL AX
- JMP ok
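+ // On Windows, fall through and run the TLS setup below even when the
+ // cgo initialization call above (CALL AX) was made.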
+ CMPL runtime·iswindows(SB), $0
+ JEQ ok
needtls:
LEAQ runtime·tls0(SB), DI
MOVQ DI, 16(SP) // save g
MOVQ DX, 8(SP) // save SP
MOVQ BX, DI // DI = first argument in AMD64 ABI
+ MOVQ BX, CX // CX = first argument in Win64
CALL AX
// Restore registers, g, stack pointer.
threadentry(void *v)
{
ThreadStart ts;
+ void *tls0;
ts = *(ThreadStart*)v;
free(v);
/*
* Set specific keys in thread local storage.
*/
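+ // Allocate a 64-byte block and install it at 0x58(GS), the TLS pointer
+ // slot in the 64-bit TEB; g and m are kept in its first two words.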
+ tls0 = (void*)LocalAlloc(LPTR, 64);
asm volatile (
+ "movq %0, %%gs:0x58\n" // MOVL tls0, 0x58(GS)
"movq %%gs:0x58, %%rax\n" // MOVQ 0x58(GS), tmp
- "movq %0, 0(%%rax)\n" // MOVQ g, 0(GS)
- "movq %1, 8(%%rax)\n" // MOVQ m, 8(GS)
- :: "r"(ts.g), "r"(ts.m) : "%rax"
+ "movq %1, 0(%%rax)\n" // MOVQ g, 0(GS)
+ "movq %2, 8(%%rax)\n" // MOVQ m, 8(GS)
+ :: "r"(tls0), "r"(ts.g), "r"(ts.m) : "%rax"
);
crosscall_amd64(ts.fn);
MOVQ $_rt0_amd64(SB), AX
MOVQ SP, DI
JMP AX
+
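+// Set to 1 so that shared amd64 assembly (see the runtime·iswindows
+// check above) can branch on Windows-specific behavior.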
+DATA runtime·iswindows(SB)/4, $1
+GLOBL runtime·iswindows(SB), $4