Cypherpunks repositories - gostls13.git/commitdiff
cmd/compile: fix noopt build
author    Keith Randall <khr@golang.org>
          Sun, 28 Aug 2016 18:17:37 +0000 (11:17 -0700)
committer Keith Randall <khr@golang.org>
          Sun, 28 Aug 2016 18:54:52 +0000 (18:54 +0000)
The atomic add rules depended on CSE to combine duplicate atomic ops.
With -N, CSE doesn't run, so the Select0 and Select1 rules each
materialized their own XADDLlock, leaving duplicate atomic ops behind.

Redo the rules for atomic add so there's only one atomic op.
Introduce add-to-first-part-of-tuple pseudo-ops (AddTupleFirst32/64) to
keep the atomic add result correct.

Change-Id: Ib132247051abe5f80fefad6c197db8df8ee06427
Reviewed-on: https://go-review.googlesource.com/27991
Run-TryBot: Keith Randall <khr@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Cherry Zhang <cherryyz@google.com>
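
A minimal runnable sketch (not part of this commit; the program and its
values are illustrative) of the user-level operation this lowering serves:
sync/atomic.AddUint32 must return the new value, while the locked XADDL
instruction yields the old value, which is why val still has to be added to
the first (value) part of the XADDLlock tuple.

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    func main() {
        var counter uint32
        // On amd64 this becomes a single locked XADDL; the compiler then
        // adds the delta back to XADDL's result (the old value) to produce
        // the new value the API promises.
        got := atomic.AddUint32(&counter, 5)
        fmt.Println(got) // 5 = old value (0) + delta (5)
    }
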
src/cmd/compile/internal/amd64/ssa.go
src/cmd/compile/internal/ssa/gen/AMD64.rules
src/cmd/compile/internal/ssa/gen/AMD64Ops.go
src/cmd/compile/internal/ssa/opGen.go
src/cmd/compile/internal/ssa/rewriteAMD64.go

diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go
index f1baf0812923cd7bc68b153a9cd323a3df2810dd..aa581eebfbe4956f42551527ef8655dd8004003f 100644
@@ -888,6 +888,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
                v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString())
        case ssa.OpAMD64FlagEQ, ssa.OpAMD64FlagLT_ULT, ssa.OpAMD64FlagLT_UGT, ssa.OpAMD64FlagGT_ULT, ssa.OpAMD64FlagGT_UGT:
                v.Fatalf("Flag* ops should never make it to codegen %v", v.LongString())
+       case ssa.OpAMD64AddTupleFirst32, ssa.OpAMD64AddTupleFirst64:
+               v.Fatalf("AddTupleFirst* should never make it to codegen %v", v.LongString())
        case ssa.OpAMD64REPSTOSQ:
                gc.Prog(x86.AREP)
                gc.Prog(x86.ASTOSQ)
diff --git a/src/cmd/compile/internal/ssa/gen/AMD64.rules b/src/cmd/compile/internal/ssa/gen/AMD64.rules
index 04e888c30ae975f106fb5c83830fb640bd47c9cf..d61164ffc0dd443a1283391f228356c64c676ed7 100644
 (AtomicExchange64 ptr val mem) -> (XCHGQ val ptr mem)
 
 // Atomic adds.
-(Select0 <t> (AtomicAdd32 ptr val mem)) -> (ADDL (Select0 <t> (XADDLlock val ptr mem)) val)
-(Select1     (AtomicAdd32 ptr val mem)) ->       (Select1     (XADDLlock val ptr mem))
-(Select0 <t> (AtomicAdd64 ptr val mem)) -> (ADDQ (Select0 <t> (XADDQlock val ptr mem)) val)
-(Select1     (AtomicAdd64 ptr val mem)) ->       (Select1     (XADDQlock val ptr mem))
+(AtomicAdd32 ptr val mem) -> (AddTupleFirst32 (XADDLlock val ptr mem) val)
+(AtomicAdd64 ptr val mem) -> (AddTupleFirst64 (XADDQlock val ptr mem) val)
+(Select0 <t> (AddTupleFirst32 tuple val)) -> (ADDL val (Select0 <t> tuple))
+(Select1     (AddTupleFirst32 tuple _  )) -> (Select1 tuple)
+(Select0 <t> (AddTupleFirst64 tuple val)) -> (ADDQ val (Select0 <t> tuple))
+(Select1     (AddTupleFirst64 tuple _  )) -> (Select1 tuple)
 
 // Atomic compare and swap.
 (AtomicCompareAndSwap32 ptr old new_ mem) -> (CMPXCHGLlock ptr old new_ mem)
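
A rough Go model of the new rules (hypothetical types and names, purely
illustrative, not compiler code): XADDLlock yields an <old value, memory>
tuple, AddTupleFirst32 defers adding val to the value half, and the
Select0/Select1 rules above then read the new value and the memory state out
of that single atomic op, so no CSE is needed even under -N.

    package main

    import "fmt"

    // tuple stands in for an SSA (UInt32, Mem) tuple value.
    type tuple struct {
        first uint32 // value half: the old value returned by XADDLlock
        mem   string // memory half (placeholder)
    }

    // addTupleFirst32 models (AddTupleFirst32 tuple val): <x, m> -> <x+val, m>.
    func addTupleFirst32(t tuple, val uint32) tuple {
        return tuple{first: t.first + val, mem: t.mem}
    }

    func main() {
        xadd := tuple{first: 7, mem: "mem'"} // one XADDLlock; old value was 7
        res := addTupleFirst32(xadd, 5)
        fmt.Println(res.first, res.mem) // Select0 -> 12 (new value), Select1 -> mem'
    }
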
diff --git a/src/cmd/compile/internal/ssa/gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/gen/AMD64Ops.go
index b08018826bd6d305968b31ae4e497bec900c003e..235da876932ab5c4a5173943c02c88738430cc7b 100644
@@ -531,6 +531,8 @@ func init() {
                // Note: arg0 and arg1 are backwards compared to MOVLstore (to facilitate resultInArg0)!
                {name: "XADDLlock", argLength: 3, reg: gpstorexchg, asm: "XADDL", typ: "(UInt32,Mem)", aux: "SymOff", resultInArg0: true},
                {name: "XADDQlock", argLength: 3, reg: gpstorexchg, asm: "XADDQ", typ: "(UInt64,Mem)", aux: "SymOff", resultInArg0: true},
+               {name: "AddTupleFirst32", argLength: 2}, // arg0=tuple <x,y>.  Returns <x+arg1,y>.
+               {name: "AddTupleFirst64", argLength: 2}, // arg0=tuple <x,y>.  Returns <x+arg1,y>.
 
                // Compare and swap.
                // arg0 = pointer, arg1 = old value, arg2 = new value, arg3 = memory.
diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go
index b36d8cc83f0e751e9471663b62b28f1572dacdef..ca5492ac0c742d5a00cfa423ac66a4f9eabdba83 100644
@@ -592,6 +592,8 @@ const (
        OpAMD64XCHGQ
        OpAMD64XADDLlock
        OpAMD64XADDQlock
+       OpAMD64AddTupleFirst32
+       OpAMD64AddTupleFirst64
        OpAMD64CMPXCHGLlock
        OpAMD64CMPXCHGQlock
        OpAMD64ANDBlock
@@ -6905,6 +6907,16 @@ var opcodeTable = [...]opInfo{
                        },
                },
        },
+       {
+               name:   "AddTupleFirst32",
+               argLen: 2,
+               reg:    regInfo{},
+       },
+       {
+               name:   "AddTupleFirst64",
+               argLen: 2,
+               reg:    regInfo{},
+       },
        {
                name:    "CMPXCHGLlock",
                auxType: auxSymOff,
diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go
index 15d7cc6e7b24e015debd0618e1af23570c14170e..3e481ca7252a51bc01fbc58fcceab8f66f9e0647 100644
@@ -306,6 +306,10 @@ func rewriteValueAMD64(v *Value, config *Config) bool {
                return rewriteValueAMD64_OpAnd8(v, config)
        case OpAndB:
                return rewriteValueAMD64_OpAndB(v, config)
+       case OpAtomicAdd32:
+               return rewriteValueAMD64_OpAtomicAdd32(v, config)
+       case OpAtomicAdd64:
+               return rewriteValueAMD64_OpAtomicAdd64(v, config)
        case OpAtomicAnd8:
                return rewriteValueAMD64_OpAtomicAnd8(v, config)
        case OpAtomicCompareAndSwap32:
@@ -13469,6 +13473,46 @@ func rewriteValueAMD64_OpAndB(v *Value, config *Config) bool {
                return true
        }
 }
+func rewriteValueAMD64_OpAtomicAdd32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (AtomicAdd32 ptr val mem)
+       // cond:
+       // result: (AddTupleFirst32 (XADDLlock val ptr mem) val)
+       for {
+               ptr := v.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               v.reset(OpAMD64AddTupleFirst32)
+               v0 := b.NewValue0(v.Line, OpAMD64XADDLlock, MakeTuple(config.fe.TypeUInt32(), TypeMem))
+               v0.AddArg(val)
+               v0.AddArg(ptr)
+               v0.AddArg(mem)
+               v.AddArg(v0)
+               v.AddArg(val)
+               return true
+       }
+}
+func rewriteValueAMD64_OpAtomicAdd64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (AtomicAdd64 ptr val mem)
+       // cond:
+       // result: (AddTupleFirst64 (XADDQlock val ptr mem) val)
+       for {
+               ptr := v.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               v.reset(OpAMD64AddTupleFirst64)
+               v0 := b.NewValue0(v.Line, OpAMD64XADDQlock, MakeTuple(config.fe.TypeUInt64(), TypeMem))
+               v0.AddArg(val)
+               v0.AddArg(ptr)
+               v0.AddArg(mem)
+               v.AddArg(v0)
+               v.AddArg(val)
+               return true
+       }
+}
 func rewriteValueAMD64_OpAtomicAnd8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
@@ -17807,50 +17851,40 @@ func rewriteValueAMD64_OpRsh8x8(v *Value, config *Config) bool {
 func rewriteValueAMD64_OpSelect0(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Select0 <t> (AtomicAdd32 ptr val mem))
+       // match: (Select0 <t> (AddTupleFirst32 tuple val))
        // cond:
-       // result: (ADDL (Select0 <t> (XADDLlock val ptr mem)) val)
+       // result: (ADDL val (Select0 <t> tuple))
        for {
                t := v.Type
                v_0 := v.Args[0]
-               if v_0.Op != OpAtomicAdd32 {
+               if v_0.Op != OpAMD64AddTupleFirst32 {
                        break
                }
-               ptr := v_0.Args[0]
+               tuple := v_0.Args[0]
                val := v_0.Args[1]
-               mem := v_0.Args[2]
                v.reset(OpAMD64ADDL)
+               v.AddArg(val)
                v0 := b.NewValue0(v.Line, OpSelect0, t)
-               v1 := b.NewValue0(v.Line, OpAMD64XADDLlock, MakeTuple(config.fe.TypeUInt32(), TypeMem))
-               v1.AddArg(val)
-               v1.AddArg(ptr)
-               v1.AddArg(mem)
-               v0.AddArg(v1)
+               v0.AddArg(tuple)
                v.AddArg(v0)
-               v.AddArg(val)
                return true
        }
-       // match: (Select0 <t> (AtomicAdd64 ptr val mem))
+       // match: (Select0 <t> (AddTupleFirst64 tuple val))
        // cond:
-       // result: (ADDQ (Select0 <t> (XADDQlock val ptr mem)) val)
+       // result: (ADDQ val (Select0 <t> tuple))
        for {
                t := v.Type
                v_0 := v.Args[0]
-               if v_0.Op != OpAtomicAdd64 {
+               if v_0.Op != OpAMD64AddTupleFirst64 {
                        break
                }
-               ptr := v_0.Args[0]
+               tuple := v_0.Args[0]
                val := v_0.Args[1]
-               mem := v_0.Args[2]
                v.reset(OpAMD64ADDQ)
+               v.AddArg(val)
                v0 := b.NewValue0(v.Line, OpSelect0, t)
-               v1 := b.NewValue0(v.Line, OpAMD64XADDQlock, MakeTuple(config.fe.TypeUInt64(), TypeMem))
-               v1.AddArg(val)
-               v1.AddArg(ptr)
-               v1.AddArg(mem)
-               v0.AddArg(v1)
+               v0.AddArg(tuple)
                v.AddArg(v0)
-               v.AddArg(val)
                return true
        }
        return false
@@ -17858,42 +17892,30 @@ func rewriteValueAMD64_OpSelect0(v *Value, config *Config) bool {
 func rewriteValueAMD64_OpSelect1(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Select1     (AtomicAdd32 ptr val mem))
+       // match: (Select1     (AddTupleFirst32 tuple _  ))
        // cond:
-       // result: (Select1     (XADDLlock val ptr mem))
+       // result: (Select1 tuple)
        for {
                v_0 := v.Args[0]
-               if v_0.Op != OpAtomicAdd32 {
+               if v_0.Op != OpAMD64AddTupleFirst32 {
                        break
                }
-               ptr := v_0.Args[0]
-               val := v_0.Args[1]
-               mem := v_0.Args[2]
+               tuple := v_0.Args[0]
                v.reset(OpSelect1)
-               v0 := b.NewValue0(v.Line, OpAMD64XADDLlock, MakeTuple(config.fe.TypeUInt32(), TypeMem))
-               v0.AddArg(val)
-               v0.AddArg(ptr)
-               v0.AddArg(mem)
-               v.AddArg(v0)
+               v.AddArg(tuple)
                return true
        }
-       // match: (Select1     (AtomicAdd64 ptr val mem))
+       // match: (Select1     (AddTupleFirst64 tuple _  ))
        // cond:
-       // result: (Select1     (XADDQlock val ptr mem))
+       // result: (Select1 tuple)
        for {
                v_0 := v.Args[0]
-               if v_0.Op != OpAtomicAdd64 {
+               if v_0.Op != OpAMD64AddTupleFirst64 {
                        break
                }
-               ptr := v_0.Args[0]
-               val := v_0.Args[1]
-               mem := v_0.Args[2]
+               tuple := v_0.Args[0]
                v.reset(OpSelect1)
-               v0 := b.NewValue0(v.Line, OpAMD64XADDQlock, MakeTuple(config.fe.TypeUInt64(), TypeMem))
-               v0.AddArg(val)
-               v0.AddArg(ptr)
-               v0.AddArg(mem)
-               v.AddArg(v0)
+               v.AddArg(tuple)
                return true
        }
        return false