Cypherpunks repositories - gostls13.git/commitdiff
cmd/compile: add spectre mitigation mode enabled by -spectre
author Russ Cox <rsc@golang.org>
Mon, 6 Jan 2020 15:31:39 +0000 (10:31 -0500)
committer Russ Cox <rsc@golang.org>
Fri, 13 Mar 2020 19:05:46 +0000 (19:05 +0000)
This commit adds a new cmd/compile flag -spectre,
which accepts a comma-separated list of possible
Spectre mitigations to apply, or the empty string (none),
or "all". The only known mitigation right now is "index",
which uses conditional moves to ensure that x86-64 CPUs
do not speculate past index bounds checks.
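(Since this is a cmd/compile flag, it is typically reached through go build's
-gcflags; for example, the same form used by the new codegen test below,

	go build -gcflags=-spectre=index ./...

enables index masking for the packages named on the command line, and
-gcflags=all=-spectre=index extends it to their dependencies as well.)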

Speculating past index bounds checks may be problematic
on systems running privileged servers that accept requests
from untrusted users who can execute their own programs
on the same machine. (And even then, further constraints apply that
make it even more unlikely in practice.)

The cases this protects against are analogous to the ones
Microsoft explains in the "Array out of bounds load/store feeding ..."
sections here:
https://docs.microsoft.com/en-us/cpp/security/developer-guidance-speculative-execution?view=vs-2019#array-out-of-bounds-load-feeding-an-indirect-branch

Change-Id: Ib7532d7e12466b17e04c4e2075c2a456dc98f610
Reviewed-on: https://go-review.googlesource.com/c/go/+/222660
Reviewed-by: Keith Randall <khr@golang.org>
src/cmd/compile/internal/gc/main.go
src/cmd/compile/internal/gc/ssa.go
src/cmd/compile/internal/ssa/gen/AMD64.rules
src/cmd/compile/internal/ssa/gen/genericOps.go
src/cmd/compile/internal/ssa/opGen.go
src/cmd/compile/internal/ssa/rewriteAMD64.go
test/codegen/spectre.go [new file with mode: 0644]

index 18a210baa49f7082b32306924ab10b1e013082bd..e3a47339bee86b95adc7de4e406c24cae3994e98 100644 (file)
@@ -36,7 +36,9 @@ import (
 var imported_unsafe bool
 
 var (
-       buildid string
+       buildid      string
+       spectre      string
+       spectreIndex bool
 )
 
 var (
@@ -250,6 +252,7 @@ func Main(archInit func(*Arch)) {
        if sys.RaceDetectorSupported(objabi.GOOS, objabi.GOARCH) {
                flag.BoolVar(&flag_race, "race", false, "enable race detector")
        }
+       flag.StringVar(&spectre, "spectre", spectre, "enable spectre mitigations in `list` (all, index, ret)")
        if enableTrace {
                flag.BoolVar(&trace, "t", false, "trace type-checking")
        }
@@ -282,10 +285,33 @@ func Main(archInit func(*Arch)) {
 
        objabi.Flagparse(usage)
 
+       for _, f := range strings.Split(spectre, ",") {
+               f = strings.TrimSpace(f)
+               switch f {
+               default:
+                       log.Fatalf("unknown setting -spectre=%s", f)
+               case "":
+                       // nothing
+               case "all":
+                       spectreIndex = true
+               case "index":
+                       spectreIndex = true
+               }
+       }
+
+       if spectreIndex {
+               switch objabi.GOARCH {
+               case "amd64":
+                       // ok
+               default:
+                       log.Fatalf("GOARCH=%s does not support -spectre=index", objabi.GOARCH)
+               }
+       }
+
        // Record flags that affect the build result. (And don't
        // record flags that don't, since that would cause spurious
        // changes in the binary.)
-       recordFlags("B", "N", "l", "msan", "race", "shared", "dynlink", "dwarflocationlists", "dwarfbasentries", "smallframes", "newobj")
+       recordFlags("B", "N", "l", "msan", "race", "shared", "dynlink", "dwarflocationlists", "dwarfbasentries", "smallframes", "spectre", "newobj")
 
        if smallFrames {
                maxStackVarSize = 128 * 1024
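One note on the flag handling above: the help string lists "ret" as a possible
setting, but this change only wires up "index" and "all" (which here just
implies "index"); any other value, including -spectre=ret, falls through to the
unknown-setting fatal error. The GOARCH switch then limits -spectre=index to
amd64, the only architecture given a lowering for the new SSA ops below.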
index ddd40010e56f36c8f51b41f8cfe3c6c48843609e..2553feefbc4f341723de1574c7a35c83899c76a4 100644 (file)
@@ -2606,7 +2606,7 @@ func (s *state) expr(n *Node) *ssa.Value {
                                        return s.newValue0(ssa.OpUnknown, n.Type)
                                }
                                len := s.constInt(types.Types[TINT], bound)
-                               i = s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded())
+                               s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded()) // checks i == 0
                                return s.newValue1I(ssa.OpArraySelect, n.Type, 0, a)
                        }
                        p := s.addr(n, false)
@@ -3009,7 +3009,7 @@ func (s *state) assign(left *Node, right *ssa.Value, deref bool, skip skipMask)
                        }
                        // Rewrite to a = [1]{v}
                        len := s.constInt(types.Types[TINT], 1)
-                       i = s.boundsCheck(i, len, ssa.BoundsIndex, false)
+                       s.boundsCheck(i, len, ssa.BoundsIndex, false) // checks i == 0
                        v := s.newValue1(ssa.OpArrayMake1, t, right)
                        s.assign(left.Left, v, false, 0)
                        return
@@ -4781,6 +4781,24 @@ func (s *state) boundsCheck(idx, len *ssa.Value, kind ssa.BoundsKind, bounded bo
        if bounded || Debug['B'] != 0 {
                // If bounded or bounds checking is flag-disabled, then no check necessary,
                // just return the extended index.
+               //
+               // Here, bounded == true if the compiler generated the index itself,
+               // such as in the expansion of a slice initializer. These indexes are
+               // compiler-generated, not Go program variables, so they cannot be
+               // attacker-controlled, so we can omit Spectre masking as well.
+               //
+               // Note that we do not want to omit Spectre masking in code like:
+               //
+               //      if 0 <= i && i < len(x) {
+               //              use(x[i])
+               //      }
+               //
+               // Lucky for us, bounded==false for that code.
+               // In that case (handled below), we emit a bounds check (and Spectre mask)
+               // and then the prove pass will remove the bounds check.
+               // In theory the prove pass could potentially remove certain
+               // Spectre masks, but it's very delicate and probably better
+               // to be conservative and leave them all in.
                return idx
        }
 
@@ -4832,6 +4850,15 @@ func (s *state) boundsCheck(idx, len *ssa.Value, kind ssa.BoundsKind, bounded bo
        }
        s.startBlock(bNext)
 
+       // In Spectre index mode, apply an appropriate mask to avoid speculative out-of-bounds accesses.
+       if spectreIndex {
+               op := ssa.OpSpectreIndex
+               if kind != ssa.BoundsIndex && kind != ssa.BoundsIndexU {
+                       op = ssa.OpSpectreSliceIndex
+               }
+               idx = s.newValue2(op, types.Types[TINT], idx, len)
+       }
+
        return idx
 }
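Taken together, the two halves of boundsCheck mean that compiler-generated
(bounded) indexes skip both the check and the mask, while every index that does
get a bounds check also gets a Spectre mask under -spectre=index. As a rough
sketch of what the mask computes, written as ordinary Go with branches purely
for illustration (hypothetical helper names; the real lowering uses conditional
moves precisely so there is no branch to mispredict):

package main

import "fmt"

// spectreIndexSketch mirrors the generic SpectreIndex op:
// it yields i when 0 <= i < length and 0 otherwise.
// The unsigned compare folds i < 0 into the out-of-range case.
func spectreIndexSketch(i, length int) int {
	if uint(i) < uint(length) {
		return i
	}
	return 0
}

// spectreSliceIndexSketch mirrors SpectreSliceIndex:
// a slice bound may equal the length, so the test is <= rather than <.
func spectreSliceIndexSketch(j, length int) int {
	if uint(j) <= uint(length) {
		return j
	}
	return 0
}

func main() {
	fmt.Println(spectreIndexSketch(3, 10))       // 3: in range, passes through
	fmt.Println(spectreIndexSketch(12, 10))      // 0: out of range, clamped
	fmt.Println(spectreIndexSketch(-1, 10))      // 0: negative, clamped
	fmt.Println(spectreSliceIndexSketch(10, 10)) // 10: j == len is a legal slice bound
}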
 
index 07981d2e8195b05ba64a69afcd69ec3e423c2f62..c6fad48f3cd375c0a2800b2765acbecfb0d1b17c 100644 (file)
 
 (Slicemask <t> x) -> (SARQconst (NEGQ <t> x) [63])
 
+(SpectreIndex <t> x y) -> (CMOVQCC x (MOVQconst [0]) (CMPQ x y))
+(SpectreSliceIndex <t> x y) -> (CMOVQHI x (MOVQconst [0]) (CMPQ x y))
+
 // Lowering truncation
 // Because we ignore high parts of registers, truncates are just copies.
 (Trunc16to8  ...) -> (Copy ...)
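On the choice of condition codes in the two rules above: CMPQ x y compares the
index against the bound as unsigned 64-bit values, so a negative index looks
like a huge unsigned number and is clamped along with genuinely out-of-range
ones. CMOVQCC (carry clear, i.e. x >= y unsigned) substitutes 0 for an ordinary
index, which must be strictly less than the length, while CMOVQHI (strictly
above) is used for slice bounds, where x == y is legal, matching the <= in
SpectreSliceIndex.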
index b7e91a1f20e3149da7d929abcf0cecb564dbe78b..2892a0b3cf60e198469b10dd5769939f361c1423 100644 (file)
@@ -520,6 +520,9 @@ var genericOps = []opData{
        {name: "Zeromask", argLength: 1, typ: "UInt32"}, // 0 if arg0 == 0, 0xffffffff if arg0 != 0
        {name: "Slicemask", argLength: 1},               // 0 if arg0 == 0, -1 if arg0 > 0, undef if arg0<0. Type is native int size.
 
+       {name: "SpectreIndex", argLength: 2},      // arg0 if 0 <= arg0 < arg1, 0 otherwise. Type is native int size.
+       {name: "SpectreSliceIndex", argLength: 2}, // arg0 if 0 <= arg0 <= arg1, 0 otherwise. Type is native int size.
+
        {name: "Cvt32Uto32F", argLength: 1}, // uint32 -> float32, only used on 32-bit arch
        {name: "Cvt32Uto64F", argLength: 1}, // uint32 -> float64, only used on 32-bit arch
        {name: "Cvt32Fto32U", argLength: 1}, // float32 -> uint32, only used on 32-bit arch
index 9da7376a8ac12780ec0645a08659e8596c5aca42..2573ba1f2f31f5abdcd74a27cc6130b4dfa4a44e 100644 (file)
@@ -2654,6 +2654,8 @@ const (
        OpSignmask
        OpZeromask
        OpSlicemask
+       OpSpectreIndex
+       OpSpectreSliceIndex
        OpCvt32Uto32F
        OpCvt32Uto64F
        OpCvt32Fto32U
@@ -33041,6 +33043,16 @@ var opcodeTable = [...]opInfo{
                argLen:  1,
                generic: true,
        },
+       {
+               name:    "SpectreIndex",
+               argLen:  2,
+               generic: true,
+       },
+       {
+               name:    "SpectreSliceIndex",
+               argLen:  2,
+               generic: true,
+       },
        {
                name:    "Cvt32Uto32F",
                argLen:  1,
index 16a3f641589f91337c17c506c8f2bd269e52b574..e178c1251e97e7c7fa5fc733b3643b725983f2f8 100644 (file)
@@ -1096,6 +1096,10 @@ func rewriteValueAMD64(v *Value) bool {
                return true
        case OpSlicemask:
                return rewriteValueAMD64_OpSlicemask(v)
+       case OpSpectreIndex:
+               return rewriteValueAMD64_OpSpectreIndex(v)
+       case OpSpectreSliceIndex:
+               return rewriteValueAMD64_OpSpectreSliceIndex(v)
        case OpSqrt:
                v.Op = OpAMD64SQRTSD
                return true
@@ -33033,6 +33037,44 @@ func rewriteValueAMD64_OpSlicemask(v *Value) bool {
                return true
        }
 }
+func rewriteValueAMD64_OpSpectreIndex(v *Value) bool {
+       v_1 := v.Args[1]
+       v_0 := v.Args[0]
+       b := v.Block
+       typ := &b.Func.Config.Types
+       // match: (SpectreIndex <t> x y)
+       // result: (CMOVQCC x (MOVQconst [0]) (CMPQ x y))
+       for {
+               x := v_0
+               y := v_1
+               v.reset(OpAMD64CMOVQCC)
+               v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
+               v0.AuxInt = 0
+               v1 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
+               v1.AddArg2(x, y)
+               v.AddArg3(x, v0, v1)
+               return true
+       }
+}
+func rewriteValueAMD64_OpSpectreSliceIndex(v *Value) bool {
+       v_1 := v.Args[1]
+       v_0 := v.Args[0]
+       b := v.Block
+       typ := &b.Func.Config.Types
+       // match: (SpectreSliceIndex <t> x y)
+       // result: (CMOVQHI x (MOVQconst [0]) (CMPQ x y))
+       for {
+               x := v_0
+               y := v_1
+               v.reset(OpAMD64CMOVQHI)
+               v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
+               v0.AuxInt = 0
+               v1 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
+               v1.AddArg2(x, y)
+               v.AddArg3(x, v0, v1)
+               return true
+       }
+}
 func rewriteValueAMD64_OpStore(v *Value) bool {
        v_2 := v.Args[2]
        v_1 := v.Args[1]
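(The two rewrite functions above, like the rest of rewriteAMD64.go, are
machine-generated from gen/AMD64.rules by the rulegen program under ssa/gen,
which is why the match/result comments repeat the rules verbatim; only the
.rules change is written by hand.)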
diff --git a/test/codegen/spectre.go b/test/codegen/spectre.go
new file mode 100644 (file)
index 0000000..3753498
--- /dev/null
@@ -0,0 +1,38 @@
+// asmcheck -gcflags=-spectre=index
+// +build amd64
+
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package codegen
+
+func IndexArray(x *[10]int, i int) int {
+       // amd64:`CMOVQCC`
+       return x[i]
+}
+
+func IndexString(x string, i int) byte {
+       // amd64:`CMOVQCC`
+       return x[i]
+}
+
+func IndexSlice(x []float64, i int) float64 {
+       // amd64:`CMOVQCC`
+       return x[i]
+}
+
+func SliceArray(x *[10]int, i, j int) []int {
+       // amd64:`CMOVQHI`
+       return x[i:j]
+}
+
+func SliceString(x string, i, j int) string {
+       // amd64:`CMOVQHI`
+       return x[i:j]
+}
+
+func SliceSlice(x []float64, i, j int) []float64 {
+       // amd64:`CMOVQHI`
+       return x[i:j]
+}
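The asmcheck harness compiles this file with the -gcflags given on its first
line and matches the generated amd64 assembly of each function against the
backquoted regexps in the comments, so IndexArray, for example, must contain a
CMOVQCC and SliceArray a CMOVQHI. Outside the harness, one way to eyeball the
same output (assuming a unix shell) is:

	go tool compile -spectre=index -S spectre.go | grep CMOVQ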