cmd/compile: resurrect clobberdead mode

This CL resurrects the clobberdead debugging mode (CL 23924).
When the -clobberdead flag is set (TODO: make it a GOEXPERIMENT?), the
compiler inserts code that clobbers all dead stack slots that
contain pointers.
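
A minimal way to exercise the mode, mirroring the test added below (a
sketch only; the source path x.go is a placeholder):

package main

import (
	"fmt"
	"os/exec"
)

func main() {
	// Compile every package in clobberdead mode and run the program; a
	// correct compiler produces the same output as a normal build.
	out, err := exec.Command("go", "run", "-gcflags=all=-clobberdead", "x.go").CombinedOutput()
	if err != nil {
		fmt.Printf("go run failed: %v\n%s", err, out)
		return
	}
	fmt.Print(string(out))
}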

Mark the Windows syscall functions cgo_unsafe_args, as their code in
fact does that: it takes the address of one argument and passes it to
cgocall.

Change-Id: Ie09a015f4bd14ae6053cc707866e30ae509b9d6f
Reviewed-on: https://go-review.googlesource.com/c/go/+/301791
Trust: Cherry Zhang <cherryyz@google.com>
Run-TryBot: Cherry Zhang <cherryyz@google.com>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: David Chase <drchase@google.com>
Reviewed-by: Than McIntosh <thanm@google.com>
Cherry Zhang 2021-03-12 18:38:02 -05:00
parent 0bd308ff27
commit 8628bf9a97
5 changed files with 251 additions and 3 deletions

src/cmd/compile/internal/base/flag.go

@@ -90,6 +90,7 @@ type CmdFlags struct {
BuildID string "help:\"record `id` as the build id in the export metadata\""
CPUProfile string "help:\"write cpu profile to `file`\""
Complete bool "help:\"compiling complete package (no C or assembly)\""
ClobberDead bool "help:\"clobber dead stack slots (for debugging)\""
Dwarf bool "help:\"generate DWARF symbols\""
DwarfBASEntries *bool "help:\"use base address selection entries in DWARF\"" // &Ctxt.UseBASEntries, set below
DwarfLocationLists *bool "help:\"add location lists to DWARF in optimized mode\"" // &Ctxt.Flag_locationlists, set below

src/cmd/compile/internal/liveness/plumb.go

@@ -16,7 +16,9 @@ package liveness
import (
"crypto/md5"
"crypto/sha1"
"fmt"
"os"
"sort"
"strings"
@@ -30,6 +32,7 @@ import (
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/objabi"
"cmd/internal/src"
)
// OpVarDef is an annotation for the liveness analysis, marking a place
@@ -123,9 +126,9 @@ type liveness struct {
unsafePoints bitvec.BitVec
// An array with a bit vector for each safe point in the
-// current Block during Liveness.epilogue. Indexed in Value
+// current Block during liveness.epilogue. Indexed in Value
// order for that block. Additionally, for the entry block
-// livevars[0] is the entry bitmap. Liveness.compact moves
+// livevars[0] is the entry bitmap. liveness.compact moves
// these to stackMaps.
livevars []bitvec.BitVec
@@ -136,6 +139,8 @@ type liveness struct {
stackMaps []bitvec.BitVec
cache progeffectscache
doClobber bool // Whether to clobber dead stack slots in this function.
}
// Map maps from *ssa.Value to LivenessIndex.
@@ -387,6 +392,9 @@ func newliveness(fn *ir.Func, f *ssa.Func, vars []*ir.Name, idx map[*ir.Name]int
lv.livenessMap.reset()
lv.markUnsafePoints()
lv.enableClobber()
return lv
}
@@ -820,6 +828,10 @@ func (lv *liveness) epilogue() {
live.Or(*live, liveout)
}
if lv.doClobber {
lv.clobber(b)
}
// The liveness maps for this block are now complete. Compact them.
lv.compact(b)
}
@@ -873,7 +885,7 @@ func (lv *liveness) compact(b *ssa.Block) {
}
for _, v := range b.Values {
hasStackMap := lv.hasStackMap(v)
-isUnsafePoint := lv.allUnsafe || lv.unsafePoints.Get(int32(v.ID))
+isUnsafePoint := lv.allUnsafe || v.Op != ssa.OpClobber && lv.unsafePoints.Get(int32(v.ID))
idx := objw.LivenessIndex{StackMapIndex: objw.StackMapDontCare, IsUnsafePoint: isUnsafePoint}
if hasStackMap {
idx.StackMapIndex = lv.stackMapSet.add(lv.livevars[pos])
@@ -888,6 +900,153 @@
lv.livevars = lv.livevars[:0]
}
func (lv *liveness) enableClobber() {
// The clobberdead experiment inserts code to clobber pointer slots in all
// the dead variables (locals and args) at every synchronous safepoint.
if !base.Flag.ClobberDead {
return
}
if lv.fn.Pragma&ir.CgoUnsafeArgs != 0 {
// C or assembly code uses the exact frame layout. Don't clobber.
return
}
if len(lv.vars) > 10000 || len(lv.f.Blocks) > 10000 {
// Be careful to avoid doing too much work.
// Bail if >10000 variables or >10000 blocks.
// Otherwise, giant functions make this experiment generate too much code.
return
}
if lv.f.Name == "forkAndExecInChild" || lv.f.Name == "wbBufFlush" {
// forkAndExecInChild calls vfork on some platforms.
// The code we add here clobbers parts of the stack in the child.
// When the parent resumes, it is using the same stack frame. But the
// child has clobbered stack variables that the parent needs. Boom!
// In particular, the sys argument gets clobbered.
//
// runtime.wbBufFlush must not modify its arguments. See the comments
// in runtime/mwbbuf.go:wbBufFlush.
return
}
if h := os.Getenv("GOCLOBBERDEADHASH"); h != "" {
// Clobber only functions where the hash of the function name matches a pattern.
// Useful for binary searching for a miscompiled function.
hstr := ""
for _, b := range sha1.Sum([]byte(lv.f.Name)) {
hstr += fmt.Sprintf("%08b", b)
}
if !strings.HasSuffix(hstr, h) {
return
}
fmt.Printf("\t\t\tCLOBBERDEAD %s\n", lv.f.Name)
}
lv.doClobber = true
}
// Inserts code to clobber pointer slots in all the dead variables (locals and args)
// at every synchronous safepoint in b.
func (lv *liveness) clobber(b *ssa.Block) {
// Copy block's values to a temporary.
oldSched := append([]*ssa.Value{}, b.Values...)
b.Values = b.Values[:0]
idx := 0
// Clobber pointer slots in all dead variables at entry.
if b == lv.f.Entry {
for len(oldSched) > 0 && len(oldSched[0].Args) == 0 {
// Skip argless ops. We need to skip at least
// the lowered ClosurePtr op, because it
// really wants to be first. This will also
// skip ops like InitMem and SP, which are ok.
b.Values = append(b.Values, oldSched[0])
oldSched = oldSched[1:]
}
clobber(lv, b, lv.livevars[0])
idx++
}
// Copy values into schedule, adding clobbering around safepoints.
for _, v := range oldSched {
if !lv.hasStackMap(v) {
b.Values = append(b.Values, v)
continue
}
clobber(lv, b, lv.livevars[idx])
b.Values = append(b.Values, v)
idx++
}
}
// clobber generates code to clobber pointer slots in all dead variables
// (those not marked in live). Clobbering instructions are added to the end
// of b.Values.
func clobber(lv *liveness, b *ssa.Block, live bitvec.BitVec) {
for i, n := range lv.vars {
if !live.Get(int32(i)) && !n.Addrtaken() {
// Don't clobber stack objects (address-taken). They are
// tracked dynamically.
clobberVar(b, n)
}
}
}
// clobberVar generates code to trash the pointers in v.
// Clobbering instructions are added to the end of b.Values.
func clobberVar(b *ssa.Block, v *ir.Name) {
clobberWalk(b, v, 0, v.Type())
}
// b = block to which we append instructions
// v = variable
// offset = offset of (sub-portion of) variable to clobber (in bytes)
// t = type of sub-portion of v.
func clobberWalk(b *ssa.Block, v *ir.Name, offset int64, t *types.Type) {
if !t.HasPointers() {
return
}
switch t.Kind() {
case types.TPTR,
types.TUNSAFEPTR,
types.TFUNC,
types.TCHAN,
types.TMAP:
clobberPtr(b, v, offset)
case types.TSTRING:
// struct { byte *str; int len; }
clobberPtr(b, v, offset)
case types.TINTER:
// struct { Itab *tab; void *data; }
// or, when isnilinter(t)==true:
// struct { Type *type; void *data; }
clobberPtr(b, v, offset)
clobberPtr(b, v, offset+int64(types.PtrSize))
case types.TSLICE:
// struct { byte *array; int len; int cap; }
clobberPtr(b, v, offset)
case types.TARRAY:
for i := int64(0); i < t.NumElem(); i++ {
clobberWalk(b, v, offset+i*t.Elem().Size(), t.Elem())
}
case types.TSTRUCT:
for _, t1 := range t.Fields().Slice() {
clobberWalk(b, v, offset+t1.Offset, t1.Type)
}
default:
base.Fatalf("clobberWalk: unexpected type, %v", t)
}
}
// clobberPtr generates a clobber of the pointer at offset offset in v.
// The clobber instruction is added at the end of b.
func clobberPtr(b *ssa.Block, v *ir.Name, offset int64) {
b.NewValue0IA(src.NoXPos, ssa.OpClobber, types.TypeVoid, offset, v)
}
func (lv *liveness) showlive(v *ssa.Value, live bitvec.BitVec) {
if base.Flag.Live == 0 || ir.FuncName(lv.fn) == "init" || strings.HasPrefix(ir.FuncName(lv.fn), ".") {
return
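
The GOCLOBBERDEADHASH hook above selects functions by a binary-string
suffix of the SHA-1 of the function name, which is what makes binary
searching for a miscompiled function possible. A small self-contained
sketch (a hypothetical helper, not part of this CL) that reproduces the
bit string enableClobber compares against:

package main

import (
	"crypto/sha1"
	"fmt"
	"os"
	"strings"
)

// hashBits builds the same bit string as enableClobber: the SHA-1 of
// the function name with each byte rendered as 8 binary digits.
func hashBits(fnName string) string {
	var sb strings.Builder
	for _, b := range sha1.Sum([]byte(fnName)) {
		fmt.Fprintf(&sb, "%08b", b)
	}
	return sb.String()
}

func main() {
	if len(os.Args) != 2 {
		fmt.Println("usage: hashbits <function name>")
		return
	}
	bits := hashBits(os.Args[1])
	// enableClobber clobbers the function iff this string ends with
	// the GOCLOBBERDEADHASH pattern.
	fmt.Printf("%s: ...%s\n", os.Args[1], bits[len(bits)-16:])
}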

src/cmd/compile/internal/test/clobberdead_test.go

@@ -0,0 +1,47 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package test
import (
"internal/testenv"
"io/ioutil"
"os/exec"
"path/filepath"
"testing"
)
const helloSrc = `
package main
import "fmt"
func main() { fmt.Println("hello") }
`
func TestClobberDead(t *testing.T) {
// Test that clobberdead mode generates a correct program.
if testing.Short() {
// This test rebuilds the runtime with a special flag, which
// takes a while.
t.Skip("skip in short mode")
}
testenv.MustHaveGoRun(t)
t.Parallel()
tmpdir := t.TempDir()
src := filepath.Join(tmpdir, "x.go")
err := ioutil.WriteFile(src, []byte(helloSrc), 0644)
if err != nil {
t.Fatalf("write file failed: %v", err)
}
cmd := exec.Command(testenv.GoToolPath(t), "run", "-gcflags=all=-clobberdead", src)
out, err := cmd.CombinedOutput()
if err != nil {
t.Fatalf("go run failed: %v\n%s", err, out)
}
if string(out) != "hello\n" {
t.Errorf("wrong output: got %q, want %q", out, "hello\n")
}
}

src/runtime/syscall_windows.go

@@ -263,6 +263,7 @@ const _LOAD_LIBRARY_SEARCH_SYSTEM32 = 0x00000800
// to the full path inside of system32 for use with vanilla LoadLibrary.
//go:linkname syscall_loadsystemlibrary syscall.loadsystemlibrary
//go:nosplit
//go:cgo_unsafe_args
func syscall_loadsystemlibrary(filename *uint16, absoluteFilepath *uint16) (handle, err uintptr) {
lockOSThread()
c := &getg().m.syscall
@@ -293,6 +294,7 @@ func syscall_loadsystemlibrary(filename *uint16, absoluteFilepath *uint16) (hand
//go:linkname syscall_loadlibrary syscall.loadlibrary
//go:nosplit
//go:cgo_unsafe_args
func syscall_loadlibrary(filename *uint16) (handle, err uintptr) {
lockOSThread()
defer unlockOSThread()
@@ -310,6 +312,7 @@ func syscall_loadlibrary(filename *uint16) (handle, err uintptr) {
//go:linkname syscall_getprocaddress syscall.getprocaddress
//go:nosplit
//go:cgo_unsafe_args
func syscall_getprocaddress(handle uintptr, procname *byte) (outhandle, err uintptr) {
lockOSThread()
defer unlockOSThread()
@@ -327,6 +330,7 @@ func syscall_getprocaddress(handle uintptr, procname *byte) (outhandle, err uint
//go:linkname syscall_Syscall syscall.Syscall
//go:nosplit
//go:cgo_unsafe_args
func syscall_Syscall(fn, nargs, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
lockOSThread()
defer unlockOSThread()
@@ -340,6 +344,7 @@ func syscall_Syscall(fn, nargs, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
//go:linkname syscall_Syscall6 syscall.Syscall6
//go:nosplit
//go:cgo_unsafe_args
func syscall_Syscall6(fn, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) {
lockOSThread()
defer unlockOSThread()
@@ -353,6 +358,7 @@ func syscall_Syscall6(fn, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err ui
//go:linkname syscall_Syscall9 syscall.Syscall9
//go:nosplit
//go:cgo_unsafe_args
func syscall_Syscall9(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2, err uintptr) {
lockOSThread()
defer unlockOSThread()
@@ -366,6 +372,7 @@ func syscall_Syscall9(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1
//go:linkname syscall_Syscall12 syscall.Syscall12
//go:nosplit
//go:cgo_unsafe_args
func syscall_Syscall12(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12 uintptr) (r1, r2, err uintptr) {
lockOSThread()
defer unlockOSThread()
@@ -379,6 +386,7 @@ func syscall_Syscall12(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11,
//go:linkname syscall_Syscall15 syscall.Syscall15
//go:nosplit
//go:cgo_unsafe_args
func syscall_Syscall15(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15 uintptr) (r1, r2, err uintptr) {
lockOSThread()
defer unlockOSThread()
@@ -392,6 +400,7 @@ func syscall_Syscall15(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11,
//go:linkname syscall_Syscall18 syscall.Syscall18
//go:nosplit
//go:cgo_unsafe_args
func syscall_Syscall18(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18 uintptr) (r1, r2, err uintptr) {
lockOSThread()
defer unlockOSThread()
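
The function bodies are elided in these hunks, but, as the commit
message notes, each of these functions takes the address of one
argument and hands the whole argument block to cgocall. A simplified
sketch of that shape (the function name is hypothetical; the other
identifiers are runtime internals, so this does not compile outside
package runtime):

//go:nosplit
//go:cgo_unsafe_args
func syscall_SyscallN_sketch(fn, nargs, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
	lockOSThread()
	defer unlockOSThread()
	c := &getg().m.syscall
	c.fn = fn
	c.n = nargs
	// &a1 exposes the contiguous block of argument slots to the C side,
	// so the compiler must preserve the exact frame layout and, under
	// -clobberdead, must not poison "dead" argument slots.
	c.args = uintptr(noescape(unsafe.Pointer(&a1)))
	cgocall(asmstdcallAddr, unsafe.Pointer(c))
	return c.r1, c.r2, c.err
}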

test/codegen/clobberdead.go

@@ -0,0 +1,32 @@
// asmcheck -gcflags=-clobberdead
// +build amd64
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package codegen
type T [2]*int // contains pointers, not SSA-able (so locals are not registerized)
var p1, p2, p3 T
func F() {
// 3735936685 is 0xdeaddead
// clobber x and y at entry; do not clobber z (stack object).
// amd64:`MOVL\t\$3735936685, ""\.x`, `MOVL\t\$3735936685, ""\.y`, -`MOVL\t\$3735936685, ""\.z`
x, y, z := p1, p2, p3
addrTaken(&z)
// x is dead at the call (the value of x is loaded before the CALL), y is not
// amd64:`MOVL\t\$3735936685, ""\.x`, -`MOVL\t\$3735936685, ""\.y`
use(x)
// amd64:`MOVL\t\$3735936685, ""\.x`, `MOVL\t\$3735936685, ""\.y`
use(y)
}
//go:noinline
func use(T) {}
//go:noinline
func addrTaken(*T) {}
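
As a companion illustration (hypothetical code, not part of this CL),
clobberWalk's recursion can be mirrored with reflection to see which
byte offsets inside a type hold the pointer words that get poisoned:

package main

import (
	"fmt"
	"reflect"
)

const ptrSize = 4 << (^uintptr(0) >> 63) // 4 on 32-bit, 8 on 64-bit

type T [2]*int // the same shape as T in the codegen test above

// ptrOffsets mirrors clobberWalk: it returns the byte offsets of the
// pointer words within t. As in clobberWalk, only the data pointer of
// a string or slice counts, and an interface contributes two words.
func ptrOffsets(t reflect.Type, base uintptr, out []uintptr) []uintptr {
	switch t.Kind() {
	case reflect.Ptr, reflect.UnsafePointer, reflect.Func, reflect.Chan,
		reflect.Map, reflect.String, reflect.Slice:
		out = append(out, base)
	case reflect.Interface:
		out = append(out, base, base+ptrSize)
	case reflect.Array:
		for i := 0; i < t.Len(); i++ {
			out = ptrOffsets(t.Elem(), base+uintptr(i)*t.Elem().Size(), out)
		}
	case reflect.Struct:
		for i := 0; i < t.NumField(); i++ {
			f := t.Field(i)
			out = ptrOffsets(f.Type, base+f.Offset, out)
		}
	}
	return out
}

func main() {
	fmt.Println(ptrOffsets(reflect.TypeOf(T{}), 0, nil)) // [0 8] on 64-bit
}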