Revert "cmd/compile,runtime: allocate defer records on the stack"
This reverts commit fff4f599fe.
Reason for revert: Seems to still have issues around GC.
Fixes #32452
Change-Id: Ibe7af629f9ad6a3d5312acd7b066123f484da7f0
Reviewed-on: https://go-review.googlesource.com/c/go/+/180761
Run-TryBot: Keith Randall <khr@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Josh Bleecher Snyder <josharian@gmail.com>
This commit is contained in:
parent e9a136d185
commit 49200e3f3e
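For context: the reverted commit let the compiler place the _defer record for a defer that runs at most once per call directly in the caller's stack frame, falling back to the heap otherwise. A minimal sketch of the two shapes the escape-analysis hunks below distinguish (illustrative code, not from this commit):

package main

import "fmt"

// A top-level defer executes at most once per call, so the reverted
// optimization marked it EscNever and stack-allocated its defer record
// (the loopdepth == 1 checks in the hunks below).
func once() {
	defer fmt.Println("top-level defer")
}

// A defer inside a loop can execute many times per call, so its record
// must always go through deferproc/newdefer on the heap.
func many(n int) {
	for i := 0; i < n; i++ {
		defer fmt.Println("loop defer", i)
	}
}

func main() {
	once()
	many(3)
}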
@@ -802,7 +802,6 @@ opSwitch:
 	case ODEFER:
 		if e.loopdepth == 1 { // top level
-			n.Esc = EscNever // force stack allocation of defer record (see ssa.go)
 			break
 		}
 		// arguments leak out of scope
@@ -882,7 +882,6 @@ func (e *Escape) augmentParamHole(k EscHole, where *Node) EscHole {
	// non-transient location to avoid arguments from being
	// transiently allocated.
	if where.Op == ODEFER && e.loopDepth == 1 {
-		where.Esc = EscNever // force stack allocation of defer record (see ssa.go)
		// TODO(mdempsky): Eliminate redundant EscLocation allocs.
		return e.teeHole(k, e.newLoc(nil, false).asHole())
	}
@@ -287,7 +287,6 @@ var (
	assertI2I,
	assertI2I2,
	deferproc,
-	deferprocStack,
	Deferreturn,
	Duffcopy,
	Duffzero,
@@ -317,48 +317,6 @@ func hiter(t *types.Type) *types.Type {
	return hiter
 }
 
-// deferstruct makes a runtime._defer structure, with additional space for
-// stksize bytes of args.
-func deferstruct(stksize int64) *types.Type {
-	makefield := func(name string, typ *types.Type) *types.Field {
-		f := types.NewField()
-		f.Type = typ
-		// Unlike the global makefield function, this one needs to set Pkg
-		// because these types might be compared (in SSA CSE sorting).
-		// TODO: unify this makefield and the global one above.
-		f.Sym = &types.Sym{Name: name, Pkg: localpkg}
-		return f
-	}
-	argtype := types.NewArray(types.Types[TUINT8], stksize)
-	argtype.SetNoalg(true)
-	argtype.Width = stksize
-	argtype.Align = 1
-	// These fields must match the ones in runtime/runtime2.go:_defer and
-	// cmd/compile/internal/gc/ssa.go:(*state).call.
-	fields := []*types.Field{
-		makefield("siz", types.Types[TUINT32]),
-		makefield("started", types.Types[TBOOL]),
-		makefield("heap", types.Types[TBOOL]),
-		makefield("sp", types.Types[TUINTPTR]),
-		makefield("pc", types.Types[TUINTPTR]),
-		// Note: the types here don't really matter. Defer structures
-		// are always scanned explicitly during stack copying and GC,
-		// so we make them uintptr type even though they are real pointers.
-		makefield("fn", types.Types[TUINTPTR]),
-		makefield("_panic", types.Types[TUINTPTR]),
-		makefield("link", types.Types[TUINTPTR]),
-		makefield("args", argtype),
-	}
-
-	// build struct holding the above fields
-	s := types.New(TSTRUCT)
-	s.SetNoalg(true)
-	s.SetFields(fields)
-	s.Width = widstruct(s, s, 0, 1)
-	s.Align = uint8(Widthptr)
-	return s
-}
-
 // f is method type, with receiver.
 // return function type, receiver as first argument (or not).
 func methodfunc(f *types.Type, receiver *types.Type) *types.Type {
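The removed deferstruct built a compiler-side struct type mirroring runtime._defer plus the trailing argument block. Roughly, assuming a 64-bit target, the layout it described corresponds to this plain Go declaration (a sketch for illustration; the authoritative fields live in runtime/runtime2.go):

package main

import "unsafe"

// Sketch of the record layout deferstruct described. Pointer-valued
// runtime fields were declared as uintptr because defer records are
// scanned explicitly during GC and stack copying, not via type bitmaps.
type deferRecord struct {
	siz     uint32
	started bool
	heap    bool
	sp      uintptr
	pc      uintptr
	fn      uintptr // really *funcval
	_panic  uintptr // really *_panic
	link    uintptr // really *_defer
	// args [siz]byte followed this header in memory.
}

func main() {
	// 48 bytes on 64-bit: 4+1+1 bytes, 2 bytes of padding, then five words.
	println(unsafe.Sizeof(deferRecord{}))
}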
@@ -68,7 +68,6 @@ func initssaconfig() {
	assertI2I = sysfunc("assertI2I")
	assertI2I2 = sysfunc("assertI2I2")
	deferproc = sysfunc("deferproc")
-	deferprocStack = sysfunc("deferprocStack")
	Deferreturn = sysfunc("deferreturn")
	Duffcopy = sysvar("duffcopy")   // asm func with special ABI
	Duffzero = sysvar("duffzero")   // asm func with special ABI
@@ -865,11 +864,7 @@ func (s *state) stmt(n *Node) {
			}
		}
	case ODEFER:
-		d := callDefer
-		if n.Esc == EscNever {
-			d = callDeferStack
-		}
-		s.call(n.Left, d)
+		s.call(n.Left, callDefer)
	case OGO:
		s.call(n.Left, callGo)
@@ -2864,7 +2859,6 @@ type callKind int8
 const (
	callNormal callKind = iota
	callDefer
-	callDeferStack
	callGo
 )
 
@@ -3805,132 +3799,74 @@ func (s *state) call(n *Node, k callKind) *ssa.Value {
		rcvr = s.newValue1(ssa.OpIData, types.Types[TUINTPTR], i)
	}
	dowidth(fn.Type)
-	stksize := fn.Type.ArgWidth() // includes receiver, args, and results
+	stksize := fn.Type.ArgWidth() // includes receiver
 
	// Run all assignments of temps.
	// The temps are introduced to avoid overwriting argument
	// slots when arguments themselves require function calls.
	s.stmtList(n.List)
 
-	var call *ssa.Value
-	if k == callDeferStack {
-		// Make a defer struct d on the stack.
-		t := deferstruct(stksize)
-		d := tempAt(n.Pos, s.curfn, t)
-
-		s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, d, s.mem())
-		addr := s.addr(d, false)
-
-		// Must match reflect.go:deferstruct and src/runtime/runtime2.go:_defer.
-		// 0: siz
-		s.store(types.Types[TUINT32],
-			s.newValue1I(ssa.OpOffPtr, types.Types[TUINT32].PtrTo(), t.FieldOff(0), addr),
-			s.constInt32(types.Types[TUINT32], int32(stksize)))
-		// 1: started, set in deferprocStack
-		// 2: heap, set in deferprocStack
-		// 3: sp, set in deferprocStack
-		// 4: pc, set in deferprocStack
-		// 5: fn
-		s.store(closure.Type,
-			s.newValue1I(ssa.OpOffPtr, closure.Type.PtrTo(), t.FieldOff(5), addr),
-			closure)
-		// 6: panic, set in deferprocStack
-		// 7: link, set in deferprocStack
-
-		// Then, store all the arguments of the defer call.
-		ft := fn.Type
-		off := t.FieldOff(8)
-		args := n.Rlist.Slice()
-
-		// Set receiver (for interface calls). Always a pointer.
-		if rcvr != nil {
-			p := s.newValue1I(ssa.OpOffPtr, ft.Recv().Type.PtrTo(), off, addr)
-			s.store(types.Types[TUINTPTR], p, rcvr)
-		}
-		// Set receiver (for method calls).
-		if n.Op == OCALLMETH {
-			f := ft.Recv()
-			s.storeArgWithBase(args[0], f.Type, addr, off+f.Offset)
-			args = args[1:]
-		}
-		// Set other args.
-		for _, f := range ft.Params().Fields().Slice() {
-			s.storeArgWithBase(args[0], f.Type, addr, off+f.Offset)
-			args = args[1:]
-		}
-
-		// Call runtime.deferprocStack with pointer to _defer record.
-		arg0 := s.constOffPtrSP(types.Types[TUINTPTR], Ctxt.FixedFrameSize())
-		s.store(types.Types[TUINTPTR], arg0, addr)
-		call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, deferprocStack, s.mem())
-		if stksize < int64(Widthptr) {
-			// We need room for both the call to deferprocStack and the call to
-			// the deferred function.
-			stksize = int64(Widthptr)
-		}
-		call.AuxInt = stksize
-	} else {
-		// Store arguments to stack, including defer/go arguments and receiver for method calls.
-		// These are written in SP-offset order.
-		argStart := Ctxt.FixedFrameSize()
-		// Defer/go args.
-		if k != callNormal {
-			// Write argsize and closure (args to newproc/deferproc).
-			argsize := s.constInt32(types.Types[TUINT32], int32(stksize))
-			addr := s.constOffPtrSP(s.f.Config.Types.UInt32Ptr, argStart)
-			s.store(types.Types[TUINT32], addr, argsize)
-			addr = s.constOffPtrSP(s.f.Config.Types.UintptrPtr, argStart+int64(Widthptr))
-			s.store(types.Types[TUINTPTR], addr, closure)
-			stksize += 2 * int64(Widthptr)
-			argStart += 2 * int64(Widthptr)
-		}
-
-		// Set receiver (for interface calls).
-		if rcvr != nil {
-			addr := s.constOffPtrSP(s.f.Config.Types.UintptrPtr, argStart)
-			s.store(types.Types[TUINTPTR], addr, rcvr)
-		}
-
-		// Write args.
-		t := n.Left.Type
-		args := n.Rlist.Slice()
-		if n.Op == OCALLMETH {
-			f := t.Recv()
-			s.storeArg(args[0], f.Type, argStart+f.Offset)
-			args = args[1:]
-		}
-		for i, n := range args {
-			f := t.Params().Field(i)
-			s.storeArg(n, f.Type, argStart+f.Offset)
-		}
-
-		// call target
-		switch {
-		case k == callDefer:
-			call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, deferproc, s.mem())
-		case k == callGo:
-			call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, newproc, s.mem())
-		case closure != nil:
-			// rawLoad because loading the code pointer from a
-			// closure is always safe, but IsSanitizerSafeAddr
-			// can't always figure that out currently, and it's
-			// critical that we not clobber any arguments already
-			// stored onto the stack.
-			codeptr = s.rawLoad(types.Types[TUINTPTR], closure)
-			call = s.newValue3(ssa.OpClosureCall, types.TypeMem, codeptr, closure, s.mem())
-		case codeptr != nil:
-			call = s.newValue2(ssa.OpInterCall, types.TypeMem, codeptr, s.mem())
-		case sym != nil:
-			call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, sym.Linksym(), s.mem())
-		default:
-			Fatalf("bad call type %v %v", n.Op, n)
-		}
-		call.AuxInt = stksize // Call operations carry the argsize of the callee along with them
+	// Store arguments to stack, including defer/go arguments and receiver for method calls.
+	// These are written in SP-offset order.
+	argStart := Ctxt.FixedFrameSize()
+	// Defer/go args.
+	if k != callNormal {
+		// Write argsize and closure (args to newproc/deferproc).
+		argsize := s.constInt32(types.Types[TUINT32], int32(stksize))
+		addr := s.constOffPtrSP(s.f.Config.Types.UInt32Ptr, argStart)
+		s.store(types.Types[TUINT32], addr, argsize)
+		addr = s.constOffPtrSP(s.f.Config.Types.UintptrPtr, argStart+int64(Widthptr))
+		s.store(types.Types[TUINTPTR], addr, closure)
+		stksize += 2 * int64(Widthptr)
+		argStart += 2 * int64(Widthptr)
	}
 
+	// Set receiver (for interface calls).
+	if rcvr != nil {
+		addr := s.constOffPtrSP(s.f.Config.Types.UintptrPtr, argStart)
+		s.store(types.Types[TUINTPTR], addr, rcvr)
+	}
+
+	// Write args.
+	t := n.Left.Type
+	args := n.Rlist.Slice()
+	if n.Op == OCALLMETH {
+		f := t.Recv()
+		s.storeArg(args[0], f.Type, argStart+f.Offset)
+		args = args[1:]
+	}
+	for i, n := range args {
+		f := t.Params().Field(i)
+		s.storeArg(n, f.Type, argStart+f.Offset)
+	}
+
+	// call target
+	var call *ssa.Value
+	switch {
+	case k == callDefer:
+		call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, deferproc, s.mem())
+	case k == callGo:
+		call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, newproc, s.mem())
+	case closure != nil:
+		// rawLoad because loading the code pointer from a
+		// closure is always safe, but IsSanitizerSafeAddr
+		// can't always figure that out currently, and it's
+		// critical that we not clobber any arguments already
+		// stored onto the stack.
+		codeptr = s.rawLoad(types.Types[TUINTPTR], closure)
+		call = s.newValue3(ssa.OpClosureCall, types.TypeMem, codeptr, closure, s.mem())
+	case codeptr != nil:
+		call = s.newValue2(ssa.OpInterCall, types.TypeMem, codeptr, s.mem())
+	case sym != nil:
+		call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, sym.Linksym(), s.mem())
+	default:
+		Fatalf("bad call type %v %v", n.Op, n)
+	}
+	call.AuxInt = stksize // Call operations carry the argsize of the callee along with them
	s.vars[&memVar] = call
 
	// Finish block for defers
-	if k == callDefer || k == callDeferStack {
+	if k == callDefer {
		b := s.endBlock()
		b.Kind = ssa.BlockDefer
		b.SetControl(call)
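After the revert, every defer/go call goes back through the single SP-offset path above: argsize and the closure pointer are written first, then the call's own arguments follow. A standalone model of that layout on a 64-bit target (the function name and byte-level encoding are made up for illustration):

package main

import (
	"encoding/binary"
	"fmt"
)

// layoutDeferArgs models the out-args area (*state).call writes for a
// `defer f(...)`: sp+0 holds siz (uint32), sp+8 holds the closure
// pointer (Widthptr = 8 assumed), and the deferred call's argument
// block follows. deferproc later copies this block into a heap _defer.
func layoutDeferArgs(closure uint64, args []byte) []byte {
	out := make([]byte, 16+len(args))
	binary.LittleEndian.PutUint32(out[0:4], uint32(len(args)))
	binary.LittleEndian.PutUint64(out[8:16], closure)
	copy(out[16:], args)
	return out
}

func main() {
	fmt.Printf("% x\n", layoutDeferArgs(0xdeadbeef, []byte{1, 2, 3, 4}))
}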
@@ -4425,27 +4361,17 @@ func (s *state) storeTypePtrs(t *types.Type, left, right *ssa.Value) {
 }
 
 func (s *state) storeArg(n *Node, t *types.Type, off int64) {
-	s.storeArgWithBase(n, t, s.sp, off)
-}
-
-func (s *state) storeArgWithBase(n *Node, t *types.Type, base *ssa.Value, off int64) {
	pt := types.NewPtr(t)
-	var addr *ssa.Value
-	if base == s.sp {
-		// Use special routine that avoids allocation on duplicate offsets.
-		addr = s.constOffPtrSP(pt, off)
-	} else {
-		addr = s.newValue1I(ssa.OpOffPtr, pt, off, base)
-	}
-
+	sp := s.constOffPtrSP(pt, off)
	if !canSSAType(t) {
		a := s.addr(n, false)
-		s.move(t, addr, a)
+		s.move(t, sp, a)
		return
	}
 
	a := s.expr(n)
-	s.storeType(t, addr, a, 0, false)
+	s.storeType(t, sp, a, 0, false)
 }
 
 // slice computes the slice v[i:j:k] and returns ptr, len, and cap of result.
@@ -712,31 +712,15 @@ func scanstack(gp *g, gcw *gcWork) {
 
	// Find additional pointers that point into the stack from the heap.
	// Currently this includes defers and panics. See also function copystack.
-
-	// Find and trace all defer arguments.
	tracebackdefers(gp, scanframe, nil)
-
-	// Find and trace other pointers in defer records.
	for d := gp._defer; d != nil; d = d.link {
+		// tracebackdefers above does not scan the func value, which could
+		// be a stack allocated closure. See issue 30453.
		if d.fn != nil {
-			// tracebackdefers above does not scan the func value, which could
-			// be a stack allocated closure. See issue 30453.
			scanblock(uintptr(unsafe.Pointer(&d.fn)), sys.PtrSize, &oneptrmask[0], gcw, &state)
		}
-		if d.link != nil {
-			// The link field of a stack-allocated defer record might point
-			// to a heap-allocated defer record. Keep that heap record live.
-			scanblock(uintptr(unsafe.Pointer(&d.link)), sys.PtrSize, &oneptrmask[0], gcw, &state)
-		}
-		// Retain defers records themselves.
-		// Defer records might not be reachable from the G through regular heap
-		// tracing because the defer linked list might weave between the stack and the heap.
-		if d.heap {
-			scanblock(uintptr(unsafe.Pointer(&d)), sys.PtrSize, &oneptrmask[0], gcw, &state)
-		}
	}
	if gp._panic != nil {
-		// Panics are always stack allocated.
		state.putPtr(uintptr(unsafe.Pointer(gp._panic)))
	}
@@ -228,46 +228,6 @@ func deferproc(siz int32, fn *funcval) { // arguments of fn follow fn
	// been set and must not be clobbered.
 }
 
-// deferprocStack queues a new deferred function with a defer record on the stack.
-// The defer record must have its siz and fn fields initialized.
-// All other fields can contain junk.
-// The defer record must be immediately followed in memory by
-// the arguments of the defer.
-// Nosplit because the arguments on the stack won't be scanned
-// until the defer record is spliced into the gp._defer list.
-//go:nosplit
-func deferprocStack(d *_defer) {
-	gp := getg()
-	if gp.m.curg != gp {
-		// go code on the system stack can't defer
-		throw("defer on system stack")
-	}
-	// siz and fn are already set.
-	// The other fields are junk on entry to deferprocStack and
-	// are initialized here.
-	d.started = false
-	d.heap = false
-	d.sp = getcallersp()
-	d.pc = getcallerpc()
-	// The lines below implement:
-	//   d.panic = nil
-	//   d.link = gp._defer
-	//   gp._defer = d
-	// But without write barriers. The first two are writes to
-	// the stack so they don't need a write barrier, and furthermore
-	// are to uninitialized memory, so they must not use a write barrier.
-	// The third write does not require a write barrier because we
-	// explicitly mark all the defer structures, so we don't need to
-	// keep track of pointers to them with a write barrier.
-	*(*uintptr)(unsafe.Pointer(&d._panic)) = 0
-	*(*uintptr)(unsafe.Pointer(&d.link)) = uintptr(unsafe.Pointer(gp._defer))
-	*(*uintptr)(unsafe.Pointer(&gp._defer)) = uintptr(unsafe.Pointer(d))
-
-	return0()
-	// No code can go here - the C return register has
-	// been set and must not be clobbered.
-}
-
 // Small malloc size classes >= 16 are the multiples of 16: 16, 32, 48, 64, 80, 96, 112, 128, 144, ...
 // Each P holds a pool for defers with small arg sizes.
 // Assign defer allocations to pools by rounding to 16, to match malloc size classes.
@@ -389,7 +349,6 @@ func newdefer(siz int32) *_defer {
		}
	}
	d.siz = siz
-	d.heap = true
	d.link = gp._defer
	gp._defer = d
	return d
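As the context above notes, newdefer draws records from per-P pools bucketed by argument size rounded to a multiple of 16, matching small malloc size classes. A self-contained illustration of that bucketing (not the runtime's exact formula; its real helper is deferclass):

package main

import "fmt"

// poolClass sketches how an argument byte count maps to a defer pool
// bucket by rounding up to the next multiple of 16.
func poolClass(argBytes uintptr) uintptr {
	return (argBytes + 15) / 16
}

func main() {
	for _, siz := range []uintptr{0, 8, 16, 17, 32, 48} {
		fmt.Printf("arg bytes %2d -> pool class %d\n", siz, poolClass(siz))
	}
}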
@@ -409,9 +368,6 @@ func freedefer(d *_defer) {
	if d.fn != nil {
		freedeferfn()
	}
-	if !d.heap {
-		return
-	}
	sc := deferclass(uintptr(d.siz))
	if sc >= uintptr(len(p{}.deferpool)) {
		return
@@ -775,16 +775,9 @@ func extendRandom(r []byte, n int) {
 
 // A _defer holds an entry on the list of deferred calls.
 // If you add a field here, add code to clear it in freedefer.
-// This struct must match the code in cmd/compile/internal/gc/reflect.go:deferstruct
-// and cmd/compile/internal/gc/ssa.go:(*state).call.
-// Some defers will be allocated on the stack and some on the heap.
-// All defers are logically part of the stack, so write barriers to
-// initialize them are not required. All defers must be manually scanned,
-// and for heap defers, marked.
 type _defer struct {
-	siz     int32 // includes both arguments and results
+	siz     int32
	started bool
-	heap    bool
	sp      uintptr // sp at time of defer
	pc      uintptr
	fn      *funcval
@@ -719,21 +719,16 @@ func adjustctxt(gp *g, adjinfo *adjustinfo) {
 }
 
 func adjustdefers(gp *g, adjinfo *adjustinfo) {
+	// Adjust defer argument blocks the same way we adjust active stack frames.
+	tracebackdefers(gp, adjustframe, noescape(unsafe.Pointer(adjinfo)))
+
	// Adjust pointers in the Defer structs.
-	// We need to do this first because we need to adjust the
-	// defer.link fields so we always work on the new stack.
-	adjustpointer(adjinfo, unsafe.Pointer(&gp._defer))
+	// Defer structs themselves are never on the stack.
	for d := gp._defer; d != nil; d = d.link {
		adjustpointer(adjinfo, unsafe.Pointer(&d.fn))
		adjustpointer(adjinfo, unsafe.Pointer(&d.sp))
		adjustpointer(adjinfo, unsafe.Pointer(&d._panic))
-		adjustpointer(adjinfo, unsafe.Pointer(&d.link))
	}
-
-	// Adjust defer argument blocks the same way we adjust active stack frames.
-	// Note: this code is after the loop above, so that if a defer record is
-	// stack allocated, we work on the copy in the new stack.
-	tracebackdefers(gp, adjustframe, noescape(unsafe.Pointer(adjinfo)))
 }
 
 func adjustpanics(gp *g, adjinfo *adjustinfo) {
@@ -799,58 +799,3 @@ func TestDeferLiveness(t *testing.T) {
		t.Errorf("output:\n%s\n\nwant no output", output)
	}
 }
-
-func TestDeferHeapAndStack(t *testing.T) {
-	P := 4     // processors
-	N := 10000 // iterations
-	D := 200   // stack depth
-
-	if testing.Short() {
-		P /= 2
-		N /= 10
-		D /= 10
-	}
-	c := make(chan bool)
-	for p := 0; p < P; p++ {
-		go func() {
-			for i := 0; i < N; i++ {
-				if deferHeapAndStack(D) != 2*D {
-					panic("bad result")
-				}
-			}
-			c <- true
-		}()
-	}
-	for p := 0; p < P; p++ {
-		<-c
-	}
-}
-
-// deferHeapAndStack(n) computes 2*n
-func deferHeapAndStack(n int) (r int) {
-	if n == 0 {
-		return 0
-	}
-	if n%2 == 0 {
-		// heap-allocated defers
-		for i := 0; i < 2; i++ {
-			defer func() {
-				r++
-			}()
-		}
-	} else {
-		// stack-allocated defers
-		defer func() {
-			r++
-		}()
-		defer func() {
-			r++
-		}()
-	}
-	r = deferHeapAndStack(n - 1)
-	escapeMe(new([1024]byte)) // force some GCs
-	return
-}
-
-// Pass a value to escapeMe to force it to escape.
-var escapeMe = func(x interface{}) {}
@@ -248,6 +248,9 @@ func getclosureptr() uintptr
 //go:noescape
 func asmcgocall(fn, arg unsafe.Pointer) int32
 
+// argp used in Defer structs when there is no argp.
+const _NoArgs = ^uintptr(0)
+
 func morestack()
 func morestack_noctxt()
 func rt0_go()
@@ -112,6 +112,7 @@ const _LOAD_LIBRARY_SEARCH_SYSTEM32 = 0x00000800
 //go:nosplit
 func syscall_loadsystemlibrary(filename *uint16, absoluteFilepath *uint16) (handle, err uintptr) {
	lockOSThread()
+	defer unlockOSThread()
	c := &getg().m.syscall
 
	if useLoadLibraryEx {
@@ -134,7 +135,6 @@ func syscall_loadsystemlibrary(filename *uint16, absoluteFilepath *uint16) (hand
	if handle == 0 {
		err = c.err
	}
-	unlockOSThread() // not defer'd after the lockOSThread above to save stack frame size.
	return
 }
 
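These two hunks restore the deferred unlock in syscall_loadsystemlibrary; the reverted commit had replaced it with an explicit call, per its own comment, to save stack frame size. The same pairing expressed with the exported API (an illustration, not code from this commit):

package main

import "runtime"

// Pairing LockOSThread with a deferred unlock guarantees the thread is
// released on every return path, including panics, at the cost of one
// defer record per call.
func withOSThread(f func()) {
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()
	f()
}

func main() {
	withOSThread(func() {})
}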
@@ -148,6 +148,11 @@ func gentraceback(pc0, sp0, lr0 uintptr, gp *g, skip int, pcbuf *uintptr, max in
	waspanic := false
	cgoCtxt := gp.cgoCtxt
	printing := pcbuf == nil && callback == nil
+	_defer := gp._defer
+
+	for _defer != nil && _defer.sp == _NoArgs {
+		_defer = _defer.link
+	}
 
	// If the PC is zero, it's likely a nil function call.
	// Start in the caller's frame.
@@ -314,14 +319,15 @@ func gentraceback(pc0, sp0, lr0 uintptr, gp *g, skip int, pcbuf *uintptr, max in
		// In the latter case, use a deferreturn call site as the continuation pc.
		frame.continpc = frame.pc
		if waspanic {
-			if frame.fn.deferreturn != 0 {
+			// We match up defers with frames using the SP.
+			// However, if the function has an empty stack
+			// frame, then it's possible (on LR machines)
+			// for multiple call frames to have the same
+			// SP. But, since a function with no frame
+			// can't push a defer, the defer can't belong
+			// to that frame.
+			if _defer != nil && _defer.sp == frame.sp && frame.sp != frame.fp {
				frame.continpc = frame.fn.entry + uintptr(frame.fn.deferreturn) + 1
-				// Note: this may perhaps keep return variables alive longer than
-				// strictly necessary, as we are using "function has a defer statement"
-				// as a proxy for "function actually deferred something". It seems
-				// to be a minor drawback. (We used to actually look through the
-				// gp._defer for a defer corresponding to this function, but that
-				// is hard to do with defer records on the stack during a stack copy.)
				// Note: the +1 is to offset the -1 that
				// stack.go:getStackMap does to back up a return
				// address make sure the pc is in the CALL instruction.
@@ -330,6 +336,11 @@ func gentraceback(pc0, sp0, lr0 uintptr, gp *g, skip int, pcbuf *uintptr, max in
			}
		}
 
+		// Unwind our local defer stack past this frame.
+		for _defer != nil && ((_defer.sp == frame.sp && frame.sp != frame.fp) || _defer.sp == _NoArgs) {
+			_defer = _defer.link
+		}
+
		if callback != nil {
			if !callback((*stkframe)(noescape(unsafe.Pointer(&frame))), v) {
				return n
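The restored traceback logic matches defer records to frames by comparing their saved SPs while walking outward, skipping zero-size frames, and (as the next hunks show) insists the whole list is consumed when called back from GC or stack growth. A toy model of that matching (types and data invented for illustration):

package main

import "fmt"

type deferRec struct{ sp uintptr }
type stkFrame struct{ sp, fp uintptr }

// unwind pops every defer whose recorded sp equals the current frame's
// sp; frames with sp == fp are frameless and cannot own a defer.
func unwind(defers []deferRec, frames []stkFrame) []deferRec {
	for _, fr := range frames {
		for len(defers) > 0 && defers[0].sp == fr.sp && fr.sp != fr.fp {
			fmt.Printf("defer sp=%#x matched frame sp=%#x\n", defers[0].sp, fr.sp)
			defers = defers[1:]
		}
	}
	return defers // gentraceback throws if this is non-empty during GC
}

func main() {
	left := unwind(
		[]deferRec{{0x100}, {0x200}},
		[]stkFrame{{0x100, 0x130}, {0x200, 0x230}},
	)
	fmt.Println("leftover defers:", len(left))
}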
@@ -499,6 +510,13 @@ func gentraceback(pc0, sp0, lr0 uintptr, gp *g, skip int, pcbuf *uintptr, max in
		n = nprint
	}
 
+	// If callback != nil, we're being called to gather stack information during
+	// garbage collection or stack growth. In that context, require that we used
+	// up the entire defer stack. If not, then there is a bug somewhere and the
+	// garbage collection or stack growth may not have seen the correct picture
+	// of the stack. Crash now instead of silently executing the garbage collection
+	// or stack copy incorrectly and setting up for a mysterious crash later.
+	//
	// Note that panic != nil is okay here: there can be leftover panics,
	// because the defers on the panic stack do not nest in frame order as
	// they do on the defer stack. If you have:
@@ -539,6 +557,16 @@ func gentraceback(pc0, sp0, lr0 uintptr, gp *g, skip int, pcbuf *uintptr, max in
	// At other times, such as when gathering a stack for a profiling signal
	// or when printing a traceback during a crash, everything may not be
	// stopped nicely, and the stack walk may not be able to complete.
+	// It's okay in those situations not to use up the entire defer stack:
+	// incomplete information then is still better than nothing.
+	if callback != nil && n < max && _defer != nil {
+		print("runtime: g", gp.goid, ": leftover defer sp=", hex(_defer.sp), " pc=", hex(_defer.pc), "\n")
+		for _defer = gp._defer; _defer != nil; _defer = _defer.link {
+			print("\tdefer ", _defer, " sp=", hex(_defer.sp), " pc=", hex(_defer.pc), "\n")
+		}
+		throw("traceback has leftover defers")
+	}
+
	if callback != nil && n < max && frame.sp != gp.stktopsp {
		print("runtime: g", gp.goid, ": frame.sp=", hex(frame.sp), " top=", hex(gp.stktopsp), "\n")
		print("\tstack=[", hex(gp.stack.lo), "-", hex(gp.stack.hi), "] n=", n, " max=", max, "\n")
@@ -109,8 +109,3 @@ func MightPanic(a []int, i, j, k, s int) {
	_ = i << s // panicShift
	_ = i / j  // panicDivide
 }
-
-func Defer() {
-	// amd64:`CALL\truntime\.deferprocStack`
-	defer func() {}()
-}
@@ -687,7 +687,7 @@ type R struct{ *T } // ERRORAUTO "live at entry to \(\*R\)\.Foo: \.this ptr" "li
 // In particular, at printint r must be live.
 func f41(p, q *int) (r *int) { // ERROR "live at entry to f41: p q$"
	r = p
-	defer func() { // ERROR "live at call to deferprocStack: q r$" "live at call to deferreturn: r$"
+	defer func() { // ERROR "live at call to deferproc: q r$" "live at call to deferreturn: r$"
		recover()
	}()
	printint(0) // ERROR "live at call to printint: q r$"