Merge "all: REVERSE MERGE dev.typeparams (4d3cc84) into master"

Gerrit Code Review 2021-08-12 21:33:09 +00:00
commit 0d01934094
924 changed files with 37218 additions and 17382 deletions


@@ -91,10 +91,18 @@ func main() {
// issue 26745
_ = func(i int) int {
return C.i + 1 // ERROR HERE: 14
// typecheck reports at column 14 ('+'), but types2 reports at
// column 10 ('C').
// TODO(mdempsky): Investigate why, and see if types2 can be
// updated to match typecheck behavior.
return C.i + 1 // ERROR HERE: \b(10|14)\b
}
_ = func(i int) {
C.fi(i) // ERROR HERE: 7
// typecheck reports at column 7 ('('), but types2 reports at
// column 8 ('i'). The types2 position is more correct, but
// updating typecheck here is fundamentally challenging because of
// IR limitations.
C.fi(i) // ERROR HERE: \b(7|8)\b
}
C.fi = C.fi // ERROR HERE


@@ -505,6 +505,128 @@ control bits specified by the ELF AMD64 ABI.
The x87 floating-point control word is not used by Go on amd64.
### arm64 architecture
The arm64 architecture uses R0 – R15 for integer arguments and results.
It uses F0 – F15 for floating-point arguments and results.
*Rationale*: 16 integer registers and 16 floating-point registers are
more than enough for passing arguments and results for practically all
functions (see Appendix). While there are more registers available,
using more registers provides little benefit. Additionally, it will add
overhead on code paths where the number of arguments is not statically
known (e.g. reflect call), and will consume more stack space when there
is only limited stack space available to fit in the nosplit limit.
Registers R16 and R17 are permanent scratch registers. They are also
used as scratch registers by the linker (Go linker and external
linker) in trampolines.
Register R18 is reserved and never used. It is reserved for the OS
on some platforms (e.g. macOS).
Registers R19 – R25 are permanent scratch registers. In addition,
R27 is a permanent scratch register used by the assembler when
expanding instructions.
Floating-point registers F16 – F31 are also permanent scratch
registers.
Special-purpose registers are as follows:
| Register | Call meaning | Return meaning | Body meaning |
| --- | --- | --- | --- |
| RSP | Stack pointer | Same | Same |
| R30 | Link register | Same | Scratch (non-leaf functions) |
| R29 | Frame pointer | Same | Same |
| R28 | Current goroutine | Same | Same |
| R27 | Scratch | Scratch | Scratch |
| R26 | Closure context pointer | Scratch | Scratch |
| R18 | Reserved (not used) | Same | Same |
| ZR | Zero value | Same | Same |
*Rationale*: These register meanings are compatible with Go's
stack-based calling convention.
*Rationale*: The link register, R30, holds the function return
address at the function entry. For functions that have frames
(including most non-leaf functions), R30 is saved to stack in the
function prologue and restored in the epilogue. Within the function
body, R30 can be used as a scratch register.
*Implementation note*: Registers with fixed meaning at calls but not
in function bodies must be initialized by "injected" calls such as
signal-based panics.
#### Stack layout
The stack pointer, RSP, grows down and is always aligned to 16 bytes.
*Rationale*: The arm64 architecture requires the stack pointer to be
16-byte aligned.
A function's stack frame, after the frame is created, is laid out as
follows:
    +------------------------------+
    | ... locals ...               |
    | ... outgoing arguments ...   |
    | return PC                    | ← RSP points to
    | frame pointer on entry       |
    +------------------------------+ ↓ lower addresses
The "return PC" is loaded to the link register, R30, as part of the
arm64 `CALL` operation.
On entry, a function subtracts from RSP to open its stack frame, and
saves the values of R30 and R29 at the bottom of the frame.
Specifically, R30 is saved at 0(RSP) and R29 is saved at -8(RSP),
after RSP is updated.
A leaf function that does not require any stack space may omit the
saved R30 and R29.
The Go ABI's use of R29 as a frame pointer register is compatible with
the arm64 architecture's frame-pointer requirements, so that Go can
interoperate with platform debuggers and profilers.
This stack layout is used by both register-based (ABIInternal) and
stack-based (ABI0) calling conventions.
#### Flags
The arithmetic status flags (NZCV) are treated like scratch registers
and not preserved across calls.
All other bits in PSTATE are system flags and are not modified by Go.
The floating-point status register (FPSR) is treated like a scratch
register and is not preserved across calls.
At calls, the floating-point control register (FPCR) bits are always
set as follows:
| Flag | Bit | Value | Meaning |
| --- | --- | --- | --- |
| DN | 25 | 0 | Propagate NaN operands |
| FZ | 24 | 0 | Do not flush to zero |
| RC | 23/22 | 0 (RN) | Round to nearest, choose even if tied |
| IDE | 15 | 0 | Denormal operations trap disabled |
| IXE | 12 | 0 | Inexact trap disabled |
| UFE | 11 | 0 | Underflow trap disabled |
| OFE | 10 | 0 | Overflow trap disabled |
| DZE | 9 | 0 | Divide-by-zero trap disabled |
| IOE | 8 | 0 | Invalid operations trap disabled |
| NEP | 2 | 0 | Scalar operations do not affect higher elements in vector registers |
| AH | 1 | 0 | No alternate handling of de-normal inputs |
| FIZ | 0 | 0 | Do not zero de-normals |
*Rationale*: Having a fixed FPCR control configuration allows Go
functions to use floating-point and vector (SIMD) operations without
modifying or saving the FPCR.
Functions are allowed to modify it between calls (as long as they
restore it), but as of this writing Go code never does.
## Future directions
### Spill path improvements


@@ -18,11 +18,10 @@ func Init(arch *ssagen.ArchInfo) {
arch.ZeroRange = zerorange
arch.Ginsnop = ginsnop
arch.Ginsnopdefer = ginsnop
arch.SSAMarkMoves = ssaMarkMoves
arch.SSAGenValue = ssaGenValue
arch.SSAGenBlock = ssaGenBlock
arch.LoadRegResults = loadRegResults
arch.LoadRegResult = loadRegResult
arch.SpillArgReg = spillArgReg
}


@@ -57,7 +57,6 @@ func dzDI(b int64) int64 {
func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.Prog {
const (
r13 = 1 << iota // if R13 is already zeroed.
x15 // if X15 is already zeroed. Note: in new ABI, X15 is always zero.
)
if cnt == 0 {
@@ -85,11 +84,6 @@ func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.
}
p = pp.Append(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_R13, 0, obj.TYPE_MEM, x86.REG_SP, off)
} else if !isPlan9 && cnt <= int64(8*types.RegSize) {
if !buildcfg.Experiment.RegabiG && *state&x15 == 0 {
p = pp.Append(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X15, 0, obj.TYPE_REG, x86.REG_X15, 0)
*state |= x15
}
for i := int64(0); i < cnt/16; i++ {
p = pp.Append(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X15, 0, obj.TYPE_MEM, x86.REG_SP, off+i*16)
}
@@ -98,10 +92,6 @@ func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.
p = pp.Append(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X15, 0, obj.TYPE_MEM, x86.REG_SP, off+cnt-int64(16))
}
} else if !isPlan9 && (cnt <= int64(128*types.RegSize)) {
if !buildcfg.Experiment.RegabiG && *state&x15 == 0 {
p = pp.Append(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X15, 0, obj.TYPE_REG, x86.REG_X15, 0)
*state |= x15
}
// Save DI to r12. With the amd64 Go register abi, DI can contain
// an incoming parameter, whereas R12 is always scratch.
p = pp.Append(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_DI, 0, obj.TYPE_REG, x86.REG_R12, 0)


@@ -823,7 +823,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.To.Reg = v.Args[0].Reg()
ssagen.AddAux2(&p.To, v, sc.Off64())
case ssa.OpAMD64MOVOstorezero:
if !buildcfg.Experiment.RegabiG || s.ABI != obj.ABIInternal {
if s.ABI != obj.ABIInternal {
// zero X15 manually
opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
}
@@ -914,7 +914,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpAMD64DUFFZERO:
if !buildcfg.Experiment.RegabiG || s.ABI != obj.ABIInternal {
if s.ABI != obj.ABIInternal {
// zero X15 manually
opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
}
@@ -997,22 +997,26 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
// Closure pointer is DX.
ssagen.CheckLoweredGetClosurePtr(v)
case ssa.OpAMD64LoweredGetG:
if buildcfg.Experiment.RegabiG && s.ABI == obj.ABIInternal {
if s.ABI == obj.ABIInternal {
v.Fatalf("LoweredGetG should not appear in ABIInternal")
}
r := v.Reg()
getgFromTLS(s, r)
case ssa.OpAMD64CALLstatic:
if buildcfg.Experiment.RegabiG && s.ABI == obj.ABI0 && v.Aux.(*ssa.AuxCall).Fn.ABI() == obj.ABIInternal {
if s.ABI == obj.ABI0 && v.Aux.(*ssa.AuxCall).Fn.ABI() == obj.ABIInternal {
// zeroing X15 when entering ABIInternal from ABI0
opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
if buildcfg.GOOS != "plan9" { // do not use SSE on Plan 9
opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
}
// set G register from TLS
getgFromTLS(s, x86.REG_R14)
}
s.Call(v)
if buildcfg.Experiment.RegabiG && s.ABI == obj.ABIInternal && v.Aux.(*ssa.AuxCall).Fn.ABI() == obj.ABI0 {
if s.ABI == obj.ABIInternal && v.Aux.(*ssa.AuxCall).Fn.ABI() == obj.ABI0 {
// zeroing X15 when entering ABIInternal from ABI0
opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
if buildcfg.GOOS != "plan9" { // do not use SSE on Plan 9
opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
}
// set G register from TLS
getgFromTLS(s, x86.REG_R14)
}
@@ -1304,9 +1308,11 @@ func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
case ssa.BlockRet:
s.Prog(obj.ARET)
case ssa.BlockRetJmp:
if buildcfg.Experiment.RegabiG && s.ABI == obj.ABI0 && b.Aux.(*obj.LSym).ABI() == obj.ABIInternal {
if s.ABI == obj.ABI0 && b.Aux.(*obj.LSym).ABI() == obj.ABIInternal {
// zeroing X15 when entering ABIInternal from ABI0
opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
if buildcfg.GOOS != "plan9" { // do not use SSE on Plan 9
opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
}
// set G register from TLS
getgFromTLS(s, x86.REG_R14)
}
@@ -1348,20 +1354,15 @@ func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
}
}
func loadRegResults(s *ssagen.State, f *ssa.Func) {
for _, o := range f.OwnAux.ABIInfo().OutParams() {
n := o.Name.(*ir.Name)
rts, offs := o.RegisterTypesAndOffsets()
for i := range o.Registers {
p := s.Prog(loadByType(rts[i]))
p.From.Type = obj.TYPE_MEM
p.From.Name = obj.NAME_AUTO
p.From.Sym = n.Linksym()
p.From.Offset = n.FrameOffset() + offs[i]
p.To.Type = obj.TYPE_REG
p.To.Reg = ssa.ObjRegForAbiReg(o.Registers[i], f.Config)
}
}
func loadRegResult(s *ssagen.State, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog {
p := s.Prog(loadByType(t))
p.From.Type = obj.TYPE_MEM
p.From.Name = obj.NAME_AUTO
p.From.Sym = n.Linksym()
p.From.Offset = n.FrameOffset() + off
p.To.Type = obj.TYPE_REG
p.To.Reg = reg
return p
}
func spillArgReg(pp *objw.Progs, p *obj.Prog, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog {


@@ -18,7 +18,6 @@ func Init(arch *ssagen.ArchInfo) {
arch.SoftFloat = buildcfg.GOARM == 5
arch.ZeroRange = zerorange
arch.Ginsnop = ginsnop
arch.Ginsnopdefer = ginsnop
arch.SSAMarkMoves = func(s *ssagen.State, b *ssa.Block) {}
arch.SSAGenValue = ssaGenValue


@@ -18,9 +18,10 @@ func Init(arch *ssagen.ArchInfo) {
arch.PadFrame = padframe
arch.ZeroRange = zerorange
arch.Ginsnop = ginsnop
arch.Ginsnopdefer = ginsnop
arch.SSAMarkMoves = func(s *ssagen.State, b *ssa.Block) {}
arch.SSAGenValue = ssaGenValue
arch.SSAGenBlock = ssaGenBlock
arch.LoadRegResult = loadRegResult
arch.SpillArgReg = spillArgReg
}


@@ -10,6 +10,7 @@ import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/logopt"
"cmd/compile/internal/objw"
"cmd/compile/internal/ssa"
"cmd/compile/internal/ssagen"
"cmd/compile/internal/types"
@@ -161,6 +162,18 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
ssagen.AddrAuto(&p.To, v)
case ssa.OpArgIntReg, ssa.OpArgFloatReg:
// The assembler needs to wrap the entry safepoint/stack growth code with spill/unspill of the register args.
// The loop only runs once.
for _, a := range v.Block.Func.RegArgs {
// Pass the spill/unspill information along to the assembler, offset by size of
// the saved LR slot.
addr := ssagen.SpillSlotAddr(a, arm64.REGSP, base.Ctxt.FixedFrameSize())
s.FuncInfo().AddSpill(
obj.RegSpill{Reg: a.Reg, Addr: addr, Unspill: loadByType(a.Type), Spill: storeByType(a.Type)})
}
v.Block.Func.RegArgs = nil
ssagen.CheckArgReg(v)
case ssa.OpARM64ADD,
ssa.OpARM64SUB,
ssa.OpARM64AND,
@@ -1101,8 +1114,34 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
v.Fatalf("FlagConstant op should never make it to codegen %v", v.LongString())
case ssa.OpARM64InvertFlags:
v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString())
case ssa.OpClobber, ssa.OpClobberReg:
// TODO: implement for clobberdead experiment. Nop is ok for now.
case ssa.OpClobber:
// MOVW $0xdeaddead, REGTMP
// MOVW REGTMP, (slot)
// MOVW REGTMP, 4(slot)
p := s.Prog(arm64.AMOVW)
p.From.Type = obj.TYPE_CONST
p.From.Offset = 0xdeaddead
p.To.Type = obj.TYPE_REG
p.To.Reg = arm64.REGTMP
p = s.Prog(arm64.AMOVW)
p.From.Type = obj.TYPE_REG
p.From.Reg = arm64.REGTMP
p.To.Type = obj.TYPE_MEM
p.To.Reg = arm64.REGSP
ssagen.AddAux(&p.To, v)
p = s.Prog(arm64.AMOVW)
p.From.Type = obj.TYPE_REG
p.From.Reg = arm64.REGTMP
p.To.Type = obj.TYPE_MEM
p.To.Reg = arm64.REGSP
ssagen.AddAux2(&p.To, v, v.AuxInt+4)
case ssa.OpClobberReg:
x := uint64(0xdeaddeaddeaddead)
p := s.Prog(arm64.AMOVD)
p.From.Type = obj.TYPE_CONST
p.From.Offset = int64(x)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
default:
v.Fatalf("genValue not implemented: %s", v.LongString())
}
@@ -1266,3 +1305,22 @@ func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
b.Fatalf("branch not implemented: %s", b.LongString())
}
}
func loadRegResult(s *ssagen.State, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog {
p := s.Prog(loadByType(t))
p.From.Type = obj.TYPE_MEM
p.From.Name = obj.NAME_AUTO
p.From.Sym = n.Linksym()
p.From.Offset = n.FrameOffset() + off
p.To.Type = obj.TYPE_REG
p.To.Reg = reg
return p
}
func spillArgReg(pp *objw.Progs, p *obj.Prog, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog {
p = pp.Append(p, storeByType(t), obj.TYPE_REG, reg, 0, obj.TYPE_MEM, 0, n.FrameOffset()+off)
p.To.Name = obj.NAME_PARAM
p.To.Sym = n.Linksym()
p.Pos = p.Pos.WithNotStmt()
return p
}


@@ -0,0 +1,11 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !compiler_bootstrap
package base
// CompilerBootstrap reports whether the current compiler binary was
// built with -tags=compiler_bootstrap.
const CompilerBootstrap = false


@@ -0,0 +1,11 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build compiler_bootstrap
package base
// CompilerBootstrap reports whether the current compiler binary was
// built with -tags=compiler_bootstrap.
const CompilerBootstrap = true
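The two files above use build tags to select one of two compile-time constants. A minimal sketch of a hypothetical use site follows; the function name is made up, and the package is internal to cmd/compile, so this is illustrative only:

```go
package example

import "cmd/compile/internal/base"

// useFullFeatures is a hypothetical gate: code paths that are not
// safe under the bootstrap toolchain can be disabled at compile time.
func useFullFeatures() bool {
	return !base.CompilerBootstrap
}
```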


@@ -44,8 +44,11 @@ type DebugFlags struct {
Panic int `help:"show all compiler panics"`
Slice int `help:"print information about slice compilation"`
SoftFloat int `help:"force compiler to emit soft-float code"`
SyncFrames int `help:"how many writer stack frames to include at sync points in unified export data"`
TypeAssert int `help:"print information about type assertion inlining"`
TypecheckInl int `help:"eager typechecking of inline function bodies"`
Unified int `help:"enable unified IR construction"`
UnifiedQuirks int `help:"enable unified IR construction's quirks mode"`
WB int `help:"print information about write barriers"`
ABIWrap int `help:"print information about ABI wrapper generation"`


@@ -159,7 +159,11 @@ func ParseFlags() {
Flag.LinkShared = &Ctxt.Flag_linkshared
Flag.Shared = &Ctxt.Flag_shared
Flag.WB = true
Debug.InlFuncsWithClosures = 1
if buildcfg.Experiment.Unified {
Debug.Unified = 1
}
Debug.Checkptr = -1 // so we can tell whether it is set explicitly


@@ -5,7 +5,7 @@
//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd
// +build darwin dragonfly freebsd linux netbsd openbsd
package typecheck
package base
import (
"os"
@@ -19,7 +19,7 @@ import (
// mapFile returns length bytes from the file starting at the
// specified offset as a string.
func mapFile(f *os.File, offset, length int64) (string, error) {
func MapFile(f *os.File, offset, length int64) (string, error) {
// POSIX mmap: "The implementation may require that off is a
// multiple of the page size."
x := offset & int64(os.Getpagesize()-1)
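For context, here is a self-contained sketch of the mmap approach this hunk describes: round the offset down to a page boundary, map the extra bytes, and slice off the misalignment. It is Unix-only, abbreviates error handling, and the helper name is illustrative:

```go
package example

import (
	"os"
	"syscall"
)

// mapFileSketch returns length bytes from f starting at offset. POSIX
// mmap may require a page-aligned offset, so map from the page
// boundary below offset and skip the first x bytes.
func mapFileSketch(f *os.File, offset, length int64) (string, error) {
	x := offset & int64(os.Getpagesize()-1) // misalignment within the page
	data, err := syscall.Mmap(int(f.Fd()), offset-x, int(length+x),
		syscall.PROT_READ, syscall.MAP_SHARED)
	if err != nil {
		return "", err
	}
	return string(data[x : x+length]), nil
}
```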


@@ -5,14 +5,14 @@
//go:build !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd
// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd
package typecheck
package base
import (
"io"
"os"
)
func mapFile(f *os.File, offset, length int64) (string, error) {
func MapFile(f *os.File, offset, length int64) (string, error) {
buf := make([]byte, length)
_, err := io.ReadFull(io.NewSectionReader(f, offset, length), buf)
if err != nil {


@@ -233,6 +233,27 @@ func FatalfAt(pos src.XPos, format string, args ...interface{}) {
ErrorExit()
}
// Assert reports "assertion failed" with Fatalf, unless b is true.
func Assert(b bool) {
if !b {
Fatalf("assertion failed")
}
}
// Assertf reports a fatal error with Fatalf, unless b is true.
func Assertf(b bool, format string, args ...interface{}) {
if !b {
Fatalf(format, args...)
}
}
// AssertfAt reports a fatal error with FatalfAt, unless b is true.
func AssertfAt(b bool, pos src.XPos, format string, args ...interface{}) {
if !b {
FatalfAt(pos, format, args...)
}
}
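A hypothetical call site for these helpers (a sketch only; the packages are internal to cmd/compile and the function is made up):

```go
package example

import (
	"cmd/compile/internal/base"
	"cmd/compile/internal/ir"
)

// checkName shows the intended usage: Assert for invariants that need
// no message, AssertfAt when a position and context help debugging.
func checkName(n ir.Node) {
	base.Assert(n != nil)
	base.AssertfAt(n.Op() == ir.ONAME, n.Pos(), "expected ONAME, got %v", n.Op())
}
```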
// hcrash crashes the compiler when -h is set, to find out where a message is generated.
func hcrash() {
if Flag.LowerH != 0 {


@@ -0,0 +1,120 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package escape
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
)
// addr evaluates an addressable expression n and returns a hole
// that represents storing into the represented location.
func (e *escape) addr(n ir.Node) hole {
if n == nil || ir.IsBlank(n) {
// Can happen in select case, range, maybe others.
return e.discardHole()
}
k := e.heapHole()
switch n.Op() {
default:
base.Fatalf("unexpected addr: %v", n)
case ir.ONAME:
n := n.(*ir.Name)
if n.Class == ir.PEXTERN {
break
}
k = e.oldLoc(n).asHole()
case ir.OLINKSYMOFFSET:
break
case ir.ODOT:
n := n.(*ir.SelectorExpr)
k = e.addr(n.X)
case ir.OINDEX:
n := n.(*ir.IndexExpr)
e.discard(n.Index)
if n.X.Type().IsArray() {
k = e.addr(n.X)
} else {
e.discard(n.X)
}
case ir.ODEREF, ir.ODOTPTR:
e.discard(n)
case ir.OINDEXMAP:
n := n.(*ir.IndexExpr)
e.discard(n.X)
e.assignHeap(n.Index, "key of map put", n)
}
return k
}
func (e *escape) addrs(l ir.Nodes) []hole {
var ks []hole
for _, n := range l {
ks = append(ks, e.addr(n))
}
return ks
}
func (e *escape) assignHeap(src ir.Node, why string, where ir.Node) {
e.expr(e.heapHole().note(where, why), src)
}
// assignList evaluates the assignment dsts... = srcs....
func (e *escape) assignList(dsts, srcs []ir.Node, why string, where ir.Node) {
ks := e.addrs(dsts)
for i, k := range ks {
var src ir.Node
if i < len(srcs) {
src = srcs[i]
}
if dst := dsts[i]; dst != nil {
// Detect implicit conversion of uintptr to unsafe.Pointer when
// storing into reflect.{Slice,String}Header.
if dst.Op() == ir.ODOTPTR && ir.IsReflectHeaderDataField(dst) {
e.unsafeValue(e.heapHole().note(where, why), src)
continue
}
// Filter out some no-op assignments for escape analysis.
if src != nil && isSelfAssign(dst, src) {
if base.Flag.LowerM != 0 {
base.WarnfAt(where.Pos(), "%v ignoring self-assignment in %v", e.curfn, where)
}
k = e.discardHole()
}
}
e.expr(k.note(where, why), src)
}
e.reassigned(ks, where)
}
// reassigned marks the locations associated with the given holes as
// reassigned, unless the location represents a variable declared and
// assigned exactly once by where.
func (e *escape) reassigned(ks []hole, where ir.Node) {
if as, ok := where.(*ir.AssignStmt); ok && as.Op() == ir.OAS && as.Y == nil {
if dst, ok := as.X.(*ir.Name); ok && dst.Op() == ir.ONAME && dst.Defn == nil {
// Zero-value assignment for variable declared without an
// explicit initial value. Assume this is its initialization
// statement.
return
}
}
for _, k := range ks {
loc := k.dst
// Variables declared by range statements are assigned on every iteration.
if n, ok := loc.n.(*ir.Name); ok && n.Defn == where && where.Op() != ir.ORANGE {
continue
}
loc.reassigned = true
}
}
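Source-level examples of the assignment cases the code above distinguishes (a sketch; the function and variable names are made up):

```go
package example

// f illustrates the cases assignList and reassigned handle.
func f(s []int, x int) *int {
	s = s[1:]  // self-assignment: logged with -m, then ignored
	var q *int // zero-value declaration: not counted as a reassignment
	_ = q
	p := &x  // ordinary assignment: x's address flows to p
	return p // x escapes via the returned pointer
}
```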


@@ -0,0 +1,428 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package escape
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/internal/src"
)
// call evaluates a call expression, including builtin calls. ks
// should contain the holes representing where the function callee's
// results flow.
func (e *escape) call(ks []hole, call ir.Node) {
var init ir.Nodes
e.callCommon(ks, call, &init, nil)
if len(init) != 0 {
call.(*ir.CallExpr).PtrInit().Append(init...)
}
}
func (e *escape) callCommon(ks []hole, call ir.Node, init *ir.Nodes, wrapper *ir.Func) {
// argumentFunc handles escape analysis of argument *argp to the
// given hole. If the function callee is known, pragma is the
// function's pragma flags; otherwise 0.
argumentFunc := func(fn *ir.Name, k hole, argp *ir.Node) {
e.rewriteArgument(argp, init, call, fn, wrapper)
e.expr(k.note(call, "call parameter"), *argp)
}
argument := func(k hole, argp *ir.Node) {
argumentFunc(nil, k, argp)
}
switch call.Op() {
default:
ir.Dump("esc", call)
base.Fatalf("unexpected call op: %v", call.Op())
case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER:
call := call.(*ir.CallExpr)
typecheck.FixVariadicCall(call)
typecheck.FixMethodCall(call)
// Pick out the function callee, if statically known.
//
// TODO(mdempsky): Change fn from *ir.Name to *ir.Func, but some
// functions (e.g., runtime builtins, method wrappers, generated
// eq/hash functions) don't have it set. Investigate whether
// that's a concern.
var fn *ir.Name
switch call.Op() {
case ir.OCALLFUNC:
// If we have a direct call to a closure (not just one we were
// able to statically resolve with ir.StaticValue), mark it as
// such so batch.outlives can optimize the flow results.
if call.X.Op() == ir.OCLOSURE {
call.X.(*ir.ClosureExpr).Func.SetClosureCalled(true)
}
switch v := ir.StaticValue(call.X); v.Op() {
case ir.ONAME:
if v := v.(*ir.Name); v.Class == ir.PFUNC {
fn = v
}
case ir.OCLOSURE:
fn = v.(*ir.ClosureExpr).Func.Nname
case ir.OMETHEXPR:
fn = ir.MethodExprName(v)
}
case ir.OCALLMETH:
base.FatalfAt(call.Pos(), "OCALLMETH missed by typecheck")
}
fntype := call.X.Type()
if fn != nil {
fntype = fn.Type()
}
if ks != nil && fn != nil && e.inMutualBatch(fn) {
for i, result := range fn.Type().Results().FieldSlice() {
e.expr(ks[i], ir.AsNode(result.Nname))
}
}
var recvp *ir.Node
if call.Op() == ir.OCALLFUNC {
// Evaluate callee function expression.
//
// Note: We use argument and not argumentFunc, because while
// call.X here may be an argument to runtime.{new,defer}proc,
// it's not an argument to fn itself.
argument(e.discardHole(), &call.X)
} else {
recvp = &call.X.(*ir.SelectorExpr).X
}
args := call.Args
if recv := fntype.Recv(); recv != nil {
if recvp == nil {
// Function call using method expression. Receiver argument is
// at the front of the regular arguments list.
recvp = &args[0]
args = args[1:]
}
argumentFunc(fn, e.tagHole(ks, fn, recv), recvp)
}
for i, param := range fntype.Params().FieldSlice() {
argumentFunc(fn, e.tagHole(ks, fn, param), &args[i])
}
case ir.OINLCALL:
call := call.(*ir.InlinedCallExpr)
e.stmts(call.Body)
for i, result := range call.ReturnVars {
k := e.discardHole()
if ks != nil {
k = ks[i]
}
e.expr(k, result)
}
case ir.OAPPEND:
call := call.(*ir.CallExpr)
args := call.Args
// Appendee slice may flow directly to the result, if
// it has enough capacity. Alternatively, a new heap
// slice might be allocated, and all slice elements
// might flow to heap.
appendeeK := ks[0]
if args[0].Type().Elem().HasPointers() {
appendeeK = e.teeHole(appendeeK, e.heapHole().deref(call, "appendee slice"))
}
argument(appendeeK, &args[0])
if call.IsDDD {
appendedK := e.discardHole()
if args[1].Type().IsSlice() && args[1].Type().Elem().HasPointers() {
appendedK = e.heapHole().deref(call, "appended slice...")
}
argument(appendedK, &args[1])
} else {
for i := 1; i < len(args); i++ {
argument(e.heapHole(), &args[i])
}
}
case ir.OCOPY:
call := call.(*ir.BinaryExpr)
argument(e.discardHole(), &call.X)
copiedK := e.discardHole()
if call.Y.Type().IsSlice() && call.Y.Type().Elem().HasPointers() {
copiedK = e.heapHole().deref(call, "copied slice")
}
argument(copiedK, &call.Y)
case ir.OPANIC:
call := call.(*ir.UnaryExpr)
argument(e.heapHole(), &call.X)
case ir.OCOMPLEX:
call := call.(*ir.BinaryExpr)
argument(e.discardHole(), &call.X)
argument(e.discardHole(), &call.Y)
case ir.ODELETE, ir.OPRINT, ir.OPRINTN, ir.ORECOVER:
call := call.(*ir.CallExpr)
fixRecoverCall(call)
for i := range call.Args {
argument(e.discardHole(), &call.Args[i])
}
case ir.OLEN, ir.OCAP, ir.OREAL, ir.OIMAG, ir.OCLOSE:
call := call.(*ir.UnaryExpr)
argument(e.discardHole(), &call.X)
case ir.OUNSAFEADD, ir.OUNSAFESLICE:
call := call.(*ir.BinaryExpr)
argument(ks[0], &call.X)
argument(e.discardHole(), &call.Y)
}
}
// goDeferStmt analyzes a "go" or "defer" statement.
//
// In the process, it also normalizes the statement to always use a
// simple function call with no arguments and no results. For example,
// it rewrites:
//
// defer f(x, y)
//
// into:
//
// x1, y1 := x, y
// defer func() { f(x1, y1) }()
func (e *escape) goDeferStmt(n *ir.GoDeferStmt) {
k := e.heapHole()
if n.Op() == ir.ODEFER && e.loopDepth == 1 {
// Top-level defer arguments don't escape to the heap,
// but they do need to last until they're invoked.
k = e.later(e.discardHole())
// force stack allocation of defer record, unless
// open-coded defers are used (see ssa.go)
n.SetEsc(ir.EscNever)
}
call := n.Call
init := n.PtrInit()
init.Append(ir.TakeInit(call)...)
e.stmts(*init)
// If the function is already a zero argument/result function call,
// just escape analyze it normally.
if call, ok := call.(*ir.CallExpr); ok && call.Op() == ir.OCALLFUNC {
if sig := call.X.Type(); sig.NumParams()+sig.NumResults() == 0 {
if clo, ok := call.X.(*ir.ClosureExpr); ok && n.Op() == ir.OGO {
clo.IsGoWrap = true
}
e.expr(k, call.X)
return
}
}
// Create a new no-argument function that we'll hand off to defer.
fn := ir.NewClosureFunc(n.Pos(), true)
fn.SetWrapper(true)
fn.Nname.SetType(types.NewSignature(types.LocalPkg, nil, nil, nil, nil))
fn.Body = []ir.Node{call}
clo := fn.OClosure
if n.Op() == ir.OGO {
clo.IsGoWrap = true
}
e.callCommon(nil, call, init, fn)
e.closures = append(e.closures, closure{e.spill(k, clo), clo})
// Create new top level call to closure.
n.Call = ir.NewCallExpr(call.Pos(), ir.OCALL, clo, nil)
ir.WithFunc(e.curfn, func() {
typecheck.Stmt(n.Call)
})
}
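The normalization goDeferStmt performs corresponds to this source-level rewrite, shown here as a sketch with placeholder names:

```go
package example

func f(a, b int) {}

func before(x, y int) {
	defer f(x, y)
}

// after is the equivalent form the compiler constructs: the arguments
// are evaluated at the defer statement and captured, and the deferred
// call itself takes no arguments and returns no results.
func after(x, y int) {
	x1, y1 := x, y
	defer func() { f(x1, y1) }()
}
```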
// rewriteArgument rewrites the argument *argp of the given call expression.
// fn is the static callee function, if known.
// wrapper is the go/defer wrapper function for call, if any.
func (e *escape) rewriteArgument(argp *ir.Node, init *ir.Nodes, call ir.Node, fn *ir.Name, wrapper *ir.Func) {
var pragma ir.PragmaFlag
if fn != nil && fn.Func != nil {
pragma = fn.Func.Pragma
}
// unsafeUintptr rewrites "uintptr(ptr)" arguments to syscall-like
// functions, so that ptr is kept alive and/or escaped as
// appropriate. unsafeUintptr also reports whether it modified arg0.
unsafeUintptr := func(arg0 ir.Node) bool {
if pragma&(ir.UintptrKeepAlive|ir.UintptrEscapes) == 0 {
return false
}
// If the argument is really a pointer being converted to uintptr,
// arrange for the pointer to be kept alive until the call returns,
// by copying it into a temp and marking that temp
// still alive when we pop the temp stack.
if arg0.Op() != ir.OCONVNOP || !arg0.Type().IsUintptr() {
return false
}
arg := arg0.(*ir.ConvExpr)
if !arg.X.Type().IsUnsafePtr() {
return false
}
// Create and declare a new pointer-typed temp variable.
tmp := e.wrapExpr(arg.Pos(), &arg.X, init, call, wrapper)
if pragma&ir.UintptrEscapes != 0 {
e.flow(e.heapHole().note(arg, "//go:uintptrescapes"), e.oldLoc(tmp))
}
if pragma&ir.UintptrKeepAlive != 0 {
call := call.(*ir.CallExpr)
// SSA implements CallExpr.KeepAlive using OpVarLive, which
// doesn't support PAUTOHEAP variables. I tried changing it to
// use OpKeepAlive, but that ran into issues of its own.
// For now, the easy solution is to explicitly copy to (yet
// another) new temporary variable.
keep := tmp
if keep.Class == ir.PAUTOHEAP {
keep = e.copyExpr(arg.Pos(), tmp, call.PtrInit(), wrapper, false)
}
keep.SetAddrtaken(true) // ensure SSA keeps the tmp variable
call.KeepAlive = append(call.KeepAlive, keep)
}
return true
}
visit := func(pos src.XPos, argp *ir.Node) {
// Optimize a few common constant expressions. By leaving these
// untouched in the call expression, we let the wrapper handle
// evaluating them, rather than taking up closure context space.
switch arg := *argp; arg.Op() {
case ir.OLITERAL, ir.ONIL, ir.OMETHEXPR:
return
case ir.ONAME:
if arg.(*ir.Name).Class == ir.PFUNC {
return
}
}
if unsafeUintptr(*argp) {
return
}
if wrapper != nil {
e.wrapExpr(pos, argp, init, call, wrapper)
}
}
// Peel away any slice lits.
if arg := *argp; arg.Op() == ir.OSLICELIT {
list := arg.(*ir.CompLitExpr).List
for i := range list {
visit(arg.Pos(), &list[i])
}
} else {
visit(call.Pos(), argp)
}
}
// wrapExpr replaces *exprp with a temporary variable copy. If wrapper
// is non-nil, the variable will be captured for use within that
// function.
func (e *escape) wrapExpr(pos src.XPos, exprp *ir.Node, init *ir.Nodes, call ir.Node, wrapper *ir.Func) *ir.Name {
tmp := e.copyExpr(pos, *exprp, init, e.curfn, true)
if wrapper != nil {
// Currently for "defer i.M()" if i is nil it panics at the point
// of defer statement, not when deferred function is called. We
// need to do the nil check outside of the wrapper.
if call.Op() == ir.OCALLINTER && exprp == &call.(*ir.CallExpr).X.(*ir.SelectorExpr).X {
check := ir.NewUnaryExpr(pos, ir.OCHECKNIL, ir.NewUnaryExpr(pos, ir.OITAB, tmp))
init.Append(typecheck.Stmt(check))
}
e.oldLoc(tmp).captured = true
tmp = ir.NewClosureVar(pos, wrapper, tmp)
}
*exprp = tmp
return tmp
}
// copyExpr creates and returns a new temporary variable within fn;
// appends statements to init to declare and initialize it to expr;
// and escape analyzes the data flow if analyze is true.
func (e *escape) copyExpr(pos src.XPos, expr ir.Node, init *ir.Nodes, fn *ir.Func, analyze bool) *ir.Name {
if ir.HasUniquePos(expr) {
pos = expr.Pos()
}
tmp := typecheck.TempAt(pos, fn, expr.Type())
stmts := []ir.Node{
ir.NewDecl(pos, ir.ODCL, tmp),
ir.NewAssignStmt(pos, tmp, expr),
}
typecheck.Stmts(stmts)
init.Append(stmts...)
if analyze {
e.newLoc(tmp, false)
e.stmts(stmts)
}
return tmp
}
// tagHole returns a hole for evaluating an argument passed to param.
// ks should contain the holes representing where the function
// callee's results flow. fn is the statically-known callee function,
// if any.
func (e *escape) tagHole(ks []hole, fn *ir.Name, param *types.Field) hole {
// If this is a dynamic call, we can't rely on param.Note.
if fn == nil {
return e.heapHole()
}
if e.inMutualBatch(fn) {
return e.addr(ir.AsNode(param.Nname))
}
// Call to previously tagged function.
var tagKs []hole
esc := parseLeaks(param.Note)
if x := esc.Heap(); x >= 0 {
tagKs = append(tagKs, e.heapHole().shift(x))
}
if ks != nil {
for i := 0; i < numEscResults; i++ {
if x := esc.Result(i); x >= 0 {
tagKs = append(tagKs, ks[i].shift(x))
}
}
}
return e.teeHole(tagKs...)
}


@@ -0,0 +1,37 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package escape
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
)
// TODO(mdempsky): Desugaring doesn't belong during escape analysis,
// but for now it's the most convenient place for some rewrites.
// fixRecoverCall rewrites an ORECOVER call into ORECOVERFP,
// adding an explicit frame pointer argument.
// If call is not an ORECOVER call, it's left unmodified.
func fixRecoverCall(call *ir.CallExpr) {
if call.Op() != ir.ORECOVER {
return
}
pos := call.Pos()
// FP is equal to caller's SP plus FixedFrameSize().
var fp ir.Node = ir.NewCallExpr(pos, ir.OGETCALLERSP, nil, nil)
if off := base.Ctxt.FixedFrameSize(); off != 0 {
fp = ir.NewBinaryExpr(fp.Pos(), ir.OADD, fp, ir.NewInt(off))
}
// TODO(mdempsky): Replace *int32 with unsafe.Pointer, without upsetting checkptr.
fp = ir.NewConvExpr(pos, ir.OCONVNOP, types.NewPtr(types.Types[types.TINT32]), fp)
call.SetOp(ir.ORECOVERFP)
call.Args = []ir.Node{typecheck.Expr(fp)}
}

File diff suppressed because it is too large.


@@ -0,0 +1,335 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package escape
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/types"
)
// expr models evaluating an expression n and flowing the result into
// hole k.
func (e *escape) expr(k hole, n ir.Node) {
if n == nil {
return
}
e.stmts(n.Init())
e.exprSkipInit(k, n)
}
func (e *escape) exprSkipInit(k hole, n ir.Node) {
if n == nil {
return
}
lno := ir.SetPos(n)
defer func() {
base.Pos = lno
}()
if k.derefs >= 0 && !n.Type().HasPointers() {
k.dst = &e.blankLoc
}
switch n.Op() {
default:
base.Fatalf("unexpected expr: %s %v", n.Op().String(), n)
case ir.OLITERAL, ir.ONIL, ir.OGETG, ir.OGETCALLERPC, ir.OGETCALLERSP, ir.OTYPE, ir.OMETHEXPR, ir.OLINKSYMOFFSET:
// nop
case ir.ONAME:
n := n.(*ir.Name)
if n.Class == ir.PFUNC || n.Class == ir.PEXTERN {
return
}
e.flow(k, e.oldLoc(n))
case ir.OPLUS, ir.ONEG, ir.OBITNOT, ir.ONOT:
n := n.(*ir.UnaryExpr)
e.discard(n.X)
case ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.ODIV, ir.OMOD, ir.OLSH, ir.ORSH, ir.OAND, ir.OANDNOT, ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE:
n := n.(*ir.BinaryExpr)
e.discard(n.X)
e.discard(n.Y)
case ir.OANDAND, ir.OOROR:
n := n.(*ir.LogicalExpr)
e.discard(n.X)
e.discard(n.Y)
case ir.OADDR:
n := n.(*ir.AddrExpr)
e.expr(k.addr(n, "address-of"), n.X) // "address-of"
case ir.ODEREF:
n := n.(*ir.StarExpr)
e.expr(k.deref(n, "indirection"), n.X) // "indirection"
case ir.ODOT, ir.ODOTMETH, ir.ODOTINTER:
n := n.(*ir.SelectorExpr)
e.expr(k.note(n, "dot"), n.X)
case ir.ODOTPTR:
n := n.(*ir.SelectorExpr)
e.expr(k.deref(n, "dot of pointer"), n.X) // "dot of pointer"
case ir.ODOTTYPE, ir.ODOTTYPE2:
n := n.(*ir.TypeAssertExpr)
e.expr(k.dotType(n.Type(), n, "dot"), n.X)
case ir.ODYNAMICDOTTYPE, ir.ODYNAMICDOTTYPE2:
n := n.(*ir.DynamicTypeAssertExpr)
e.expr(k.dotType(n.Type(), n, "dot"), n.X)
// n.T doesn't need to be tracked; it always points to read-only storage.
case ir.OINDEX:
n := n.(*ir.IndexExpr)
if n.X.Type().IsArray() {
e.expr(k.note(n, "fixed-array-index-of"), n.X)
} else {
// TODO(mdempsky): Fix why reason text.
e.expr(k.deref(n, "dot of pointer"), n.X)
}
e.discard(n.Index)
case ir.OINDEXMAP:
n := n.(*ir.IndexExpr)
e.discard(n.X)
e.discard(n.Index)
case ir.OSLICE, ir.OSLICEARR, ir.OSLICE3, ir.OSLICE3ARR, ir.OSLICESTR:
n := n.(*ir.SliceExpr)
e.expr(k.note(n, "slice"), n.X)
e.discard(n.Low)
e.discard(n.High)
e.discard(n.Max)
case ir.OCONV, ir.OCONVNOP:
n := n.(*ir.ConvExpr)
if ir.ShouldCheckPtr(e.curfn, 2) && n.Type().IsUnsafePtr() && n.X.Type().IsPtr() {
// When -d=checkptr=2 is enabled, treat
// conversions to unsafe.Pointer as an
// escaping operation. This allows better
// runtime instrumentation, since we can more
// easily detect object boundaries on the heap
// than the stack.
e.assignHeap(n.X, "conversion to unsafe.Pointer", n)
} else if n.Type().IsUnsafePtr() && n.X.Type().IsUintptr() {
e.unsafeValue(k, n.X)
} else {
e.expr(k, n.X)
}
case ir.OCONVIFACE, ir.OCONVIDATA:
n := n.(*ir.ConvExpr)
if !n.X.Type().IsInterface() && !types.IsDirectIface(n.X.Type()) {
k = e.spill(k, n)
}
e.expr(k.note(n, "interface-converted"), n.X)
case ir.OEFACE:
n := n.(*ir.BinaryExpr)
// Note: n.X is not needed because it can never point to memory that might escape.
e.expr(k, n.Y)
case ir.OIDATA, ir.OSPTR:
n := n.(*ir.UnaryExpr)
e.expr(k, n.X)
case ir.OSLICE2ARRPTR:
// the slice pointer flows directly to the result
n := n.(*ir.ConvExpr)
e.expr(k, n.X)
case ir.ORECV:
n := n.(*ir.UnaryExpr)
e.discard(n.X)
case ir.OCALLMETH, ir.OCALLFUNC, ir.OCALLINTER, ir.OINLCALL, ir.OLEN, ir.OCAP, ir.OCOMPLEX, ir.OREAL, ir.OIMAG, ir.OAPPEND, ir.OCOPY, ir.ORECOVER, ir.OUNSAFEADD, ir.OUNSAFESLICE:
e.call([]hole{k}, n)
case ir.ONEW:
n := n.(*ir.UnaryExpr)
e.spill(k, n)
case ir.OMAKESLICE:
n := n.(*ir.MakeExpr)
e.spill(k, n)
e.discard(n.Len)
e.discard(n.Cap)
case ir.OMAKECHAN:
n := n.(*ir.MakeExpr)
e.discard(n.Len)
case ir.OMAKEMAP:
n := n.(*ir.MakeExpr)
e.spill(k, n)
e.discard(n.Len)
case ir.OMETHVALUE:
// Flow the receiver argument to both the closure and
// to the receiver parameter.
n := n.(*ir.SelectorExpr)
closureK := e.spill(k, n)
m := n.Selection
// We don't know how the method value will be called
// later, so conservatively assume the result
// parameters all flow to the heap.
//
// TODO(mdempsky): Change ks into a callback, so that
// we don't have to create this slice?
var ks []hole
for i := m.Type.NumResults(); i > 0; i-- {
ks = append(ks, e.heapHole())
}
name, _ := m.Nname.(*ir.Name)
paramK := e.tagHole(ks, name, m.Type.Recv())
e.expr(e.teeHole(paramK, closureK), n.X)
case ir.OPTRLIT:
n := n.(*ir.AddrExpr)
e.expr(e.spill(k, n), n.X)
case ir.OARRAYLIT:
n := n.(*ir.CompLitExpr)
for _, elt := range n.List {
if elt.Op() == ir.OKEY {
elt = elt.(*ir.KeyExpr).Value
}
e.expr(k.note(n, "array literal element"), elt)
}
case ir.OSLICELIT:
n := n.(*ir.CompLitExpr)
k = e.spill(k, n)
for _, elt := range n.List {
if elt.Op() == ir.OKEY {
elt = elt.(*ir.KeyExpr).Value
}
e.expr(k.note(n, "slice-literal-element"), elt)
}
case ir.OSTRUCTLIT:
n := n.(*ir.CompLitExpr)
for _, elt := range n.List {
e.expr(k.note(n, "struct literal element"), elt.(*ir.StructKeyExpr).Value)
}
case ir.OMAPLIT:
n := n.(*ir.CompLitExpr)
e.spill(k, n)
// Map keys and values are always stored in the heap.
for _, elt := range n.List {
elt := elt.(*ir.KeyExpr)
e.assignHeap(elt.Key, "map literal key", n)
e.assignHeap(elt.Value, "map literal value", n)
}
case ir.OCLOSURE:
n := n.(*ir.ClosureExpr)
k = e.spill(k, n)
e.closures = append(e.closures, closure{k, n})
if fn := n.Func; fn.IsHiddenClosure() {
for _, cv := range fn.ClosureVars {
if loc := e.oldLoc(cv); !loc.captured {
loc.captured = true
// Ignore reassignments to the variable in straightline code
// preceding the first capture by a closure.
if loc.loopDepth == e.loopDepth {
loc.reassigned = false
}
}
}
for _, n := range fn.Dcl {
// Add locations for local variables of the
// closure, if needed, in case we're not including
// the closure func in the batch for escape
// analysis (happens for escape analysis called
// from reflectdata.methodWrapper)
if n.Op() == ir.ONAME && n.Opt == nil {
e.with(fn).newLoc(n, false)
}
}
e.walkFunc(fn)
}
case ir.ORUNES2STR, ir.OBYTES2STR, ir.OSTR2RUNES, ir.OSTR2BYTES, ir.ORUNESTR:
n := n.(*ir.ConvExpr)
e.spill(k, n)
e.discard(n.X)
case ir.OADDSTR:
n := n.(*ir.AddStringExpr)
e.spill(k, n)
// Arguments of OADDSTR never escape;
// runtime.concatstrings makes sure of that.
e.discards(n.List)
case ir.ODYNAMICTYPE:
// Nothing to do - argument is a *runtime._type (+ maybe a *runtime.itab) pointing to static data section
}
}
// unsafeValue evaluates a uintptr-typed arithmetic expression looking
// for conversions from an unsafe.Pointer.
func (e *escape) unsafeValue(k hole, n ir.Node) {
if n.Type().Kind() != types.TUINTPTR {
base.Fatalf("unexpected type %v for %v", n.Type(), n)
}
if k.addrtaken {
base.Fatalf("unexpected addrtaken")
}
e.stmts(n.Init())
switch n.Op() {
case ir.OCONV, ir.OCONVNOP:
n := n.(*ir.ConvExpr)
if n.X.Type().IsUnsafePtr() {
e.expr(k, n.X)
} else {
e.discard(n.X)
}
case ir.ODOTPTR:
n := n.(*ir.SelectorExpr)
if ir.IsReflectHeaderDataField(n) {
e.expr(k.deref(n, "reflect.Header.Data"), n.X)
} else {
e.discard(n.X)
}
case ir.OPLUS, ir.ONEG, ir.OBITNOT:
n := n.(*ir.UnaryExpr)
e.unsafeValue(k, n.X)
case ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.ODIV, ir.OMOD, ir.OAND, ir.OANDNOT:
n := n.(*ir.BinaryExpr)
e.unsafeValue(k, n.X)
e.unsafeValue(k, n.Y)
case ir.OLSH, ir.ORSH:
n := n.(*ir.BinaryExpr)
e.unsafeValue(k, n.X)
// RHS need not be uintptr-typed (#32959) and can't meaningfully
// flow pointers anyway.
e.discard(n.Y)
default:
e.exprSkipInit(e.discardHole(), n)
}
}
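A source-level illustration of the uintptr data flow that unsafeValue tracks (a sketch; the round-trip through uintptr arithmetic is the pattern of interest):

```go
package example

import "unsafe"

// roundTrip converts a pointer to uintptr, does arithmetic on it, and
// converts back. unsafeValue follows the conversion and the arithmetic
// operands, so p's referent is treated as reachable through u.
func roundTrip(p *int) *int {
	u := uintptr(unsafe.Pointer(p))
	u += 0 // arithmetic operands are walked recursively
	return (*int)(unsafe.Pointer(u))
}
```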
// discard evaluates an expression n for side-effects, but discards
// its value.
func (e *escape) discard(n ir.Node) {
e.expr(e.discardHole(), n)
}
func (e *escape) discards(l ir.Nodes) {
for _, n := range l {
e.discard(n)
}
}
// spill allocates a new location associated with expression n, flows
// its address to k, and returns a hole that flows values to it. It's
// intended for use with most expressions that allocate storage.
func (e *escape) spill(k hole, n ir.Node) hole {
loc := e.newLoc(n, true)
e.flow(k.addr(n, "spill"), loc)
return loc.asHole()
}


@@ -0,0 +1,324 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package escape
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/logopt"
"cmd/compile/internal/types"
"fmt"
)
// Below we implement the methods for walking the AST and recording
// data flow edges. Note that because a sub-expression might have
// side-effects, it's important to always visit the entire AST.
//
// For example, write either:
//
// if x {
// e.discard(n.Left)
// } else {
// e.value(k, n.Left)
// }
//
// or
//
// if x {
// k = e.discardHole()
// }
// e.value(k, n.Left)
//
// Do NOT write:
//
// // BAD: possibly loses side-effects within n.Left
// if !x {
// e.value(k, n.Left)
// }
// A location represents an abstract location that stores a Go
// variable.
type location struct {
n ir.Node // represented variable or expression, if any
curfn *ir.Func // enclosing function
edges []edge // incoming edges
loopDepth int // loopDepth at declaration
// resultIndex records the tuple index (starting at 1) for
// PPARAMOUT variables within their function's result type.
// For non-PPARAMOUT variables it's 0.
resultIndex int
// derefs and walkgen are used during walkOne to track the
// minimal dereferences from the walk root.
derefs int // >= -1
walkgen uint32
// dst and dstEdgeIdx track the next immediate assignment
// destination location during walkOne, along with the index
// of the edge pointing back to this location.
dst *location
dstEdgeIdx int
// queued is used by walkAll to track whether this location is
// in the walk queue.
queued bool
// escapes reports whether the represented variable's address
// escapes; that is, whether the variable must be heap
// allocated.
escapes bool
// transient reports whether the represented expression's
// address does not outlive the statement; that is, whether
// its storage can be immediately reused.
transient bool
// paramEsc records the represented parameter's leak set.
paramEsc leaks
captured bool // has a closure captured this variable?
reassigned bool // has this variable been reassigned?
addrtaken bool // has this variable's address been taken?
}
// An edge represents an assignment edge between two Go variables.
type edge struct {
src *location
derefs int // >= -1
notes *note
}
func (l *location) asHole() hole {
return hole{dst: l}
}
// leak records that parameter l leaks to sink.
func (l *location) leakTo(sink *location, derefs int) {
// If sink is a result parameter that doesn't escape (#44614)
// and we can fit return bits into the escape analysis tag,
// then record as a result leak.
if !sink.escapes && sink.isName(ir.PPARAMOUT) && sink.curfn == l.curfn {
ri := sink.resultIndex - 1
if ri < numEscResults {
// Leak to result parameter.
l.paramEsc.AddResult(ri, derefs)
return
}
}
// Otherwise, record as heap leak.
l.paramEsc.AddHeap(derefs)
}
func (l *location) isName(c ir.Class) bool {
return l.n != nil && l.n.Op() == ir.ONAME && l.n.(*ir.Name).Class == c
}
// A hole represents a context for evaluating a Go
// expression. E.g., when evaluating p in "x = **p", we'd have a hole
// with dst==x and derefs==2.
type hole struct {
dst *location
derefs int // >= -1
notes *note
// addrtaken indicates whether this context is taking the address of
// the expression, independent of whether the address will actually
// be stored into a variable.
addrtaken bool
}
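The running example from the comment above, written out as source (a sketch):

```go
package example

// deref2 evaluates p in a context with two dereferences: in the
// terminology above, the hole for p has dst == x and derefs == 2.
func deref2(p **int) int {
	var x int
	x = **p
	return x
}
```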
type note struct {
next *note
where ir.Node
why string
}
func (k hole) note(where ir.Node, why string) hole {
if where == nil || why == "" {
base.Fatalf("note: missing where/why")
}
if base.Flag.LowerM >= 2 || logopt.Enabled() {
k.notes = &note{
next: k.notes,
where: where,
why: why,
}
}
return k
}
func (k hole) shift(delta int) hole {
k.derefs += delta
if k.derefs < -1 {
base.Fatalf("derefs underflow: %v", k.derefs)
}
k.addrtaken = delta < 0
return k
}
func (k hole) deref(where ir.Node, why string) hole { return k.shift(1).note(where, why) }
func (k hole) addr(where ir.Node, why string) hole { return k.shift(-1).note(where, why) }
func (k hole) dotType(t *types.Type, where ir.Node, why string) hole {
if !t.IsInterface() && !types.IsDirectIface(t) {
k = k.shift(1)
}
return k.note(where, why)
}
func (b *batch) flow(k hole, src *location) {
if k.addrtaken {
src.addrtaken = true
}
dst := k.dst
if dst == &b.blankLoc {
return
}
if dst == src && k.derefs >= 0 { // dst = dst, dst = *dst, ...
return
}
if dst.escapes && k.derefs < 0 { // dst = &src
if base.Flag.LowerM >= 2 || logopt.Enabled() {
pos := base.FmtPos(src.n.Pos())
if base.Flag.LowerM >= 2 {
fmt.Printf("%s: %v escapes to heap:\n", pos, src.n)
}
explanation := b.explainFlow(pos, dst, src, k.derefs, k.notes, []*logopt.LoggedOpt{})
if logopt.Enabled() {
var e_curfn *ir.Func // TODO(mdempsky): Fix.
logopt.LogOpt(src.n.Pos(), "escapes", "escape", ir.FuncName(e_curfn), fmt.Sprintf("%v escapes to heap", src.n), explanation)
}
}
src.escapes = true
return
}
// TODO(mdempsky): Deduplicate edges?
dst.edges = append(dst.edges, edge{src: src, derefs: k.derefs, notes: k.notes})
}
func (b *batch) heapHole() hole { return b.heapLoc.asHole() }
func (b *batch) discardHole() hole { return b.blankLoc.asHole() }
func (b *batch) oldLoc(n *ir.Name) *location {
if n.Canonical().Opt == nil {
base.Fatalf("%v has no location", n)
}
return n.Canonical().Opt.(*location)
}
func (e *escape) newLoc(n ir.Node, transient bool) *location {
if e.curfn == nil {
base.Fatalf("e.curfn isn't set")
}
if n != nil && n.Type() != nil && n.Type().NotInHeap() {
base.ErrorfAt(n.Pos(), "%v is incomplete (or unallocatable); stack allocation disallowed", n.Type())
}
if n != nil && n.Op() == ir.ONAME {
if canon := n.(*ir.Name).Canonical(); n != canon {
base.Fatalf("newLoc on non-canonical %v (canonical is %v)", n, canon)
}
}
loc := &location{
n: n,
curfn: e.curfn,
loopDepth: e.loopDepth,
transient: transient,
}
e.allLocs = append(e.allLocs, loc)
if n != nil {
if n.Op() == ir.ONAME {
n := n.(*ir.Name)
if n.Class == ir.PPARAM && n.Curfn == nil {
// ok; hidden parameter
} else if n.Curfn != e.curfn {
base.Fatalf("curfn mismatch: %v != %v for %v", n.Curfn, e.curfn, n)
}
if n.Opt != nil {
base.Fatalf("%v already has a location", n)
}
n.Opt = loc
}
}
return loc
}
// teeHole returns a new hole that flows into each hole of ks,
// similar to the Unix tee(1) command.
func (e *escape) teeHole(ks ...hole) hole {
if len(ks) == 0 {
return e.discardHole()
}
if len(ks) == 1 {
return ks[0]
}
// TODO(mdempsky): Optimize if there's only one non-discard hole?
// Given holes "l1 = _", "l2 = **_", "l3 = *_", ..., create a
// new temporary location ltmp, wire it into place, and return
// a hole for "ltmp = _".
loc := e.newLoc(nil, true)
for _, k := range ks {
// N.B., "p = &q" and "p = &tmp; tmp = q" are not
// semantically equivalent. To combine holes like "l1
// = _" and "l2 = &_", we'd need to wire them as "l1 =
// *ltmp" and "l2 = ltmp" and return "ltmp = &_"
// instead.
if k.derefs < 0 {
base.Fatalf("teeHole: negative derefs")
}
e.flow(k, loc)
}
return loc.asHole()
}
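The non-equivalence noted in teeHole's comment can be seen at the source level (a sketch):

```go
package example

// aliasVsCopy: "p = &q" makes p alias q, while "tmp = q; r = &tmp"
// makes r alias a copy. This is why teeHole rejects holes with
// negative derefs instead of wiring them through a temporary.
func aliasVsCopy() (int, int) {
	q := 1
	p := &q // p aliases q
	tmp := q
	r := &tmp // r aliases a copy of q
	*p, *r = 10, 20
	return q, tmp // 10, 20: the writes hit different objects
}
```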
// later returns a new hole that flows into k, but some time later.
// Its main effect is to prevent immediate reuse of temporary
// variables introduced during Order.
func (e *escape) later(k hole) hole {
loc := e.newLoc(nil, false)
e.flow(k, loc)
return loc.asHole()
}
// Fmt is called from node printing to print information about escape analysis results.
func Fmt(n ir.Node) string {
text := ""
switch n.Esc() {
case ir.EscUnknown:
break
case ir.EscHeap:
text = "esc(h)"
case ir.EscNone:
text = "esc(no)"
case ir.EscNever:
text = "esc(N)"
default:
text = fmt.Sprintf("esc(%d)", n.Esc())
}
if n.Op() == ir.ONAME {
n := n.(*ir.Name)
if loc, ok := n.Opt.(*location); ok && loc.loopDepth != 0 {
if text != "" {
text += " "
}
text += fmt.Sprintf("ld(%d)", loc.loopDepth)
}
}
return text
}


@@ -0,0 +1,106 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package escape
import (
"cmd/compile/internal/base"
"math"
"strings"
)
const numEscResults = 7
// A leaks represents a set of assignment flows from a parameter
// to the heap or to any of its function's (first numEscResults)
// result parameters.
type leaks [1 + numEscResults]uint8
// Empty reports whether l is an empty set (i.e., no assignment flows).
func (l leaks) Empty() bool { return l == leaks{} }
// Heap returns the minimum deref count of any assignment flow from l
// to the heap. If no such flows exist, Heap returns -1.
func (l leaks) Heap() int { return l.get(0) }
// Result returns the minimum deref count of any assignment flow from
// l to its function's i'th result parameter. If no such flows exist,
// Result returns -1.
func (l leaks) Result(i int) int { return l.get(1 + i) }
// AddHeap adds an assignment flow from l to the heap.
func (l *leaks) AddHeap(derefs int) { l.add(0, derefs) }
// AddResult adds an assignment flow from l to its function's i'th
// result parameter.
func (l *leaks) AddResult(i, derefs int) { l.add(1+i, derefs) }
func (l *leaks) setResult(i, derefs int) { l.set(1+i, derefs) }
func (l leaks) get(i int) int { return int(l[i]) - 1 }
func (l *leaks) add(i, derefs int) {
if old := l.get(i); old < 0 || derefs < old {
l.set(i, derefs)
}
}
func (l *leaks) set(i, derefs int) {
v := derefs + 1
if v < 0 {
base.Fatalf("invalid derefs count: %v", derefs)
}
if v > math.MaxUint8 {
v = math.MaxUint8
}
l[i] = uint8(v)
}
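A toy model of the encoding used by get/set above (a sketch, not the real types, which are unexported): a stored byte v encodes deref count v-1, so the zero value means "no flow".

```go
package main

import "fmt"

func main() {
	// Slot 0 models the heap; slots 1..7 model result parameters.
	var l [8]uint8
	set := func(i, derefs int) { l[i] = uint8(derefs + 1) }
	get := func(i int) int { return int(l[i]) - 1 }

	set(0, 2)           // leak to the heap with two dereferences
	fmt.Println(get(0)) // 2
	fmt.Println(get(1)) // -1: no flow to the first result
}
```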
// Optimize removes result flow paths that are equal in length or
// longer than the shortest heap flow path.
func (l *leaks) Optimize() {
// If we have a path to the heap, then there's no use in
// keeping equal or longer paths elsewhere.
if x := l.Heap(); x >= 0 {
for i := 0; i < numEscResults; i++ {
if l.Result(i) >= x {
l.setResult(i, -1)
}
}
}
}
var leakTagCache = map[leaks]string{}
// Encode converts l into a binary string for export data.
func (l leaks) Encode() string {
if l.Heap() == 0 {
// Space optimization: empty string encodes more
// efficiently in export data.
return ""
}
if s, ok := leakTagCache[l]; ok {
return s
}
n := len(l)
for n > 0 && l[n-1] == 0 {
n--
}
s := "esc:" + string(l[:n])
leakTagCache[l] = s
return s
}
// parseLeaks parses a binary string representing a leaks value.
func parseLeaks(s string) leaks {
var l leaks
if !strings.HasPrefix(s, "esc:") {
l.AddHeap(0)
return l
}
copy(l[:], s[4:])
return l
}


@@ -0,0 +1,289 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package escape
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/logopt"
"cmd/internal/src"
"fmt"
"strings"
)
// walkAll computes the minimal dereferences between all pairs of
// locations.
func (b *batch) walkAll() {
// We use a work queue to keep track of locations that we need
// to visit, and repeatedly walk until we reach a fixed point.
//
// We walk once from each location (including the heap), and
// then re-enqueue each location on its transition from
// transient->!transient and !escapes->escapes, which can each
// happen at most once. So we take Θ(len(e.allLocs)) walks.
// LIFO queue with enough room for b.allLocs and b.heapLoc.
todo := make([]*location, 0, len(b.allLocs)+1)
enqueue := func(loc *location) {
if !loc.queued {
todo = append(todo, loc)
loc.queued = true
}
}
for _, loc := range b.allLocs {
enqueue(loc)
}
enqueue(&b.heapLoc)
var walkgen uint32
for len(todo) > 0 {
root := todo[len(todo)-1]
todo = todo[:len(todo)-1]
root.queued = false
walkgen++
b.walkOne(root, walkgen, enqueue)
}
}
// walkOne computes the minimal number of dereferences from root to
// all other locations.
func (b *batch) walkOne(root *location, walkgen uint32, enqueue func(*location)) {
// The data flow graph has negative edges (from addressing
// operations), so we use the Bellman-Ford algorithm. However,
// we don't have to worry about infinite negative cycles since
// we bound intermediate dereference counts to 0.
root.walkgen = walkgen
root.derefs = 0
root.dst = nil
todo := []*location{root} // LIFO queue
for len(todo) > 0 {
l := todo[len(todo)-1]
todo = todo[:len(todo)-1]
derefs := l.derefs
// If l.derefs < 0, then l's address flows to root.
addressOf := derefs < 0
if addressOf {
// For a flow path like "root = &l; l = x",
// l's address flows to root, but x's does
// not. We recognize this by lower bounding
// derefs at 0.
derefs = 0
// If l's address flows to a non-transient
// location, then l can't be transiently
// allocated.
if !root.transient && l.transient {
l.transient = false
enqueue(l)
}
}
if b.outlives(root, l) {
// l's value flows to root. If l is a function
// parameter and root is the heap or a
// corresponding result parameter, then record
// that value flow for tagging the function
// later.
if l.isName(ir.PPARAM) {
if (logopt.Enabled() || base.Flag.LowerM >= 2) && !l.escapes {
if base.Flag.LowerM >= 2 {
fmt.Printf("%s: parameter %v leaks to %s with derefs=%d:\n", base.FmtPos(l.n.Pos()), l.n, b.explainLoc(root), derefs)
}
explanation := b.explainPath(root, l)
if logopt.Enabled() {
var e_curfn *ir.Func // TODO(mdempsky): Fix.
logopt.LogOpt(l.n.Pos(), "leak", "escape", ir.FuncName(e_curfn),
fmt.Sprintf("parameter %v leaks to %s with derefs=%d", l.n, b.explainLoc(root), derefs), explanation)
}
}
l.leakTo(root, derefs)
}
// If l's address flows somewhere that
// outlives it, then l needs to be heap
// allocated.
if addressOf && !l.escapes {
if logopt.Enabled() || base.Flag.LowerM >= 2 {
if base.Flag.LowerM >= 2 {
fmt.Printf("%s: %v escapes to heap:\n", base.FmtPos(l.n.Pos()), l.n)
}
explanation := b.explainPath(root, l)
if logopt.Enabled() {
var e_curfn *ir.Func // TODO(mdempsky): Fix.
logopt.LogOpt(l.n.Pos(), "escape", "escape", ir.FuncName(e_curfn), fmt.Sprintf("%v escapes to heap", l.n), explanation)
}
}
l.escapes = true
enqueue(l)
continue
}
}
for i, edge := range l.edges {
if edge.src.escapes {
continue
}
d := derefs + edge.derefs
if edge.src.walkgen != walkgen || edge.src.derefs > d {
edge.src.walkgen = walkgen
edge.src.derefs = d
edge.src.dst = l
edge.src.dstEdgeIdx = i
todo = append(todo, edge.src)
}
}
}
}
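A toy version of the clamped Bellman-Ford relaxation above, run on the graph for "root = &l; l = x" (a sketch; the location and edge encodings are simplified):

```go
package main

import "fmt"

// Edges point from a destination to its sources and carry deref
// deltas: -1 for address-of, +1 for dereference, 0 for plain copies.
type edge struct{ src, delta int }

func main() {
	// Locations: 0 = root, 1 = l, 2 = x.
	// root = &l  =>  edge from 0 to source 1 with delta -1
	// l = x      =>  edge from 1 to source 2 with delta 0
	edges := map[int][]edge{0: {{1, -1}}, 1: {{2, 0}}}

	const inf = 1 << 30
	derefs := []int{0, inf, inf}
	todo := []int{0} // LIFO queue, as in walkOne
	for len(todo) > 0 {
		n := todo[len(todo)-1]
		todo = todo[:len(todo)-1]
		d := derefs[n]
		if d < 0 {
			d = 0 // clamp: past an address-of, only values flow onward
		}
		for _, e := range edges[n] {
			if nd := d + e.delta; nd < derefs[e.src] {
				derefs[e.src] = nd
				todo = append(todo, e.src)
			}
		}
	}
	fmt.Println(derefs[1], derefs[2]) // -1 0: l's address reaches root; x's value does
}
```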
// explainPath prints an explanation of how src flows to the walk root.
func (b *batch) explainPath(root, src *location) []*logopt.LoggedOpt {
visited := make(map[*location]bool)
pos := base.FmtPos(src.n.Pos())
var explanation []*logopt.LoggedOpt
for {
// Prevent infinite loop.
if visited[src] {
if base.Flag.LowerM >= 2 {
fmt.Printf("%s: warning: truncated explanation due to assignment cycle; see golang.org/issue/35518\n", pos)
}
break
}
visited[src] = true
dst := src.dst
edge := &dst.edges[src.dstEdgeIdx]
if edge.src != src {
base.Fatalf("path inconsistency: %v != %v", edge.src, src)
}
explanation = b.explainFlow(pos, dst, src, edge.derefs, edge.notes, explanation)
if dst == root {
break
}
src = dst
}
return explanation
}
func (b *batch) explainFlow(pos string, dst, srcloc *location, derefs int, notes *note, explanation []*logopt.LoggedOpt) []*logopt.LoggedOpt {
ops := "&"
if derefs >= 0 {
ops = strings.Repeat("*", derefs)
}
print := base.Flag.LowerM >= 2
flow := fmt.Sprintf(" flow: %s = %s%v:", b.explainLoc(dst), ops, b.explainLoc(srcloc))
if print {
fmt.Printf("%s:%s\n", pos, flow)
}
if logopt.Enabled() {
var epos src.XPos
if notes != nil {
epos = notes.where.Pos()
} else if srcloc != nil && srcloc.n != nil {
epos = srcloc.n.Pos()
}
var e_curfn *ir.Func // TODO(mdempsky): Fix.
explanation = append(explanation, logopt.NewLoggedOpt(epos, "escflow", "escape", ir.FuncName(e_curfn), flow))
}
for note := notes; note != nil; note = note.next {
if print {
fmt.Printf("%s: from %v (%v) at %s\n", pos, note.where, note.why, base.FmtPos(note.where.Pos()))
}
if logopt.Enabled() {
var e_curfn *ir.Func // TODO(mdempsky): Fix.
explanation = append(explanation, logopt.NewLoggedOpt(note.where.Pos(), "escflow", "escape", ir.FuncName(e_curfn),
fmt.Sprintf(" from %v (%v)", note.where, note.why)))
}
}
return explanation
}
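// For example, with -m=2 the lines printed above take roughly this
// shape (positions and identifiers are illustrative, not real output):
//
//	foo.go:10:6: flow: y = &x:
//	foo.go:10:6: from &x (address-of) at foo.go:10:11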
func (b *batch) explainLoc(l *location) string {
if l == &b.heapLoc {
return "{heap}"
}
if l.n == nil {
// TODO(mdempsky): Omit entirely.
return "{temp}"
}
if l.n.Op() == ir.ONAME {
return fmt.Sprintf("%v", l.n)
}
return fmt.Sprintf("{storage for %v}", l.n)
}
// outlives reports whether values stored in l may survive beyond
// other's lifetime if stack allocated.
func (b *batch) outlives(l, other *location) bool {
// The heap outlives everything.
if l.escapes {
return true
}
// We don't know what callers do with returned values, so
// pessimistically we need to assume they flow to the heap and
// outlive everything too.
if l.isName(ir.PPARAMOUT) {
// Exception: Directly called closures can return
// locations allocated outside of them without forcing
// them to the heap. For example:
//
// var u int // okay to stack allocate
// *(func() *int { return &u }()) = 42
if containsClosure(other.curfn, l.curfn) && l.curfn.ClosureCalled() {
return false
}
return true
}
// If l and other are within the same function, then l
// outlives other if it was declared outside other's loop
// scope. For example:
//
// var l *int
// for {
// l = new(int)
// }
if l.curfn == other.curfn && l.loopDepth < other.loopDepth {
return true
}
// If other is declared within a child closure of where l is
// declared, then l outlives it. For example:
//
// var l *int
// func() {
// l = new(int)
// }
if containsClosure(l.curfn, other.curfn) {
return true
}
return false
}
// containsClosure reports whether c is a closure contained within f.
func containsClosure(f, c *ir.Func) bool {
// Common case.
if f == c {
return false
}
// Closures within function Foo are named like "Foo.funcN..."
// TODO(mdempsky): Better way to recognize this.
fn := f.Sym().Name
cn := c.Sym().Name
return len(cn) > len(fn) && cn[:len(fn)] == fn && cn[len(fn)] == '.'
}
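// To illustrate (hypothetical names): a closure declared in function
// Foo is named "Foo.func1", and one nested inside it "Foo.func1.1",
// so both satisfy the prefix-plus-'.' test above, while an unrelated
// "Foobar" does not, because the byte after the prefix is not '.'.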

View file

@ -0,0 +1,207 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package escape
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"fmt"
)
// stmt evaluates a single Go statement.
func (e *escape) stmt(n ir.Node) {
if n == nil {
return
}
lno := ir.SetPos(n)
defer func() {
base.Pos = lno
}()
if base.Flag.LowerM > 2 {
fmt.Printf("%v:[%d] %v stmt: %v\n", base.FmtPos(base.Pos), e.loopDepth, e.curfn, n)
}
e.stmts(n.Init())
switch n.Op() {
default:
base.Fatalf("unexpected stmt: %v", n)
case ir.ODCLCONST, ir.ODCLTYPE, ir.OFALL, ir.OINLMARK:
// nop
case ir.OBREAK, ir.OCONTINUE, ir.OGOTO:
// TODO(mdempsky): Handle dead code?
case ir.OBLOCK:
n := n.(*ir.BlockStmt)
e.stmts(n.List)
case ir.ODCL:
// Record loop depth at declaration.
n := n.(*ir.Decl)
if !ir.IsBlank(n.X) {
e.dcl(n.X)
}
case ir.OLABEL:
n := n.(*ir.LabelStmt)
switch e.labels[n.Label] {
case nonlooping:
if base.Flag.LowerM > 2 {
fmt.Printf("%v:%v non-looping label\n", base.FmtPos(base.Pos), n)
}
case looping:
if base.Flag.LowerM > 2 {
fmt.Printf("%v: %v looping label\n", base.FmtPos(base.Pos), n)
}
e.loopDepth++
default:
base.Fatalf("label missing tag")
}
delete(e.labels, n.Label)
case ir.OIF:
n := n.(*ir.IfStmt)
e.discard(n.Cond)
e.block(n.Body)
e.block(n.Else)
case ir.OFOR, ir.OFORUNTIL:
n := n.(*ir.ForStmt)
e.loopDepth++
e.discard(n.Cond)
e.stmt(n.Post)
e.block(n.Body)
e.loopDepth--
case ir.ORANGE:
// for Key, Value = range X { Body }
n := n.(*ir.RangeStmt)
// X is evaluated outside the loop.
tmp := e.newLoc(nil, false)
e.expr(tmp.asHole(), n.X)
e.loopDepth++
ks := e.addrs([]ir.Node{n.Key, n.Value})
if n.X.Type().IsArray() {
e.flow(ks[1].note(n, "range"), tmp)
} else {
e.flow(ks[1].deref(n, "range-deref"), tmp)
}
e.reassigned(ks, n)
e.block(n.Body)
e.loopDepth--
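// To illustrate (an illustrative sketch): in "for _, v := range a",
// v receives a copy of the element when a is an array ("range"), but
// is reached through a pointer when a is a slice, map, or channel
// ("range-deref").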
case ir.OSWITCH:
n := n.(*ir.SwitchStmt)
if guard, ok := n.Tag.(*ir.TypeSwitchGuard); ok {
var ks []hole
if guard.Tag != nil {
for _, cas := range n.Cases {
cv := cas.Var
k := e.dcl(cv) // type switch variables have no ODCL.
if cv.Type().HasPointers() {
ks = append(ks, k.dotType(cv.Type(), cas, "switch case"))
}
}
}
e.expr(e.teeHole(ks...), n.Tag.(*ir.TypeSwitchGuard).X)
} else {
e.discard(n.Tag)
}
for _, cas := range n.Cases {
e.discards(cas.List)
e.block(cas.Body)
}
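// To illustrate (an illustrative sketch): in
// "switch v := x.(type) { case *T: ... }", each case clause gets its
// own copy of v, declared via e.dcl above rather than through a
// separate ODCL statement.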
case ir.OSELECT:
n := n.(*ir.SelectStmt)
for _, cas := range n.Cases {
e.stmt(cas.Comm)
e.block(cas.Body)
}
case ir.ORECV:
// TODO(mdempsky): Consider e.discard(n.Left).
n := n.(*ir.UnaryExpr)
e.exprSkipInit(e.discardHole(), n) // already visited n.Ninit
case ir.OSEND:
n := n.(*ir.SendStmt)
e.discard(n.Chan)
e.assignHeap(n.Value, "send", n)
case ir.OAS:
n := n.(*ir.AssignStmt)
e.assignList([]ir.Node{n.X}, []ir.Node{n.Y}, "assign", n)
case ir.OASOP:
n := n.(*ir.AssignOpStmt)
// TODO(mdempsky): Worry about OLSH/ORSH?
e.assignList([]ir.Node{n.X}, []ir.Node{n.Y}, "assign", n)
case ir.OAS2:
n := n.(*ir.AssignListStmt)
e.assignList(n.Lhs, n.Rhs, "assign-pair", n)
case ir.OAS2DOTTYPE: // v, ok = x.(type)
n := n.(*ir.AssignListStmt)
e.assignList(n.Lhs, n.Rhs, "assign-pair-dot-type", n)
case ir.OAS2MAPR: // v, ok = m[k]
n := n.(*ir.AssignListStmt)
e.assignList(n.Lhs, n.Rhs, "assign-pair-mapr", n)
case ir.OAS2RECV, ir.OSELRECV2: // v, ok = <-ch
n := n.(*ir.AssignListStmt)
e.assignList(n.Lhs, n.Rhs, "assign-pair-receive", n)
case ir.OAS2FUNC:
n := n.(*ir.AssignListStmt)
e.stmts(n.Rhs[0].Init())
ks := e.addrs(n.Lhs)
e.call(ks, n.Rhs[0])
e.reassigned(ks, n)
case ir.ORETURN:
n := n.(*ir.ReturnStmt)
results := e.curfn.Type().Results().FieldSlice()
dsts := make([]ir.Node, len(results))
for i, res := range results {
dsts[i] = res.Nname.(*ir.Name)
}
e.assignList(dsts, n.Results, "return", n)
case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER, ir.OINLCALL, ir.OCLOSE, ir.OCOPY, ir.ODELETE, ir.OPANIC, ir.OPRINT, ir.OPRINTN, ir.ORECOVER:
e.call(nil, n)
case ir.OGO, ir.ODEFER:
n := n.(*ir.GoDeferStmt)
e.goDeferStmt(n)
case ir.OTAILCALL:
// TODO(mdempsky): Treat like a normal call? esc.go used to just ignore it.
}
}
func (e *escape) stmts(l ir.Nodes) {
for _, n := range l {
e.stmt(n)
}
}
// block is like stmts, but preserves loopDepth.
func (e *escape) block(l ir.Nodes) {
old := e.loopDepth
e.stmts(l)
e.loopDepth = old
}
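// For example, the branches of an "if" are walked with block, leaving
// e.loopDepth unchanged, while OFOR bodies are walked between the
// explicit loopDepth++/-- pair in stmt above.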
func (e *escape) dcl(n *ir.Name) hole {
if n.Curfn != e.curfn || n.IsClosureVar() {
base.Fatalf("bad declaration of %v", n)
}
loc := e.oldLoc(n)
loc.loopDepth = e.loopDepth
return loc.asHole()
}

View file

@ -0,0 +1,215 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package escape
import (
"cmd/compile/internal/ir"
"cmd/compile/internal/typecheck"
)
func isSliceSelfAssign(dst, src ir.Node) bool {
// Detect the following special case.
//
// func (b *Buffer) Foo() {
// n, m := ...
// b.buf = b.buf[n:m]
// }
//
// This assignment is a no-op for escape analysis:
// it does not store any new pointers into b that were not already there.
// However, without this special case b will escape, because we assign to OIND/ODOTPTR.
// Here we assume that the statement will not contain calls,
// that is, that the order pass will move any calls to init.
// Otherwise the base ONAME value could change between the moments
// when we evaluate it for dst and for src.
// dst is ONAME dereference.
var dstX ir.Node
switch dst.Op() {
default:
return false
case ir.ODEREF:
dst := dst.(*ir.StarExpr)
dstX = dst.X
case ir.ODOTPTR:
dst := dst.(*ir.SelectorExpr)
dstX = dst.X
}
if dstX.Op() != ir.ONAME {
return false
}
// src is a slice operation.
switch src.Op() {
case ir.OSLICE, ir.OSLICE3, ir.OSLICESTR:
// OK.
case ir.OSLICEARR, ir.OSLICE3ARR:
// Since arrays are embedded in their containing object,
// slicing a non-pointer array introduces a new pointer into b that was
// not already there (a pointer to b itself). After such an assignment,
// if b's contents escape, b escapes as well. If we ignored such
// OSLICEARR, we would conclude that b does not escape when its
// contents do.
//
// Pointer to an array is OK since it's not stored inside b directly.
// For slicing an array (not pointer to array), there is an implicit OADDR.
// We check that to determine non-pointer array slicing.
src := src.(*ir.SliceExpr)
if src.X.Op() == ir.OADDR {
return false
}
default:
return false
}
// slice is applied to ONAME dereference.
var baseX ir.Node
switch base := src.(*ir.SliceExpr).X; base.Op() {
default:
return false
case ir.ODEREF:
base := base.(*ir.StarExpr)
baseX = base.X
case ir.ODOTPTR:
base := base.(*ir.SelectorExpr)
baseX = base.X
}
if baseX.Op() != ir.ONAME {
return false
}
// dst and src reference the same base ONAME.
return dstX.(*ir.Name) == baseX.(*ir.Name)
}
// isSelfAssign reports whether assignment from src to dst can
// be ignored by the escape analysis as it's effectively a self-assignment.
func isSelfAssign(dst, src ir.Node) bool {
if isSliceSelfAssign(dst, src) {
return true
}
// Detect trivial assignments that assign back to the same object.
//
// It covers these cases:
// val.x = val.y
// val.x[i] = val.y[j]
// val.x1.x2 = val.x1.y2
// ... etc
//
// These assignments do not change assigned object lifetime.
if dst == nil || src == nil || dst.Op() != src.Op() {
return false
}
// The expression prefix must be both "safe" and identical.
switch dst.Op() {
case ir.ODOT, ir.ODOTPTR:
// Safe trailing accessors that are permitted to differ.
dst := dst.(*ir.SelectorExpr)
src := src.(*ir.SelectorExpr)
return ir.SameSafeExpr(dst.X, src.X)
case ir.OINDEX:
dst := dst.(*ir.IndexExpr)
src := src.(*ir.IndexExpr)
if mayAffectMemory(dst.Index) || mayAffectMemory(src.Index) {
return false
}
return ir.SameSafeExpr(dst.X, src.X)
default:
return false
}
}
// mayAffectMemory reports whether evaluation of n may affect the program's
// memory state. If the expression can't affect memory state, then it can be
// safely ignored by the escape analysis.
func mayAffectMemory(n ir.Node) bool {
// We may want to use a list of "memory safe" ops instead of the
// generally "side-effect free" ones used here; the former would admit
// calls and other ops that can allocate or change global state. For
// now, it's safer to start with the latter.
//
// We're ignoring things like division by zero, index out of range,
// and nil pointer dereference here.
// TODO(rsc): It seems like it should be possible to replace this with
// an ir.Any looking for any op that's not the ones in the case statement.
// But that produces changes in the compiled output detected by buildall.
switch n.Op() {
case ir.ONAME, ir.OLITERAL, ir.ONIL:
return false
case ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.OLSH, ir.ORSH, ir.OAND, ir.OANDNOT, ir.ODIV, ir.OMOD:
n := n.(*ir.BinaryExpr)
return mayAffectMemory(n.X) || mayAffectMemory(n.Y)
case ir.OINDEX:
n := n.(*ir.IndexExpr)
return mayAffectMemory(n.X) || mayAffectMemory(n.Index)
case ir.OCONVNOP, ir.OCONV:
n := n.(*ir.ConvExpr)
return mayAffectMemory(n.X)
case ir.OLEN, ir.OCAP, ir.ONOT, ir.OBITNOT, ir.OPLUS, ir.ONEG, ir.OALIGNOF, ir.OOFFSETOF, ir.OSIZEOF:
n := n.(*ir.UnaryExpr)
return mayAffectMemory(n.X)
case ir.ODOT, ir.ODOTPTR:
n := n.(*ir.SelectorExpr)
return mayAffectMemory(n.X)
case ir.ODEREF:
n := n.(*ir.StarExpr)
return mayAffectMemory(n.X)
default:
return true
}
}
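// To illustrate (hypothetical expressions): "v.x[i] = v.y[i]" can be
// treated as a self-assignment, but "v.x[f()] = v.y[f()]" cannot,
// because the call f() may affect memory and the two sides may then
// refer to different elements.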
// HeapAllocReason returns the reason the given Node must be heap
// allocated, or the empty string if it does not need to be.
func HeapAllocReason(n ir.Node) string {
if n == nil || n.Type() == nil {
return ""
}
// Parameters are always passed via the stack.
if n.Op() == ir.ONAME {
n := n.(*ir.Name)
if n.Class == ir.PPARAM || n.Class == ir.PPARAMOUT {
return ""
}
}
if n.Type().Width > ir.MaxStackVarSize {
return "too large for stack"
}
if (n.Op() == ir.ONEW || n.Op() == ir.OPTRLIT) && n.Type().Elem().Width > ir.MaxImplicitStackVarSize {
return "too large for stack"
}
if n.Op() == ir.OCLOSURE && typecheck.ClosureType(n.(*ir.ClosureExpr)).Size() > ir.MaxImplicitStackVarSize {
return "too large for stack"
}
if n.Op() == ir.OMETHVALUE && typecheck.MethodValueType(n.(*ir.SelectorExpr)).Size() > ir.MaxImplicitStackVarSize {
return "too large for stack"
}
if n.Op() == ir.OMAKESLICE {
n := n.(*ir.MakeExpr)
r := n.Cap
if r == nil {
r = n.Len
}
if !ir.IsSmallIntConst(r) {
return "non-constant size"
}
if t := n.Type(); t.Elem().Width != 0 && ir.Int64Val(r) > ir.MaxImplicitStackVarSize/t.Elem().Width {
return "too large for stack"
}
}
return ""
}
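// To illustrate (a hedged sketch of typical results):
//
//	s := make([]int, n)      // n not constant: "non-constant size"
//	b := make([]byte, 1<<20) // exceeds MaxImplicitStackVarSize: "too large for stack"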

View file

@ -5,46 +5,16 @@
package gc
import (
"fmt"
"go/constant"
"cmd/compile/internal/base"
"cmd/compile/internal/inline"
"cmd/compile/internal/ir"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/internal/bio"
"fmt"
"go/constant"
)
func exportf(bout *bio.Writer, format string, args ...interface{}) {
fmt.Fprintf(bout, format, args...)
if base.Debug.Export != 0 {
fmt.Printf(format, args...)
}
}
func dumpexport(bout *bio.Writer) {
p := &exporter{marked: make(map[*types.Type]bool)}
for _, n := range typecheck.Target.Exports {
// Must catch it here rather than in Export(), because the type may
// not be fully set (still TFORW) when Export() is called.
if n.Type() != nil && n.Type().HasTParam() {
base.Fatalf("Cannot (yet) export a generic type: %v", n)
}
p.markObject(n)
}
// The linker also looks for the $$ marker - use char after $$ to distinguish format.
exportf(bout, "\n$$B\n") // indicate binary export format
off := bout.Offset()
typecheck.WriteExports(bout.Writer)
size := bout.Offset() - off
exportf(bout, "\n$$\n")
if base.Debug.Export != 0 {
fmt.Printf("BenchmarkExportSize:%s 1 %d bytes\n", base.Ctxt.Pkgpath, size)
}
}
func dumpasmhdr() {
b, err := bio.Create(base.Flag.AsmHdr)
if err != nil {
@ -79,83 +49,3 @@ func dumpasmhdr() {
b.Close()
}
type exporter struct {
marked map[*types.Type]bool // types already seen by markType
}
// markObject visits a reachable object.
func (p *exporter) markObject(n ir.Node) {
if n.Op() == ir.ONAME {
n := n.(*ir.Name)
if n.Class == ir.PFUNC {
inline.Inline_Flood(n, typecheck.Export)
}
}
p.markType(n.Type())
}
// markType recursively visits types reachable from t to identify
// functions whose inline bodies may be needed.
func (p *exporter) markType(t *types.Type) {
if p.marked[t] {
return
}
p.marked[t] = true
// If this is a named type, mark all of its associated
// methods. Skip interface types because t.Methods contains
// only their unexpanded method set (i.e., exclusive of
// interface embeddings), and the switch statement below
// handles their full method set.
if t.Sym() != nil && t.Kind() != types.TINTER {
for _, m := range t.Methods().Slice() {
if types.IsExported(m.Sym.Name) {
p.markObject(ir.AsNode(m.Nname))
}
}
}
// Recursively mark any types that can be produced given a
// value of type t: dereferencing a pointer; indexing or
// iterating over an array, slice, or map; receiving from a
// channel; accessing a struct field or interface method; or
// calling a function.
//
// Notably, we don't mark function parameter types, because
// the user already needs some way to construct values of
// those types.
switch t.Kind() {
case types.TPTR, types.TARRAY, types.TSLICE:
p.markType(t.Elem())
case types.TCHAN:
if t.ChanDir().CanRecv() {
p.markType(t.Elem())
}
case types.TMAP:
p.markType(t.Key())
p.markType(t.Elem())
case types.TSTRUCT:
for _, f := range t.FieldSlice() {
if types.IsExported(f.Sym.Name) || f.Embedded != 0 {
p.markType(f.Type)
}
}
case types.TFUNC:
for _, f := range t.Results().FieldSlice() {
p.markType(f.Type)
}
case types.TINTER:
for _, f := range t.AllMethods().Slice() {
if types.IsExported(f.Sym.Name) {
p.markType(f.Type)
}
}
}
}

View file

@ -32,6 +32,7 @@ import (
"log"
"os"
"runtime"
"sort"
)
func hidePanic() {
@ -159,9 +160,6 @@ func Main(archInit func(*ssagen.ArchInfo)) {
dwarf.EnableLogging(base.Debug.DwarfInl != 0)
}
if base.Debug.SoftFloat != 0 {
if buildcfg.Experiment.RegabiArgs {
log.Fatalf("softfloat mode with GOEXPERIMENT=regabiargs not implemented ")
}
ssagen.Arch.SoftFloat = true
}
@ -181,23 +179,41 @@ func Main(archInit func(*ssagen.ArchInfo)) {
typecheck.Target = new(ir.Package)
typecheck.NeedITab = func(t, iface *types.Type) { reflectdata.ITabAddr(t, iface) }
typecheck.NeedRuntimeType = reflectdata.NeedRuntimeType // TODO(rsc): TypeSym for lock?
base.AutogeneratedPos = makePos(src.NewFileBase("<autogenerated>", "<autogenerated>"), 1, 0)
typecheck.InitUniverse()
typecheck.InitRuntime()
// Parse and typecheck input.
noder.LoadPackage(flag.Args())
dwarfgen.RecordPackageName()
// Prepare for backend processing. This must happen before pkginit,
// because it generates itabs for initializing global variables.
ssagen.InitConfig()
// Build init task.
if initTask := pkginit.Task(); initTask != nil {
typecheck.Export(initTask)
}
// Stability quirk: sort top-level declarations, so we're not
// sensitive to the order that functions are added. In particular,
// the order that noder+typecheck add function closures is very
// subtle, and not important to reproduce.
//
// Note: This needs to happen after pkginit.Task, otherwise it risks
// changing the order in which top-level variables are initialized.
if base.Debug.UnifiedQuirks != 0 {
s := typecheck.Target.Decls
sort.SliceStable(s, func(i, j int) bool {
return s[i].Pos().Before(s[j].Pos())
})
}
// Eliminate some obviously dead code.
// Must happen after typechecking.
for _, n := range typecheck.Target.Decls {
@ -252,6 +268,11 @@ func Main(archInit func(*ssagen.ArchInfo)) {
base.Timer.Start("fe", "escapes")
escape.Funcs(typecheck.Target.Decls)
// TODO(mdempsky): This is a hack. We need a proper, global work
// queue for scheduling function compilation so components don't
// need to adjust their behavior depending on when they're called.
reflectdata.AfterGlobalEscapeAnalysis = true
// Collect information for go:nowritebarrierrec
// checking. This must happen before transforming closures during Walk
// We'll do the final check after write barriers are
@ -260,17 +281,7 @@ func Main(archInit func(*ssagen.ArchInfo)) {
ssagen.EnableNoWriteBarrierRecCheck()
}
// Prepare for SSA compilation.
// This must be before CompileITabs, because CompileITabs
// can trigger function compilation.
typecheck.InitRuntime()
ssagen.InitConfig()
// Just before compilation, compile itabs found on
// the right side of OCONVIFACE so that methods
// can be de-virtualized during compilation.
ir.CurFunc = nil
reflectdata.CompileITabs()
// Compile top level functions.
// Don't use range--walk can add functions to Target.Decls.

View file

@ -7,6 +7,7 @@ package gc
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/noder"
"cmd/compile/internal/objw"
"cmd/compile/internal/reflectdata"
"cmd/compile/internal/staticdata"
@ -103,7 +104,7 @@ func finishArchiveEntry(bout *bio.Writer, start int64, name string) {
func dumpCompilerObj(bout *bio.Writer) {
printObjHeader(bout)
dumpexport(bout)
noder.WriteExports(bout)
}
func dumpdata() {
@ -116,7 +117,7 @@ func dumpdata() {
addsignats(typecheck.Target.Externs)
reflectdata.WriteRuntimeTypes()
reflectdata.WriteTabs()
numPTabs, numITabs := reflectdata.CountTabs()
numPTabs := reflectdata.CountPTabs()
reflectdata.WriteImportStrings()
reflectdata.WriteBasicTypes()
dumpembeds()
@ -157,13 +158,10 @@ func dumpdata() {
if numExports != len(typecheck.Target.Exports) {
base.Fatalf("Target.Exports changed after compile functions loop")
}
newNumPTabs, newNumITabs := reflectdata.CountTabs()
newNumPTabs := reflectdata.CountPTabs()
if newNumPTabs != numPTabs {
base.Fatalf("ptabs changed after compile functions loop")
}
if newNumITabs != numITabs {
base.Fatalf("itabs changed after compile functions loop")
}
}
func dumpLinkerObj(bout *bio.Writer) {

View file

@ -1,4 +1,3 @@
// UNREVIEWED
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

View file

@ -1,4 +1,3 @@
// UNREVIEWED
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
@ -156,7 +155,7 @@ func Import(packages map[string]*types2.Package, path, srcDir string, lookup fun
// binary export format starts with a 'c', 'd', or 'v'
// (from "version"). Select appropriate importer.
if len(data) > 0 && data[0] == 'i' {
_, pkg, err = iImportData(packages, data[1:], id)
pkg, err = ImportData(packages, string(data[1:]), id)
} else {
err = fmt.Errorf("import %q: old binary export format no longer supported (recompile library)", path)
}

View file

@ -1,4 +1,3 @@
// UNREVIEWED
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
@ -10,7 +9,6 @@ import (
"cmd/compile/internal/types2"
"fmt"
"internal/testenv"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
@ -64,7 +62,7 @@ const maxTime = 30 * time.Second
func testDir(t *testing.T, dir string, endTime time.Time) (nimports int) {
dirname := filepath.Join(runtime.GOROOT(), "pkg", runtime.GOOS+"_"+runtime.GOARCH, dir)
list, err := ioutil.ReadDir(dirname)
list, err := os.ReadDir(dirname)
if err != nil {
t.Fatalf("testDir(%s): %s", dirname, err)
}
@ -92,7 +90,7 @@ func testDir(t *testing.T, dir string, endTime time.Time) (nimports int) {
}
func mktmpdir(t *testing.T) string {
tmpdir, err := ioutil.TempDir("", "gcimporter_test")
tmpdir, err := os.MkdirTemp("", "gcimporter_test")
if err != nil {
t.Fatal("mktmpdir:", err)
}
@ -142,7 +140,7 @@ func TestVersionHandling(t *testing.T) {
}
const dir = "./testdata/versions"
list, err := ioutil.ReadDir(dir)
list, err := os.ReadDir(dir)
if err != nil {
t.Fatal(err)
}
@ -195,7 +193,7 @@ func TestVersionHandling(t *testing.T) {
// create file with corrupted export data
// 1) read file
data, err := ioutil.ReadFile(filepath.Join(dir, name))
data, err := os.ReadFile(filepath.Join(dir, name))
if err != nil {
t.Fatal(err)
}
@ -212,7 +210,7 @@ func TestVersionHandling(t *testing.T) {
// 4) write the file
pkgpath += "_corrupted"
filename := filepath.Join(corruptdir, pkgpath) + ".a"
ioutil.WriteFile(filename, data, 0666)
os.WriteFile(filename, data, 0666)
// test that importing the corrupted file results in an error
_, err = Import(make(map[string]*types2.Package), pkgpath, corruptdir, nil)
@ -261,8 +259,7 @@ var importedObjectTests = []struct {
{"io.Reader", "type Reader interface{Read(p []byte) (n int, err error)}"},
{"io.ReadWriter", "type ReadWriter interface{Reader; Writer}"},
{"go/ast.Node", "type Node interface{End() go/token.Pos; Pos() go/token.Pos}"},
// go/types.Type has grown much larger - excluded for now
// {"go/types.Type", "type Type interface{String() string; Underlying() Type}"},
{"go/types.Type", "type Type interface{String() string; Underlying() Type}"},
}
func TestImportedTypes(t *testing.T) {
@ -457,17 +454,17 @@ func TestIssue13898(t *testing.T) {
t.Fatal("go/types not found")
}
// look for go/types2.Object type
// look for go/types.Object type
obj := lookupObj(t, goTypesPkg.Scope(), "Object")
typ, ok := obj.Type().(*types2.Named)
if !ok {
t.Fatalf("go/types2.Object type is %v; wanted named type", typ)
t.Fatalf("go/types.Object type is %v; wanted named type", typ)
}
// lookup go/types2.Object.Pkg method
// lookup go/types.Object.Pkg method
m, index, indirect := types2.LookupFieldOrMethod(typ, false, nil, "Pkg")
if m == nil {
t.Fatalf("go/types2.Object.Pkg not found (index = %v, indirect = %v)", index, indirect)
t.Fatalf("go/types.Object.Pkg not found (index = %v, indirect = %v)", index, indirect)
}
// the method must belong to go/types

View file

@ -1,4 +1,3 @@
// UNREVIEWED
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
@ -9,7 +8,6 @@
package importer
import (
"bytes"
"cmd/compile/internal/syntax"
"cmd/compile/internal/types2"
"encoding/binary"
@ -19,10 +17,11 @@ import (
"io"
"math/big"
"sort"
"strings"
)
type intReader struct {
*bytes.Reader
*strings.Reader
path string
}
@ -42,6 +41,21 @@ func (r *intReader) uint64() uint64 {
return i
}
// Keep this in sync with constants in iexport.go.
const (
iexportVersionGo1_11 = 0
iexportVersionPosCol = 1
// TODO: before release, change this back to 2.
iexportVersionGenerics = iexportVersionPosCol
iexportVersionCurrent = iexportVersionGenerics
)
type ident struct {
pkg string
name string
}
const predeclReserved = 32
type itag uint64
@ -57,6 +71,9 @@ const (
signatureType
structType
interfaceType
typeParamType
instType
unionType
)
const io_SeekCurrent = 1 // io.SeekCurrent (not defined in Go 1.4)
@ -65,8 +82,8 @@ const io_SeekCurrent = 1 // io.SeekCurrent (not defined in Go 1.4)
// and returns a reference to the package.
// If the export data version is not recognized or the format is otherwise
// compromised, an error is returned.
func iImportData(imports map[string]*types2.Package, data []byte, path string) (_ int, pkg *types2.Package, err error) {
const currentVersion = 1
func ImportData(imports map[string]*types2.Package, data, path string) (pkg *types2.Package, err error) {
const currentVersion = iexportVersionCurrent
version := int64(-1)
defer func() {
if e := recover(); e != nil {
@ -78,13 +95,17 @@ func iImportData(imports map[string]*types2.Package, data []byte, path string) (
}
}()
r := &intReader{bytes.NewReader(data), path}
r := &intReader{strings.NewReader(data), path}
version = int64(r.uint64())
switch version {
case currentVersion, 0:
case /* iexportVersionGenerics, */ iexportVersionPosCol, iexportVersionGo1_11:
default:
errorf("unknown iexport format version %d", version)
if version > iexportVersionGenerics {
errorf("unstable iexport format version %d, just rebuild compiler and std library", version)
} else {
errorf("unknown iexport format version %d", version)
}
}
sLen := int64(r.uint64())
@ -96,16 +117,20 @@ func iImportData(imports map[string]*types2.Package, data []byte, path string) (
r.Seek(sLen+dLen, io_SeekCurrent)
p := iimporter{
ipath: path,
version: int(version),
exportVersion: version,
ipath: path,
version: int(version),
stringData: stringData,
stringCache: make(map[uint64]string),
pkgCache: make(map[uint64]*types2.Package),
stringData: stringData,
pkgCache: make(map[uint64]*types2.Package),
posBaseCache: make(map[uint64]*syntax.PosBase),
declData: declData,
pkgIndex: make(map[*types2.Package]map[string]uint64),
typCache: make(map[uint64]types2.Type),
// Separate map for typeparams, keyed by their package and unique
// name (name with subscript).
tparamIndex: make(map[ident]types2.Type),
}
for i, pt := range predeclared {
@ -117,17 +142,22 @@ func iImportData(imports map[string]*types2.Package, data []byte, path string) (
pkgPathOff := r.uint64()
pkgPath := p.stringAt(pkgPathOff)
pkgName := p.stringAt(r.uint64())
_ = r.uint64() // package height; unused by go/types
pkgHeight := int(r.uint64())
if pkgPath == "" {
pkgPath = path
}
pkg := imports[pkgPath]
if pkg == nil {
pkg = types2.NewPackage(pkgPath, pkgName)
pkg = types2.NewPackageHeight(pkgPath, pkgName, pkgHeight)
imports[pkgPath] = pkg
} else if pkg.Name() != pkgName {
errorf("conflicting names %s and %s for package %q", pkg.Name(), pkgName, path)
} else {
if pkg.Name() != pkgName {
errorf("conflicting names %s and %s for package %q", pkg.Name(), pkgName, path)
}
if pkg.Height() != pkgHeight {
errorf("conflicting heights %v and %v for package %q", pkg.Height(), pkgHeight, path)
}
}
p.pkgCache[pkgPathOff] = pkg
@ -153,10 +183,6 @@ func iImportData(imports map[string]*types2.Package, data []byte, path string) (
p.doDecl(localpkg, name)
}
for _, typ := range p.interfaceList {
typ.Complete()
}
// record all referenced packages as imports
list := append(([]*types2.Package)(nil), pkgList[1:]...)
sort.Sort(byPath(list))
@ -165,21 +191,22 @@ func iImportData(imports map[string]*types2.Package, data []byte, path string) (
// package was imported completely and without errors
localpkg.MarkComplete()
consumed, _ := r.Seek(0, io_SeekCurrent)
return int(consumed), localpkg, nil
return localpkg, nil
}
type iimporter struct {
ipath string
version int
exportVersion int64
ipath string
version int
stringData []byte
stringCache map[uint64]string
pkgCache map[uint64]*types2.Package
stringData string
pkgCache map[uint64]*types2.Package
posBaseCache map[uint64]*syntax.PosBase
declData []byte
pkgIndex map[*types2.Package]map[string]uint64
typCache map[uint64]types2.Type
declData string
pkgIndex map[*types2.Package]map[string]uint64
typCache map[uint64]types2.Type
tparamIndex map[ident]types2.Type
interfaceList []*types2.Interface
}
@ -199,24 +226,21 @@ func (p *iimporter) doDecl(pkg *types2.Package, name string) {
// Reader.Reset is not available in Go 1.4.
// Use bytes.NewReader for now.
// r.declReader.Reset(p.declData[off:])
r.declReader = *bytes.NewReader(p.declData[off:])
r.declReader = *strings.NewReader(p.declData[off:])
r.obj(name)
}
func (p *iimporter) stringAt(off uint64) string {
if s, ok := p.stringCache[off]; ok {
return s
}
var x [binary.MaxVarintLen64]byte
n := copy(x[:], p.stringData[off:])
slen, n := binary.Uvarint(p.stringData[off:])
slen, n := binary.Uvarint(x[:n])
if n <= 0 {
errorf("varint failed")
}
spos := off + uint64(n)
s := string(p.stringData[spos : spos+slen])
p.stringCache[off] = s
return s
return p.stringData[spos : spos+slen]
}
func (p *iimporter) pkgAt(off uint64) *types2.Package {
@ -228,6 +252,16 @@ func (p *iimporter) pkgAt(off uint64) *types2.Package {
return nil
}
func (p *iimporter) posBaseAt(off uint64) *syntax.PosBase {
if posBase, ok := p.posBaseCache[off]; ok {
return posBase
}
filename := p.stringAt(off)
posBase := syntax.NewFileBase(filename)
p.posBaseCache[off] = posBase
return posBase
}
func (p *iimporter) typAt(off uint64, base *types2.Named) types2.Type {
if t, ok := p.typCache[off]; ok && (base == nil || !isInterface(t)) {
return t
@ -241,7 +275,7 @@ func (p *iimporter) typAt(off uint64, base *types2.Named) types2.Type {
// Reader.Reset is not available in Go 1.4.
// Use bytes.NewReader for now.
// r.declReader.Reset(p.declData[off-predeclReserved:])
r.declReader = *bytes.NewReader(p.declData[off-predeclReserved:])
r.declReader = *strings.NewReader(p.declData[off-predeclReserved:])
t := r.doType(base)
if base == nil || !isInterface(t) {
@ -251,12 +285,12 @@ func (p *iimporter) typAt(off uint64, base *types2.Named) types2.Type {
}
type importReader struct {
p *iimporter
declReader bytes.Reader
currPkg *types2.Package
prevFile string
prevLine int64
prevColumn int64
p *iimporter
declReader strings.Reader
currPkg *types2.Package
prevPosBase *syntax.PosBase
prevLine int64
prevColumn int64
}
func (r *importReader) obj(name string) {
@ -274,16 +308,28 @@ func (r *importReader) obj(name string) {
r.declare(types2.NewConst(pos, r.currPkg, name, typ, val))
case 'F':
case 'F', 'G':
var tparams []*types2.TypeName
if tag == 'G' {
tparams = r.tparamList()
}
sig := r.signature(nil)
sig.SetTParams(tparams)
r.declare(types2.NewFunc(pos, r.currPkg, name, sig))
case 'T':
case 'T', 'U':
var tparams []*types2.TypeName
if tag == 'U' {
tparams = r.tparamList()
}
// Types can be recursive. We need to set up a stub
// declaration before recursing.
obj := types2.NewTypeName(pos, r.currPkg, name, nil)
named := types2.NewNamed(obj, nil, nil)
if tag == 'U' {
named.SetTParams(tparams)
}
r.declare(obj)
underlying := r.p.typAt(r.uint64(), named).Underlying()
@ -296,10 +342,43 @@ func (r *importReader) obj(name string) {
recv := r.param()
msig := r.signature(recv)
// If the receiver has any targs, set those as the
// rparams of the method (since those are the
// typeparams being used in the method sig/body).
targs := baseType(msig.Recv().Type()).TArgs()
if len(targs) > 0 {
rparams := make([]*types2.TypeName, len(targs))
for i, targ := range targs {
rparams[i] = types2.AsTypeParam(targ).Obj()
}
msig.SetRParams(rparams)
}
named.AddMethod(types2.NewFunc(mpos, r.currPkg, mname, msig))
}
}
case 'P':
// We need to "declare" a typeparam in order to have a name that
// can be referenced recursively (if needed) in the type param's
// bound.
if r.p.exportVersion < iexportVersionGenerics {
errorf("unexpected type param type")
}
name0, sub := parseSubscript(name)
tn := types2.NewTypeName(pos, r.currPkg, name0, nil)
t := (*types2.Checker)(nil).NewTypeParam(tn, nil)
if sub == 0 {
errorf("missing subscript")
}
t.SetId(sub)
// To handle recursive references to the typeparam within its
// bound, save the partial type in tparamIndex before reading the bounds.
id := ident{r.currPkg.Name(), name}
r.p.tparamIndex[id] = t
t.SetConstraint(r.typ())
case 'V':
typ := r.typ()
@ -439,12 +518,11 @@ func (r *importReader) pos() syntax.Pos {
r.posv0()
}
if r.prevFile == "" && r.prevLine == 0 && r.prevColumn == 0 {
if (r.prevPosBase == nil || r.prevPosBase.Filename() == "") && r.prevLine == 0 && r.prevColumn == 0 {
return syntax.Pos{}
}
// TODO(gri) fix this
// return r.p.fake.pos(r.prevFile, int(r.prevLine), int(r.prevColumn))
return syntax.Pos{}
return syntax.MakePos(r.prevPosBase, uint(r.prevLine), uint(r.prevColumn))
}
func (r *importReader) posv0() {
@ -454,7 +532,7 @@ func (r *importReader) posv0() {
} else if l := r.int64(); l == -1 {
r.prevLine += deltaNewFile
} else {
r.prevFile = r.string()
r.prevPosBase = r.posBase()
r.prevLine = l
}
}
@ -466,7 +544,7 @@ func (r *importReader) posv1() {
delta = r.int64()
r.prevLine += delta >> 1
if delta&1 != 0 {
r.prevFile = r.string()
r.prevPosBase = r.posBase()
}
}
}
@ -480,8 +558,9 @@ func isInterface(t types2.Type) bool {
return ok
}
func (r *importReader) pkg() *types2.Package { return r.p.pkgAt(r.uint64()) }
func (r *importReader) string() string { return r.p.stringAt(r.uint64()) }
func (r *importReader) pkg() *types2.Package { return r.p.pkgAt(r.uint64()) }
func (r *importReader) string() string { return r.p.stringAt(r.uint64()) }
func (r *importReader) posBase() *syntax.PosBase { return r.p.posBaseAt(r.uint64()) }
func (r *importReader) doType(base *types2.Named) types2.Type {
switch k := r.kind(); k {
@ -554,6 +633,47 @@ func (r *importReader) doType(base *types2.Named) types2.Type {
typ := types2.NewInterfaceType(methods, embeddeds)
r.p.interfaceList = append(r.p.interfaceList, typ)
return typ
case typeParamType:
if r.p.exportVersion < iexportVersionGenerics {
errorf("unexpected type param type")
}
pkg, name := r.qualifiedIdent()
id := ident{pkg.Name(), name}
if t, ok := r.p.tparamIndex[id]; ok {
// We're already in the process of importing this typeparam.
return t
}
// Otherwise, import the definition of the typeparam now.
r.p.doDecl(pkg, name)
return r.p.tparamIndex[id]
case instType:
if r.p.exportVersion < iexportVersionGenerics {
errorf("unexpected instantiation type")
}
pos := r.pos()
len := r.uint64()
targs := make([]types2.Type, len)
for i := range targs {
targs[i] = r.typ()
}
baseType := r.typ()
// The imported instantiated type doesn't include any methods, so
// we must always use the methods of the base (orig) type.
var check *types2.Checker // TODO provide a non-nil *Checker
t := check.Instantiate(pos, baseType, targs, nil, false)
return t
case unionType:
if r.p.exportVersion < iexportVersionGenerics {
errorf("unexpected instantiation type")
}
terms := make([]*types2.Term, r.uint64())
for i := range terms {
terms[i] = types2.NewTerm(r.bool(), r.typ())
}
return types2.NewUnion(terms)
}
}
@ -568,6 +688,19 @@ func (r *importReader) signature(recv *types2.Var) *types2.Signature {
return types2.NewSignature(recv, params, results, variadic)
}
func (r *importReader) tparamList() []*types2.TypeName {
n := r.uint64()
if n == 0 {
return nil
}
xs := make([]*types2.TypeName, n)
for i := range xs {
typ := r.typ()
xs[i] = types2.AsTypeParam(typ).Obj()
}
return xs
}
func (r *importReader) paramList() *types2.Tuple {
xs := make([]*types2.Var, r.uint64())
for i := range xs {
@ -610,3 +743,33 @@ func (r *importReader) byte() byte {
}
return x
}
func baseType(typ types2.Type) *types2.Named {
// pointer receivers are never types2.Named types
if p, _ := typ.(*types2.Pointer); p != nil {
typ = p.Elem()
}
// receiver base types are always (possibly generic) types2.Named types
n, _ := typ.(*types2.Named)
return n
}
func parseSubscript(name string) (string, uint64) {
// Extract the subscript value from the type param name. We export
// and import the subscript value, so that all type params have
// unique names.
sub := uint64(0)
startsub := -1
for i, r := range name {
if '₀' <= r && r < '₀'+10 {
if startsub == -1 {
startsub = i
}
sub = sub*10 + uint64(r-'₀')
}
}
if startsub >= 0 {
name = name[:startsub]
}
return name, sub
}
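// To illustrate (hypothetical input): parseSubscript("T₁₂") yields
// ("T", 12), while a name with no subscript runes yields the name
// unchanged and a zero subscript, which the caller reports as a
// missing subscript.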

View file

@ -1,4 +1,3 @@
// UNREVIEWED
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
@ -120,6 +119,9 @@ var predeclared = []types2.Type{
// used internally by gc; never used by this package or in .a files
anyType{},
// comparable
types2.Universe.Lookup("comparable").Type(),
}
type anyType struct{}

View file

@ -179,6 +179,8 @@ func CanInline(fn *ir.Func) {
Cost: inlineMaxBudget - visitor.budget,
Dcl: pruneUnusedAutos(n.Defn.(*ir.Func).Dcl, &visitor),
Body: inlcopylist(fn.Body),
CanDelayResults: canDelayResults(fn),
}
if base.Flag.LowerM > 1 {
@ -191,60 +193,36 @@ func CanInline(fn *ir.Func) {
}
}
// Inline_Flood marks n's inline body for export and recursively ensures
// all called functions are marked too.
func Inline_Flood(n *ir.Name, exportsym func(*ir.Name)) {
if n == nil {
return
}
if n.Op() != ir.ONAME || n.Class != ir.PFUNC {
base.Fatalf("Inline_Flood: unexpected %v, %v, %v", n, n.Op(), n.Class)
}
fn := n.Func
if fn == nil {
base.Fatalf("Inline_Flood: missing Func on %v", n)
}
if fn.Inl == nil {
return
}
// canDelayResults reports whether inlined calls to fn can delay
// declaring and initializing the result parameters until the "return" statement.
func canDelayResults(fn *ir.Func) bool {
// We can delay declaring+initializing result parameters if:
// (1) there's exactly one "return" statement in the inlined function;
// (2) it's not an empty return statement (#44355); and
// (3) the result parameters aren't named.
if fn.ExportInline() {
return
}
fn.SetExportInline(true)
typecheck.ImportedBody(fn)
var doFlood func(n ir.Node)
doFlood = func(n ir.Node) {
switch n.Op() {
case ir.OMETHEXPR, ir.ODOTMETH:
Inline_Flood(ir.MethodExprName(n), exportsym)
case ir.ONAME:
n := n.(*ir.Name)
switch n.Class {
case ir.PFUNC:
Inline_Flood(n, exportsym)
exportsym(n)
case ir.PEXTERN:
exportsym(n)
nreturns := 0
ir.VisitList(fn.Body, func(n ir.Node) {
if n, ok := n.(*ir.ReturnStmt); ok {
nreturns++
if len(n.Results) == 0 {
nreturns++ // empty return statement (case 2)
}
}
})
case ir.OCALLPART:
// Okay, because we don't yet inline indirect
// calls to method values.
case ir.OCLOSURE:
// VisitList doesn't visit closure bodies, so force a
// recursive call to VisitList on the body of the closure.
ir.VisitList(n.(*ir.ClosureExpr).Func.Body, doFlood)
if nreturns != 1 {
return false // not exactly one return statement (case 1)
}
// temporaries for return values.
for _, param := range fn.Type().Results().FieldSlice() {
if sym := types.OrigSym(param.Sym); sym != nil && !sym.IsBlank() {
return false // found a named result parameter (case 3)
}
}
// Recursively identify all referenced functions for
// reexport. We want to include even non-called functions,
// because after inlining they might be callable.
ir.VisitList(ir.Nodes(fn.Inl.Body), doFlood)
return true
}
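// To illustrate (hypothetical functions):
//
//	func f() int { return g() }        // one non-empty return, unnamed result: delayable
//	func h() (x int) { x = 1; return } // named result and empty return: not delayable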
// hairyVisitor visits a function body to determine its inlining
@ -295,6 +273,19 @@ func (v *hairyVisitor) doNode(n ir.Node) bool {
}
}
}
if n.X.Op() == ir.OMETHEXPR {
if meth := ir.MethodExprName(n.X); meth != nil {
fn := meth.Func
if fn != nil && types.IsRuntimePkg(fn.Sym().Pkg) && fn.Sym().Name == "heapBits.nextArena" {
// Special case: explicitly allow
// mid-stack inlining of
// runtime.heapBits.next even though
// it calls slow-path
// runtime.heapBits.nextArena.
break
}
}
}
if ir.IsIntrinsicCall(n) {
// Treat like any other node.
@ -309,28 +300,8 @@ func (v *hairyVisitor) doNode(n ir.Node) bool {
// Call cost for non-leaf inlining.
v.budget -= v.extraCallCost
// Call is okay if inlinable and we have the budget for the body.
case ir.OCALLMETH:
n := n.(*ir.CallExpr)
t := n.X.Type()
if t == nil {
base.Fatalf("no function type for [%p] %+v\n", n.X, n.X)
}
fn := ir.MethodExprName(n.X).Func
if types.IsRuntimePkg(fn.Sym().Pkg) && fn.Sym().Name == "heapBits.nextArena" {
// Special case: explicitly allow
// mid-stack inlining of
// runtime.heapBits.next even though
// it calls slow-path
// runtime.heapBits.nextArena.
break
}
if fn.Inl != nil {
v.budget -= fn.Inl.Cost
break
}
// Call cost for non-leaf inlining.
v.budget -= v.extraCallCost
base.FatalfAt(n.Pos(), "OCALLMETH missed by typecheck")
// Things that are too hairy, irrespective of the budget
case ir.OCALL, ir.OCALLINTER:
@ -445,7 +416,7 @@ func (v *hairyVisitor) doNode(n ir.Node) bool {
// and don't charge for the OBLOCK itself. The ++ undoes the -- below.
v.budget++
case ir.OCALLPART, ir.OSLICELIT:
case ir.OMETHVALUE, ir.OSLICELIT:
v.budget-- // Hack for toolstash -cmp.
case ir.OMETHEXPR:
@ -499,9 +470,6 @@ func inlcopy(n ir.Node) ir.Node {
// x.Func.Body for iexport and local inlining.
oldfn := x.Func
newfn := ir.NewFunc(oldfn.Pos())
if oldfn.ClosureCalled() {
newfn.SetClosureCalled(true)
}
m.(*ir.ClosureExpr).Func = newfn
newfn.Nname = ir.NewNameAt(oldfn.Nname.Pos(), oldfn.Nname.Sym())
// XXX OK to share fn.Type() ??
@ -544,37 +512,6 @@ func InlineCalls(fn *ir.Func) {
ir.CurFunc = savefn
}
// Turn an OINLCALL into a statement.
func inlconv2stmt(inlcall *ir.InlinedCallExpr) ir.Node {
n := ir.NewBlockStmt(inlcall.Pos(), nil)
n.List = inlcall.Init()
n.List.Append(inlcall.Body.Take()...)
return n
}
// Turn an OINLCALL into a single valued expression.
// The result of inlconv2expr MUST be assigned back to n, e.g.
// n.Left = inlconv2expr(n.Left)
func inlconv2expr(n *ir.InlinedCallExpr) ir.Node {
r := n.ReturnVars[0]
return ir.InitExpr(append(n.Init(), n.Body...), r)
}
// Turn the rlist (the return values) of the OINLCALL n into an
// expression list, lumping the ninit and body (which contain the
// inlined statements) onto the first list element so that evaluation
// order is preserved. Used in return, oas2func, and call statements.
func inlconv2list(n *ir.InlinedCallExpr) []ir.Node {
if n.Op() != ir.OINLCALL || len(n.ReturnVars) == 0 {
base.Fatalf("inlconv2list %+v\n", n)
}
s := n.ReturnVars
s[0] = ir.InitExpr(append(n.Init(), n.Body...), s[0])
return s
}
// inlnode recurses over the tree to find inlineable calls, which will
// be turned into OINLCALLs by mkinlcall. When the recursion comes
// back up will examine left, right, list, rlist, ninit, ntest, nincr,
@ -597,7 +534,9 @@ func inlnode(n ir.Node, maxCost int32, inlMap map[*ir.Func]bool, edit func(ir.No
case ir.ODEFER, ir.OGO:
n := n.(*ir.GoDeferStmt)
switch call := n.Call; call.Op() {
case ir.OCALLFUNC, ir.OCALLMETH:
case ir.OCALLMETH:
base.FatalfAt(call.Pos(), "OCALLMETH missed by typecheck")
case ir.OCALLFUNC:
call := call.(*ir.CallExpr)
call.NoInline = true
}
@ -607,11 +546,18 @@ func inlnode(n ir.Node, maxCost int32, inlMap map[*ir.Func]bool, edit func(ir.No
case ir.OCLOSURE:
return n
case ir.OCALLMETH:
// Prevent inlining some reflect.Value methods when using checkptr,
// even when package reflect was compiled without it (#35073).
base.FatalfAt(n.Pos(), "OCALLMETH missed by typecheck")
case ir.OCALLFUNC:
n := n.(*ir.CallExpr)
if s := ir.MethodExprName(n.X).Sym(); base.Debug.Checkptr != 0 && types.IsReflectPkg(s.Pkg) && (s.Name == "Value.UnsafeAddr" || s.Name == "Value.Pointer") {
return n
if n.X.Op() == ir.OMETHEXPR {
// Prevent inlining some reflect.Value methods when using checkptr,
// even when package reflect was compiled without it (#35073).
if meth := ir.MethodExprName(n.X); meth != nil {
s := meth.Sym()
if base.Debug.Checkptr != 0 && types.IsReflectPkg(s.Pkg) && (s.Name == "Value.UnsafeAddr" || s.Name == "Value.Pointer") {
return n
}
}
}
}
@ -619,31 +565,18 @@ func inlnode(n ir.Node, maxCost int32, inlMap map[*ir.Func]bool, edit func(ir.No
ir.EditChildren(n, edit)
if as := n; as.Op() == ir.OAS2FUNC {
as := as.(*ir.AssignListStmt)
if as.Rhs[0].Op() == ir.OINLCALL {
as.Rhs = inlconv2list(as.Rhs[0].(*ir.InlinedCallExpr))
as.SetOp(ir.OAS2)
as.SetTypecheck(0)
n = typecheck.Stmt(as)
}
}
// with all the branches out of the way, it is now time to
// transmogrify this node itself unless inhibited by the
// switch at the top of this function.
switch n.Op() {
case ir.OCALLFUNC, ir.OCALLMETH:
n := n.(*ir.CallExpr)
if n.NoInline {
return n
}
}
case ir.OCALLMETH:
base.FatalfAt(n.Pos(), "OCALLMETH missed by typecheck")
var call *ir.CallExpr
switch n.Op() {
case ir.OCALLFUNC:
call = n.(*ir.CallExpr)
call := n.(*ir.CallExpr)
if call.NoInline {
break
}
if base.Flag.LowerM > 3 {
fmt.Printf("%v:call to func %+v\n", ir.Line(n), call.X)
}
@ -653,38 +586,10 @@ func inlnode(n ir.Node, maxCost int32, inlMap map[*ir.Func]bool, edit func(ir.No
if fn := inlCallee(call.X); fn != nil && fn.Inl != nil {
n = mkinlcall(call, fn, maxCost, inlMap, edit)
}
case ir.OCALLMETH:
call = n.(*ir.CallExpr)
if base.Flag.LowerM > 3 {
fmt.Printf("%v:call to meth %v\n", ir.Line(n), call.X.(*ir.SelectorExpr).Sel)
}
// typecheck should have resolved ODOTMETH->type, whose nname points to the actual function.
if call.X.Type() == nil {
base.Fatalf("no function type for [%p] %+v\n", call.X, call.X)
}
n = mkinlcall(call, ir.MethodExprName(call.X).Func, maxCost, inlMap, edit)
}
base.Pos = lno
if n.Op() == ir.OINLCALL {
ic := n.(*ir.InlinedCallExpr)
switch call.Use {
default:
ir.Dump("call", call)
base.Fatalf("call missing use")
case ir.CallUseExpr:
n = inlconv2expr(ic)
case ir.CallUseStmt:
n = inlconv2stmt(ic)
case ir.CallUseList:
// leave for caller to convert
}
}
return n
}
@ -740,7 +645,12 @@ var inlgen int
// when producing output for debugging the compiler itself.
var SSADumpInline = func(*ir.Func) {}
// If n is a call node (OCALLFUNC or OCALLMETH), and fn is an ONAME node for a
// NewInline allows the inliner implementation to be overridden.
// If it returns nil, the legacy inliner will handle this call
// instead.
var NewInline = func(call *ir.CallExpr, fn *ir.Func, inlIndex int) *ir.InlinedCallExpr { return nil }
// If n is an OCALLFUNC node, and fn is an ONAME node for a
// function with an inlinable body, return an OINLCALL node that can replace n.
// The returned node's Ninit has the parameter assignments, the Nbody is the
// inlined function body, and (List, Rlist) contain the (input, output)
@ -793,38 +703,90 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b
defer func() {
inlMap[fn] = false
}()
if base.Debug.TypecheckInl == 0 {
typecheck.ImportedBody(fn)
typecheck.FixVariadicCall(n)
parent := base.Ctxt.PosTable.Pos(n.Pos()).Base().InliningIndex()
sym := fn.Linksym()
inlIndex := base.Ctxt.InlTree.Add(parent, n.Pos(), sym)
if base.Flag.GenDwarfInl > 0 {
if !sym.WasInlined() {
base.Ctxt.DwFixups.SetPrecursorFunc(sym, fn)
sym.Set(obj.AttrWasInlined, true)
}
}
// We have a function node, and it has an inlineable body.
if base.Flag.LowerM > 1 {
fmt.Printf("%v: inlining call to %v %v { %v }\n", ir.Line(n), fn.Sym(), fn.Type(), ir.Nodes(fn.Inl.Body))
} else if base.Flag.LowerM != 0 {
if base.Flag.LowerM != 0 {
fmt.Printf("%v: inlining call to %v\n", ir.Line(n), fn)
}
if base.Flag.LowerM > 2 {
fmt.Printf("%v: Before inlining: %+v\n", ir.Line(n), n)
}
res := NewInline(n, fn, inlIndex)
if res == nil {
res = oldInline(n, fn, inlIndex)
}
// transitive inlining
// might be nice to do this before exporting the body,
// but can't emit the body with inlining expanded.
// instead we emit the things that the body needs
// and each use must redo the inlining.
// luckily these are small.
ir.EditChildren(res, edit)
if base.Flag.LowerM > 2 {
fmt.Printf("%v: After inlining %+v\n\n", ir.Line(res), res)
}
return res
}
// CalleeEffects appends any side effects from evaluating callee to init.
func CalleeEffects(init *ir.Nodes, callee ir.Node) {
for {
switch callee.Op() {
case ir.ONAME, ir.OCLOSURE, ir.OMETHEXPR:
return // done
case ir.OCONVNOP:
conv := callee.(*ir.ConvExpr)
init.Append(ir.TakeInit(conv)...)
callee = conv.X
case ir.OINLCALL:
ic := callee.(*ir.InlinedCallExpr)
init.Append(ir.TakeInit(ic)...)
init.Append(ic.Body.Take()...)
callee = ic.SingleResult()
default:
base.FatalfAt(callee.Pos(), "unexpected callee expression: %v", callee)
}
}
}
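// To illustrate (a hedged example): if the callee expression is itself
// an inlined call, as in "f()()" after f has been inlined, the
// OINLCALL's init and body are hoisted into init here and the walk
// continues with its single result expression.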
// oldInline creates an InlinedCallExpr to replace the given call
// expression. fn is the callee function to be inlined. inlIndex is
// the inlining tree position index, for use with src.NewInliningBase
// when rewriting positions.
func oldInline(call *ir.CallExpr, fn *ir.Func, inlIndex int) *ir.InlinedCallExpr {
if base.Debug.TypecheckInl == 0 {
typecheck.ImportedBody(fn)
}
SSADumpInline(fn)
ninit := n.Init()
ninit := call.Init()
// For normal function calls, the function callee expression
// may contain side effects (e.g., added by addinit during
// inlconv2expr or inlconv2list). Make sure to preserve these,
// may contain side effects. Make sure to preserve these,
// if necessary (#42703).
if n.Op() == ir.OCALLFUNC {
callee := n.X
for callee.Op() == ir.OCONVNOP {
conv := callee.(*ir.ConvExpr)
ninit.Append(ir.TakeInit(conv)...)
callee = conv.X
}
if callee.Op() != ir.ONAME && callee.Op() != ir.OCLOSURE && callee.Op() != ir.OMETHEXPR {
base.Fatalf("unexpected callee expression: %v", callee)
}
if call.Op() == ir.OCALLFUNC {
CalleeEffects(&ninit, call.X)
}
// Make temp names to use instead of the originals.
@ -854,25 +816,6 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b
}
// We can delay declaring+initializing result parameters if:
// (1) there's exactly one "return" statement in the inlined function;
// (2) it's not an empty return statement (#44355); and
// (3) the result parameters aren't named.
delayretvars := true
nreturns := 0
ir.VisitList(ir.Nodes(fn.Inl.Body), func(n ir.Node) {
if n, ok := n.(*ir.ReturnStmt); ok {
nreturns++
if len(n.Results) == 0 {
delayretvars = false // empty return statement (case 2)
}
}
})
if nreturns != 1 {
delayretvars = false // not exactly one return statement (case 1)
}
// temporaries for return values.
var retvars []ir.Node
for i, t := range fn.Type().Results().Fields().Slice() {
@ -882,7 +825,6 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b
m = inlvar(n)
m = typecheck.Expr(m).(*ir.Name)
inlvars[n] = m
delayretvars = false // found a named result parameter (case 3)
} else {
// anonymous return values, synthesize names for use in assignment that replaces return
m = retvar(t, i)
@ -905,61 +847,23 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b
// Assign arguments to the parameters' temp names.
as := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil)
as.Def = true
if n.Op() == ir.OCALLMETH {
sel := n.X.(*ir.SelectorExpr)
if sel.X == nil {
base.Fatalf("method call without receiver: %+v", n)
}
as.Rhs.Append(sel.X)
if call.Op() == ir.OCALLMETH {
base.FatalfAt(call.Pos(), "OCALLMETH missed by typecheck")
}
as.Rhs.Append(n.Args...)
// For non-dotted calls to variadic functions, we assign the
// variadic parameter's temp name separately.
var vas *ir.AssignStmt
as.Rhs.Append(call.Args...)
if recv := fn.Type().Recv(); recv != nil {
as.Lhs.Append(inlParam(recv, as, inlvars))
}
for _, param := range fn.Type().Params().Fields().Slice() {
// For ordinary parameters or variadic parameters in
// dotted calls, just add the variable to the
// assignment list, and we're done.
if !param.IsDDD() || n.IsDDD {
as.Lhs.Append(inlParam(param, as, inlvars))
continue
}
// Otherwise, we need to collect the remaining values
// to pass as a slice.
x := len(as.Lhs)
for len(as.Lhs) < len(as.Rhs) {
as.Lhs.Append(argvar(param.Type, len(as.Lhs)))
}
varargs := as.Lhs[x:]
vas = ir.NewAssignStmt(base.Pos, nil, nil)
vas.X = inlParam(param, vas, inlvars)
if len(varargs) == 0 {
vas.Y = typecheck.NodNil()
vas.Y.SetType(param.Type)
} else {
lit := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(param.Type), nil)
lit.List = varargs
vas.Y = lit
}
as.Lhs.Append(inlParam(param, as, inlvars))
}
if len(as.Rhs) != 0 {
ninit.Append(typecheck.Stmt(as))
}
if vas != nil {
ninit.Append(typecheck.Stmt(vas))
}
if !delayretvars {
if !fn.Inl.CanDelayResults {
// Zero the return parameters.
for _, n := range retvars {
ninit.Append(ir.NewDecl(base.Pos, ir.ODCL, n.(*ir.Name)))
@ -972,40 +876,21 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b
inlgen++
parent := -1
if b := base.Ctxt.PosTable.Pos(n.Pos()).Base(); b != nil {
parent = b.InliningIndex()
}
sym := fn.Linksym()
newIndex := base.Ctxt.InlTree.Add(parent, n.Pos(), sym)
// Add an inline mark just before the inlined body.
// This mark is inline in the code so that it's a reasonable spot
// to put a breakpoint. Not sure if that's really necessary or not
// (in which case it could go at the end of the function instead).
// Note issue 28603.
inlMark := ir.NewInlineMarkStmt(base.Pos, types.BADWIDTH)
inlMark.SetPos(n.Pos().WithIsStmt())
inlMark.Index = int64(newIndex)
ninit.Append(inlMark)
if base.Flag.GenDwarfInl > 0 {
if !sym.WasInlined() {
base.Ctxt.DwFixups.SetPrecursorFunc(sym, fn)
sym.Set(obj.AttrWasInlined, true)
}
}
ninit.Append(ir.NewInlineMarkStmt(call.Pos().WithIsStmt(), int64(inlIndex)))
subst := inlsubst{
retlabel: retlabel,
retvars: retvars,
delayretvars: delayretvars,
inlvars: inlvars,
defnMarker: ir.NilExpr{},
bases: make(map[*src.PosBase]*src.PosBase),
newInlIndex: newIndex,
fn: fn,
retlabel: retlabel,
retvars: retvars,
inlvars: inlvars,
defnMarker: ir.NilExpr{},
bases: make(map[*src.PosBase]*src.PosBase),
newInlIndex: inlIndex,
fn: fn,
}
subst.edit = subst.node
@ -1026,26 +911,11 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b
//dumplist("ninit post", ninit);
call := ir.NewInlinedCallExpr(base.Pos, nil, nil)
*call.PtrInit() = ninit
call.Body = body
call.ReturnVars = retvars
call.SetType(n.Type())
call.SetTypecheck(1)
// transitive inlining
// might be nice to do this before exporting the body,
// but can't emit the body with inlining expanded.
// instead we emit the things that the body needs
// and each use must redo the inlining.
// luckily these are small.
ir.EditChildren(call, edit)
if base.Flag.LowerM > 2 {
fmt.Printf("%v: After inlining %+v\n\n", ir.Line(call), call)
}
return call
res := ir.NewInlinedCallExpr(base.Pos, body, retvars)
res.SetInit(ninit)
res.SetType(call.Type())
res.SetTypecheck(1)
return res
}
// Every time we expand a function we generate a new set of tmpnames,
@ -1058,8 +928,10 @@ func inlvar(var_ *ir.Name) *ir.Name {
n := typecheck.NewName(var_.Sym())
n.SetType(var_.Type())
n.SetTypecheck(1)
n.Class = ir.PAUTO
n.SetUsed(true)
n.SetAutoTemp(var_.AutoTemp())
n.Curfn = ir.CurFunc // the calling function, not the called one
n.SetAddrtaken(var_.Addrtaken())
@ -1071,18 +943,7 @@ func inlvar(var_ *ir.Name) *ir.Name {
func retvar(t *types.Field, i int) *ir.Name {
n := typecheck.NewName(typecheck.LookupNum("~R", i))
n.SetType(t.Type)
n.Class = ir.PAUTO
n.SetUsed(true)
n.Curfn = ir.CurFunc // the calling function, not the called one
ir.CurFunc.Dcl = append(ir.CurFunc.Dcl, n)
return n
}
// Synthesize a variable to store the inlined function's arguments
// when they come from a multiple return call.
func argvar(t *types.Type, i int) ir.Node {
n := typecheck.NewName(typecheck.LookupNum("~arg", i))
n.SetType(t.Elem())
n.SetTypecheck(1)
n.Class = ir.PAUTO
n.SetUsed(true)
n.Curfn = ir.CurFunc // the calling function, not the called one
@ -1099,10 +960,6 @@ type inlsubst struct {
// Temporary result variables.
retvars []ir.Node
// Whether result variables should be initialized at the
// "return" statement.
delayretvars bool
inlvars map[*ir.Name]*ir.Name
// defnMarker is used to mark a Node for reassignment.
// inlsubst.clovar set this during creating new ONAME.
@ -1157,17 +1014,21 @@ func (subst *inlsubst) fields(oldt *types.Type) []*types.Field {
// clovar creates a new ONAME node for a local variable or param of a closure
// inside a function being inlined.
func (subst *inlsubst) clovar(n *ir.Name) *ir.Name {
// TODO(danscales): want to get rid of this shallow copy, with code like the
// following, but it is hard to copy all the necessary flags in a maintainable way.
// m := ir.NewNameAt(n.Pos(), n.Sym())
// m.Class = n.Class
// m.SetType(n.Type())
// m.SetTypecheck(1)
//if n.IsClosureVar() {
// m.SetIsClosureVar(true)
//}
m := &ir.Name{}
*m = *n
m := ir.NewNameAt(n.Pos(), n.Sym())
m.Class = n.Class
m.SetType(n.Type())
m.SetTypecheck(1)
if n.IsClosureVar() {
m.SetIsClosureVar(true)
}
if n.Addrtaken() {
m.SetAddrtaken(true)
}
if n.Used() {
m.SetUsed(true)
}
m.Defn = n.Defn
m.Curfn = subst.newclofn
switch defn := n.Defn.(type) {
@ -1222,8 +1083,6 @@ func (subst *inlsubst) clovar(n *ir.Name) *ir.Name {
// closure does the necessary substitutions for a ClosureExpr n and returns the new
// closure node.
func (subst *inlsubst) closure(n *ir.ClosureExpr) ir.Node {
m := ir.Copy(n)
// Prior to the subst edit, set a flag in the inlsubst to
// indicate that we don't want to update the source positions in
// the new closure. If we do this, it will appear that the closure
@ -1231,29 +1090,16 @@ func (subst *inlsubst) closure(n *ir.ClosureExpr) ir.Node {
// issue #46234 for more details.
defer func(prev bool) { subst.noPosUpdate = prev }(subst.noPosUpdate)
subst.noPosUpdate = true
ir.EditChildren(m, subst.edit)
//fmt.Printf("Inlining func %v with closure into %v\n", subst.fn, ir.FuncName(ir.CurFunc))
// The following is similar to funcLit
oldfn := n.Func
newfn := ir.NewFunc(oldfn.Pos())
// These three lines are not strictly necessary, but just to be clear
// that the new function needs to redo typechecking and inlinability analysis.
newfn.SetTypecheck(0)
newfn.SetInlinabilityChecked(false)
newfn.Inl = nil
newfn.SetIsHiddenClosure(true)
newfn.Nname = ir.NewNameAt(n.Pos(), ir.BlankNode.Sym())
newfn.Nname.Func = newfn
newfn := ir.NewClosureFunc(oldfn.Pos(), true)
// Ntype can be nil for -G=3 mode.
if oldfn.Nname.Ntype != nil {
newfn.Nname.Ntype = subst.node(oldfn.Nname.Ntype).(ir.Ntype)
}
newfn.Nname.Defn = newfn
m.(*ir.ClosureExpr).Func = newfn
newfn.OClosure = m.(*ir.ClosureExpr)
if subst.newclofn != nil {
//fmt.Printf("Inlining a closure with a nested closure\n")
@ -1303,13 +1149,9 @@ func (subst *inlsubst) closure(n *ir.ClosureExpr) ir.Node {
// Actually create the named function for the closure, now that
// the closure is inlined in a specific function.
m.SetTypecheck(0)
if oldfn.ClosureCalled() {
typecheck.Callee(m)
} else {
typecheck.Expr(m)
}
return m
newclo := newfn.OClosure
newclo.SetInit(subst.list(n.Init()))
return typecheck.Expr(newclo)
}
// node recursively copies a node from the saved pristine body of the
@ -1391,7 +1233,7 @@ func (subst *inlsubst) node(n ir.Node) ir.Node {
}
as.Rhs = subst.list(n.Results)
if subst.delayretvars {
if subst.fn.Inl.CanDelayResults {
for _, n := range as.Lhs {
as.PtrInit().Append(ir.NewDecl(base.Pos, ir.ODCL, n.(*ir.Name)))
n.Name().Defn = as

View file

@ -142,28 +142,15 @@ func (n *BinaryExpr) SetOp(op Op) {
}
}
// A CallUse records how the result of the call is used:
type CallUse byte
const (
_ CallUse = iota
CallUseExpr // single expression result is used
CallUseList // list of results are used
CallUseStmt // results not used - call is a statement
)
// A CallExpr is a function call X(Args).
type CallExpr struct {
miniExpr
origNode
X Node
Args Nodes
KeepAlive []*Name // vars to be kept alive until call returns
IsDDD bool
Use CallUse
NoInline bool
PreserveClosure bool // disable directClosureCall for this call
X Node
Args Nodes
KeepAlive []*Name // vars to be kept alive until call returns
IsDDD bool
NoInline bool
}
func NewCallExpr(pos src.XPos, op Op, fun Node, args []Node) *CallExpr {
@ -181,8 +168,12 @@ func (n *CallExpr) SetOp(op Op) {
switch op {
default:
panic(n.no("SetOp " + op.String()))
case OCALL, OCALLFUNC, OCALLINTER, OCALLMETH,
OAPPEND, ODELETE, OGETG, OMAKE, OPRINT, OPRINTN, ORECOVER:
case OAPPEND,
OCALL, OCALLFUNC, OCALLINTER, OCALLMETH,
ODELETE,
OGETG, OGETCALLERPC, OGETCALLERSP,
OMAKE, OPRINT, OPRINTN,
ORECOVER, ORECOVERFP:
n.op = op
}
}
@ -192,8 +183,10 @@ type ClosureExpr struct {
miniExpr
Func *Func `mknode:"-"`
Prealloc *Name
IsGoWrap bool // whether this is wrapper closure of a go statement
}
// Deprecated: Use NewClosureFunc instead.
func NewClosureExpr(pos src.XPos, fn *Func) *ClosureExpr {
n := &ClosureExpr{Func: fn}
n.op = OCLOSURE
@ -277,7 +270,7 @@ func (n *ConvExpr) SetOp(op Op) {
switch op {
default:
panic(n.no("SetOp " + op.String()))
case OCONV, OCONVIFACE, OCONVNOP, OBYTES2STR, OBYTES2STRTMP, ORUNES2STR, OSTR2BYTES, OSTR2BYTESTMP, OSTR2RUNES, ORUNESTR, OSLICE2ARRPTR:
case OCONV, OCONVIFACE, OCONVIDATA, OCONVNOP, OBYTES2STR, OBYTES2STRTMP, ORUNES2STR, OSTR2BYTES, OSTR2BYTESTMP, OSTR2RUNES, ORUNESTR, OSLICE2ARRPTR:
n.op = op
}
}
@ -323,26 +316,24 @@ func NewKeyExpr(pos src.XPos, key, value Node) *KeyExpr {
// A StructKeyExpr is an Field: Value composite literal key.
type StructKeyExpr struct {
miniExpr
Field *types.Sym
Value Node
Offset int64
Field *types.Field
Value Node
}
func NewStructKeyExpr(pos src.XPos, field *types.Sym, value Node) *StructKeyExpr {
func NewStructKeyExpr(pos src.XPos, field *types.Field, value Node) *StructKeyExpr {
n := &StructKeyExpr{Field: field, Value: value}
n.pos = pos
n.op = OSTRUCTKEY
n.Offset = types.BADWIDTH
return n
}
func (n *StructKeyExpr) Sym() *types.Sym { return n.Field }
func (n *StructKeyExpr) Sym() *types.Sym { return n.Field.Sym }
// An InlinedCallExpr is an inlined function call.
type InlinedCallExpr struct {
miniExpr
Body Nodes
ReturnVars Nodes
ReturnVars Nodes // must be side-effect free
}
func NewInlinedCallExpr(pos src.XPos, body, retvars []Node) *InlinedCallExpr {
@ -354,6 +345,21 @@ func NewInlinedCallExpr(pos src.XPos, body, retvars []Node) *InlinedCallExpr {
return n
}
func (n *InlinedCallExpr) SingleResult() Node {
if have := len(n.ReturnVars); have != 1 {
base.FatalfAt(n.Pos(), "inlined call has %v results, expected 1", have)
}
if !n.Type().HasShape() && n.ReturnVars[0].Type().HasShape() {
// If the type of the call is not a shape, but the type of the return value
// is a shape, we need to do an implicit conversion, so the real type
// of n is maintained.
r := NewConvExpr(n.Pos(), OCONVNOP, n.Type(), n.ReturnVars[0])
r.SetTypecheck(1)
return r
}
return n.ReturnVars[0]
}
// A LogicalExpr is an expression X Op Y where Op is && or ||.
// It is separate from BinaryExpr to make room for statements
// that must be executed before Y but after X.
@ -448,6 +454,20 @@ func (n *ParenExpr) SetOTYPE(t *types.Type) {
t.SetNod(n)
}
// A RawOrigExpr represents an arbitrary Go expression as a string value.
// When printed in diagnostics, the string value is written out exactly as-is.
type RawOrigExpr struct {
miniExpr
Raw string
}
func NewRawOrigExpr(pos src.XPos, op Op, raw string) *RawOrigExpr {
n := &RawOrigExpr{Raw: raw}
n.pos = pos
n.op = op
return n
}
// A ResultExpr represents a direct access to a result.
type ResultExpr struct {
miniExpr
@ -494,10 +514,15 @@ func NewNameOffsetExpr(pos src.XPos, name *Name, offset int64, typ *types.Type)
// A SelectorExpr is a selector expression X.Sel.
type SelectorExpr struct {
miniExpr
X Node
Sel *types.Sym
X Node
// Sel is the name of the field or method being selected, without (in the
// case of methods) any preceding type specifier. If the field/method is
// exported, then the Sym uses the local package regardless of the package
// of the containing type.
Sel *types.Sym
// The actual selected field - may not be filled in until typechecking.
Selection *types.Field
Prealloc *Name // preallocated storage for OCALLPART, if any
Prealloc *Name // preallocated storage for OMETHVALUE, if any
}
func NewSelectorExpr(pos src.XPos, op Op, x Node, sel *types.Sym) *SelectorExpr {
@ -511,7 +536,7 @@ func (n *SelectorExpr) SetOp(op Op) {
switch op {
default:
panic(n.no("SetOp " + op.String()))
case OXDOT, ODOT, ODOTPTR, ODOTMETH, ODOTINTER, OCALLPART, OMETHEXPR:
case OXDOT, ODOT, ODOTPTR, ODOTMETH, ODOTINTER, OMETHVALUE, OMETHEXPR:
n.op = op
}
}
@ -652,6 +677,38 @@ func (n *TypeAssertExpr) SetOp(op Op) {
}
}
// A DynamicTypeAssertExpr asserts that X is of dynamic type T.
type DynamicTypeAssertExpr struct {
miniExpr
X Node
// N = not an interface
// E = empty interface
// I = nonempty interface
// For E->N, T is a *runtime.type for N
// For I->N, T is a *runtime.itab for N+I
// For E->I, T is a *runtime.type for I
// For I->I, ditto
// For I->E, T is a *runtime.type for interface{} (unnecessary, but just to fill in the slot)
// For E->E, ditto
T Node
}
func NewDynamicTypeAssertExpr(pos src.XPos, op Op, x, t Node) *DynamicTypeAssertExpr {
n := &DynamicTypeAssertExpr{X: x, T: t}
n.pos = pos
n.op = op
return n
}
func (n *DynamicTypeAssertExpr) SetOp(op Op) {
switch op {
default:
panic(n.no("SetOp " + op.String()))
case ODYNAMICDOTTYPE, ODYNAMICDOTTYPE2:
n.op = op
}
}
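At the source level, the construct that needs these ops is a type assertion whose target is a type parameter: no static type descriptor exists at compile time, so one must be threaded in dynamically. A runnable illustration in ordinary user code (the function name is invented; roughly, the two-result assertion here is what lowers to ODYNAMICDOTTYPE2):

package main

import "fmt"

// as asserts that x's dynamic type is T. Since T is a type parameter,
// there is no fixed *runtime._type to test against; the descriptor is
// supplied at run time instead.
func as[T any](x interface{}) (T, bool) {
	v, ok := x.(T)
	return v, ok
}

func main() {
	fmt.Println(as[int](42))    // 42 true
	fmt.Println(as[string](42)) // "" (zero value) false
}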
// A UnaryExpr is a unary expression Op X,
// or Op(X) for a builtin function that does not end up being a call.
type UnaryExpr struct {
@ -678,6 +735,11 @@ func (n *UnaryExpr) SetOp(op Op) {
}
}
// Probably temporary: using Implicit() flag to mark generic function nodes that
// are called to make getGfInfo analysis easier in one pre-order pass.
func (n *InstExpr) Implicit() bool { return n.flags&miniExprImplicit != 0 }
func (n *InstExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) }
// An InstExpr is a generic function or type instantiation.
type InstExpr struct {
miniExpr
@ -773,6 +835,11 @@ func StaticValue(n Node) Node {
continue
}
if n.Op() == OINLCALL {
n = n.(*InlinedCallExpr).SingleResult()
continue
}
n1 := staticValue1(n)
if n1 == nil {
return n
@ -1071,7 +1138,7 @@ func MethodExprName(n Node) *Name {
// MethodExprFunc is like MethodExprName, but returns the types.Field instead.
func MethodExprFunc(n Node) *types.Field {
switch n.Op() {
case ODOTMETH, OMETHEXPR, OCALLPART:
case ODOTMETH, OMETHEXPR, OMETHVALUE:
return n.(*SelectorExpr).Selection
}
base.Fatalf("unexpected node: %v (%v)", n, n.Op())

View file

@ -185,6 +185,7 @@ var OpPrec = []int{
OCLOSE: 8,
OCOMPLIT: 8,
OCONVIFACE: 8,
OCONVIDATA: 8,
OCONVNOP: 8,
OCONV: 8,
OCOPY: 8,
@ -237,7 +238,7 @@ var OpPrec = []int{
ODOTTYPE: 8,
ODOT: 8,
OXDOT: 8,
OCALLPART: 8,
OMETHVALUE: 8,
OMETHEXPR: 8,
OPLUS: 7,
ONOT: 7,
@ -546,7 +547,7 @@ func exprFmt(n Node, s fmt.State, prec int) {
n = nn.X
continue
}
case OCONV, OCONVNOP, OCONVIFACE:
case OCONV, OCONVNOP, OCONVIFACE, OCONVIDATA:
nn := nn.(*ConvExpr)
if nn.Implicit() {
n = nn.X
@ -567,6 +568,11 @@ func exprFmt(n Node, s fmt.State, prec int) {
return
}
if n, ok := n.(*RawOrigExpr); ok {
fmt.Fprint(s, n.Raw)
return
}
switch n.Op() {
case OPAREN:
n := n.(*ParenExpr)
@ -709,6 +715,10 @@ func exprFmt(n Node, s fmt.State, prec int) {
fmt.Fprintf(s, "... argument")
return
}
if typ := n.Type(); typ != nil {
fmt.Fprintf(s, "%v{%s}", typ, ellipsisIf(len(n.List) != 0))
return
}
if n.Ntype != nil {
fmt.Fprintf(s, "%v{%s}", n.Ntype, ellipsisIf(len(n.List) != 0))
return
@ -752,7 +762,7 @@ func exprFmt(n Node, s fmt.State, prec int) {
n := n.(*StructKeyExpr)
fmt.Fprintf(s, "%v:%v", n.Field, n.Value)
case OXDOT, ODOT, ODOTPTR, ODOTINTER, ODOTMETH, OCALLPART, OMETHEXPR:
case OXDOT, ODOT, ODOTPTR, ODOTINTER, ODOTMETH, OMETHVALUE, OMETHEXPR:
n := n.(*SelectorExpr)
exprFmt(n.X, s, nprec)
if n.Sel == nil {
@ -804,6 +814,7 @@ func exprFmt(n Node, s fmt.State, prec int) {
case OCONV,
OCONVIFACE,
OCONVIDATA,
OCONVNOP,
OBYTES2STR,
ORUNES2STR,
@ -854,6 +865,15 @@ func exprFmt(n Node, s fmt.State, prec int) {
}
fmt.Fprintf(s, "(%.v)", n.Args)
case OINLCALL:
n := n.(*InlinedCallExpr)
// TODO(mdempsky): Print Init and/or Body?
if len(n.ReturnVars) == 1 {
fmt.Fprintf(s, "%v", n.ReturnVars[0])
return
}
fmt.Fprintf(s, "(.%v)", n.ReturnVars)
case OMAKEMAP, OMAKECHAN, OMAKESLICE:
n := n.(*MakeExpr)
if n.Cap != nil {
@ -986,7 +1006,7 @@ func (l Nodes) Format(s fmt.State, verb rune) {
// Dump prints the message s followed by a debug dump of n.
func Dump(s string, n Node) {
fmt.Printf("%s [%p]%+v\n", s, n, n)
fmt.Printf("%s%+v\n", s, n)
}
// DumpList prints the message s followed by a debug dump of each node in the list.
@ -1114,16 +1134,21 @@ func dumpNodeHeader(w io.Writer, n Node) {
}
if n.Pos().IsKnown() {
pfx := ""
fmt.Fprint(w, " # ")
switch n.Pos().IsStmt() {
case src.PosNotStmt:
pfx = "_" // "-" would be confusing
fmt.Fprint(w, "_") // "-" would be confusing
case src.PosIsStmt:
pfx = "+"
fmt.Fprint(w, "+")
}
for i, pos := range base.Ctxt.AllPos(n.Pos(), nil) {
if i > 0 {
fmt.Fprint(w, ",")
}
// TODO(mdempsky): Print line pragma details too.
file := filepath.Base(pos.Filename())
fmt.Fprintf(w, "%s:%d:%d", file, pos.Line(), pos.Col())
}
pos := base.Ctxt.PosTable.Pos(n.Pos())
file := filepath.Base(pos.Filename())
fmt.Fprintf(w, " # %s%s:%d", pfx, file, pos.Line())
}
}

View file

@ -9,6 +9,7 @@ import (
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/src"
"fmt"
)
// A Func corresponds to a single function in a Go program
@ -39,14 +40,14 @@ import (
// constructs a fresh node.
//
// A method value (t.M) is represented by ODOTMETH/ODOTINTER
// when it is called directly and by OCALLPART otherwise.
// when it is called directly and by OMETHVALUE otherwise.
// These are like method expressions, except that for ODOTMETH/ODOTINTER,
// the method name is stored in Sym instead of Right.
// Each OCALLPART ends up being implemented as a new
// Each OMETHVALUE ends up being implemented as a new
// function, a bit like a closure, with its own ODCLFUNC.
// The OCALLPART uses n.Func to record the linkage to
// The OMETHVALUE uses n.Func to record the linkage to
// the generated ODCLFUNC, but there is no
// pointer from the Func back to the OCALLPART.
// pointer from the Func back to the OMETHVALUE.
type Func struct {
miniNode
Body Nodes
@ -166,6 +167,11 @@ type Inline struct {
// another package is imported.
Dcl []*Name
Body []Node
// CanDelayResults reports whether it's safe for the inliner to delay
// initializing the result parameters until immediately before the
// "return" statement.
CanDelayResults bool
}
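Loosely, delaying is safe only when the callee never observes its result variables before the return statement executes. A hedged illustration in plain Go (function names invented for the example):

package main

import "fmt"

// add never mentions its result before returning, so an inliner is free
// to create the result temporary at the return site (CanDelayResults).
func add(a, b int) int { return a + b }

// count reads and writes its named result throughout the body, so the
// temporary must instead be declared and zeroed at the inlined entry.
func count(xs []int) (n int) {
	for range xs {
		n++
	}
	return
}

func main() {
	fmt.Println(add(1, 2), count([]int{1, 2, 3})) // 3 3
}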
// A Mark represents a scope boundary.
@ -196,7 +202,7 @@ const (
funcExportInline // include inline body in export data
funcInstrumentBody // add race/msan instrumentation during SSA construction
funcOpenCodedDeferDisallowed // can't do open-coded defers
funcClosureCalled // closure is only immediately called
funcClosureCalled // closure is only immediately called; used by escape analysis
)
type SymAndPos struct {
@ -272,6 +278,17 @@ func PkgFuncName(f *Func) string {
var CurFunc *Func
// WithFunc invokes do with CurFunc and base.Pos set to curfn and
// curfn.Pos(), respectively, and then restores their previous values
// before returning.
func WithFunc(curfn *Func, do func()) {
oldfn, oldpos := CurFunc, base.Pos
defer func() { CurFunc, base.Pos = oldfn, oldpos }()
CurFunc, base.Pos = curfn, curfn.Pos()
do()
}
func FuncSymName(s *types.Sym) string {
return s.Name + "·f"
}
@ -279,7 +296,7 @@ func FuncSymName(s *types.Sym) string {
// MarkFunc marks a node as a function.
func MarkFunc(n *Name) {
if n.Op() != ONAME || n.Class != Pxxx {
base.Fatalf("expected ONAME/Pxxx node, got %v", n)
base.FatalfAt(n.Pos(), "expected ONAME/Pxxx node, got %v (%v/%v)", n, n.Op(), n.Class)
}
n.Class = PFUNC
@ -296,8 +313,8 @@ func ClosureDebugRuntimeCheck(clo *ClosureExpr) {
base.WarnfAt(clo.Pos(), "stack closure, captured vars = %v", clo.Func.ClosureVars)
}
}
if base.Flag.CompilingRuntime && clo.Esc() == EscHeap {
base.ErrorfAt(clo.Pos(), "heap-allocated closure, not allowed in runtime")
if base.Flag.CompilingRuntime && clo.Esc() == EscHeap && !clo.IsGoWrap {
base.ErrorfAt(clo.Pos(), "heap-allocated closure %s, not allowed in runtime", FuncName(clo.Func))
}
}
@ -306,3 +323,109 @@ func ClosureDebugRuntimeCheck(clo *ClosureExpr) {
func IsTrivialClosure(clo *ClosureExpr) bool {
return len(clo.Func.ClosureVars) == 0
}
// globClosgen is like Func.Closgen, but for the global scope.
var globClosgen int32
// closureName generates a new unique name for a closure within outerfn.
func closureName(outerfn *Func) *types.Sym {
pkg := types.LocalPkg
outer := "glob."
prefix := "func"
gen := &globClosgen
if outerfn != nil {
if outerfn.OClosure != nil {
prefix = ""
}
pkg = outerfn.Sym().Pkg
outer = FuncName(outerfn)
// There may be multiple functions named "_". In those
// cases, we can't use their individual Closgens as it
// would lead to name clashes.
if !IsBlank(outerfn.Nname) {
gen = &outerfn.Closgen
}
}
*gen++
return pkg.Lookup(fmt.Sprintf("%s.%s%d", outer, prefix, *gen))
}
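This reproduces the familiar main.f.func1-style symbols seen in -m output and stack traces. A self-contained sketch of the same numbering scheme, with plain strings standing in for *types.Sym and *Func (the helper's shape is invented for illustration):

package main

import "fmt"

var globClosgen int

// closureName mimics the compiler's scheme: "glob.funcN" at package
// scope, "outer.funcN" inside a named function, and a bare ".N" suffix
// when the enclosing function is itself a closure.
func closureName(outer string, outerIsClosure bool, gen *int) string {
	prefix := "func"
	if outer == "" {
		outer, gen = "glob", &globClosgen
	} else if outerIsClosure {
		prefix = ""
	}
	*gen++
	return fmt.Sprintf("%s.%s%d", outer, prefix, *gen)
}

func main() {
	var mainGen int
	fmt.Println(closureName("", false, nil))            // glob.func1
	fmt.Println(closureName("main.f", false, &mainGen)) // main.f.func1
	clo := closureName("main.f", false, &mainGen)       // main.f.func2
	var cloGen int
	fmt.Println(closureName(clo, true, &cloGen)) // main.f.func2.1
}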
// NewClosureFunc creates a new Func to represent a function literal.
// If hidden is true, then the closure is marked hidden (i.e., as a
// function literal contained within another function, rather than a
// package-scope variable initialization expression).
func NewClosureFunc(pos src.XPos, hidden bool) *Func {
fn := NewFunc(pos)
fn.SetIsHiddenClosure(hidden)
fn.Nname = NewNameAt(pos, BlankNode.Sym())
fn.Nname.Func = fn
fn.Nname.Defn = fn
fn.OClosure = NewClosureExpr(pos, fn)
return fn
}
// NameClosure generates a unique name for the given function literal,
// which must have appeared within outerfn.
func NameClosure(clo *ClosureExpr, outerfn *Func) {
fn := clo.Func
if fn.IsHiddenClosure() != (outerfn != nil) {
base.FatalfAt(clo.Pos(), "closure naming inconsistency: hidden %v, but outer %v", fn.IsHiddenClosure(), outerfn)
}
name := fn.Nname
if !IsBlank(name) {
base.FatalfAt(clo.Pos(), "closure already named: %v", name)
}
name.SetSym(closureName(outerfn))
MarkFunc(name)
}
// UseClosure checks that the given function literal has been set up
// correctly, and then returns it as an expression.
// It must be called after clo.Func.ClosureVars has been set.
func UseClosure(clo *ClosureExpr, pkg *Package) Node {
fn := clo.Func
name := fn.Nname
if IsBlank(name) {
base.FatalfAt(fn.Pos(), "unnamed closure func: %v", fn)
}
// Caution: clo.Typecheck() is still 0 when UseClosure is called by
// tcClosure.
if fn.Typecheck() != 1 || name.Typecheck() != 1 {
base.FatalfAt(fn.Pos(), "missed typecheck: %v", fn)
}
if clo.Type() == nil || name.Type() == nil {
base.FatalfAt(fn.Pos(), "missing types: %v", fn)
}
if !types.Identical(clo.Type(), name.Type()) {
base.FatalfAt(fn.Pos(), "mismatched types: %v", fn)
}
if base.Flag.W > 1 {
s := fmt.Sprintf("new closure func: %v", fn)
Dump(s, fn)
}
if pkg != nil {
pkg.Decls = append(pkg.Decls, fn)
}
if false && IsTrivialClosure(clo) {
// TODO(mdempsky): Investigate if we can/should optimize this
// case. walkClosure already handles it later, but it could be
// useful to recognize earlier (e.g., it might allow multiple
// inlined calls to a function to share a common trivial closure
// func, rather than cloning it for each inlined call).
}
return clo
}

View file

@ -358,39 +358,74 @@ func (n *Name) Byval() bool {
return n.Canonical().flags&nameByval != 0
}
// NewClosureVar returns a new closure variable for fn to refer to
// outer variable n.
func NewClosureVar(pos src.XPos, fn *Func, n *Name) *Name {
c := NewNameAt(pos, n.Sym())
c.Curfn = fn
c.Class = PAUTOHEAP
c.SetIsClosureVar(true)
c.Defn = n.Canonical()
c.Outer = n
c.SetType(n.Type())
c.SetTypecheck(n.Typecheck())
fn.ClosureVars = append(fn.ClosureVars, c)
return c
}
// NewHiddenParam returns a new hidden parameter for fn with the given
// name and type.
func NewHiddenParam(pos src.XPos, fn *Func, sym *types.Sym, typ *types.Type) *Name {
if fn.OClosure != nil {
base.FatalfAt(fn.Pos(), "cannot add hidden parameters to closures")
}
fn.SetNeedctxt(true)
// Create a fake parameter, disassociated from any real function, to
// pretend to capture.
fake := NewNameAt(pos, sym)
fake.Class = PPARAM
fake.SetType(typ)
fake.SetByval(true)
return NewClosureVar(pos, fn, fake)
}
// CaptureName returns a Name suitable for referring to n from within function
// fn or from the package block if fn is nil. If n is a free variable declared
// within a function that encloses fn, then CaptureName returns a closure
// variable that refers to n and adds it to fn.ClosureVars. Otherwise, it simply
// returns n.
// within a function that encloses fn, then CaptureName returns the closure
// variable that refers to n within fn, creating it if necessary.
// Otherwise, it simply returns n.
func CaptureName(pos src.XPos, fn *Func, n *Name) *Name {
if n.Op() != ONAME || n.Curfn == nil {
return n // okay to use directly
}
if n.IsClosureVar() {
base.FatalfAt(pos, "misuse of CaptureName on closure variable: %v", n)
}
if n.Op() != ONAME || n.Curfn == nil || n.Curfn == fn {
return n // okay to use directly
c := n.Innermost
if c == nil {
c = n
}
if c.Curfn == fn {
return c
}
if fn == nil {
base.FatalfAt(pos, "package-block reference to %v, declared in %v", n, n.Curfn)
}
c := n.Innermost
if c != nil && c.Curfn == fn {
return c
}
// Do not have a closure var for the active closure yet; make one.
c = NewNameAt(pos, n.Sym())
c.Curfn = fn
c.Class = PAUTOHEAP
c.SetIsClosureVar(true)
c.Defn = n
c = NewClosureVar(pos, fn, c)
// Link into list of active closure variables.
// Popped from list in FinishCaptureNames.
c.Outer = n.Innermost
n.Innermost = c
fn.ClosureVars = append(fn.ClosureVars, c)
return c
}
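A toy model of the Innermost/Outer chain that CaptureName maintains, with invented struct fields standing in for ir.Name: one closure variable is created per (variable, function) pair and reused on later references, and FinishCaptureNames later pops the chain.

package main

import "fmt"

type fn struct{ name string }

type name struct {
	sym       string
	curfn     *fn   // function the name is declared (or captured) in
	outer     *name // next-outer closure var for the same variable
	innermost *name // head of the active capture chain (canonical name only)
}

// captureName returns the closure var for n within f, creating one only
// when the innermost capture so far belongs to an enclosing function.
func captureName(f *fn, n *name) *name {
	c := n.innermost
	if c == nil {
		c = n
	}
	if c.curfn == f {
		return c // already usable from f
	}
	c = &name{sym: n.sym, curfn: f, outer: n.innermost}
	n.innermost = c // popped again by the FinishCaptureNames analogue
	return c
}

func main() {
	outerFn, innerFn := &fn{"outer"}, &fn{"outer.func1"}
	x := &name{sym: "x", curfn: outerFn}
	a := captureName(innerFn, x)
	b := captureName(innerFn, x)      // reuses the same closure var
	fmt.Println(a == b, a.curfn.name) // true outer.func1
}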

View file

@ -159,7 +159,6 @@ const (
OCALLFUNC // X(Args) (function call f(args))
OCALLMETH // X(Args) (direct method call x.Method(args))
OCALLINTER // X(Args) (interface method call x.Method(args))
OCALLPART // X.Sel (method expression x.Method, not called)
OCAP // cap(X)
OCLOSE // close(X)
OCLOSURE // func Type { Func.Closure.Body } (func literal)
@ -171,6 +170,7 @@ const (
OPTRLIT // &X (X is composite literal)
OCONV // Type(X) (type conversion)
OCONVIFACE // Type(X) (type conversion, to interface)
OCONVIDATA // Builds a data word to store X in an interface. Equivalent to IDATA(CONVIFACE(X)). Is an ir.ConvExpr.
OCONVNOP // Type(X) (type conversion, no effect)
OCOPY // copy(X, Y)
ODCL // var X (declares X of type X.Type)
@ -237,6 +237,7 @@ const (
OSLICE3ARR // X[Low : High : Max] (X is pointer to array)
OSLICEHEADER // sliceheader{Ptr, Len, Cap} (Ptr is unsafe.Pointer, Len is length, Cap is capacity)
ORECOVER // recover()
ORECOVERFP // recover(Args) w/ explicit FP argument
ORECV // <-X
ORUNESTR // Type(X) (Type is string, X is rune)
OSELRECV2 // like OAS2: Lhs = Rhs where len(Lhs)=2, len(Rhs)=1, Rhs[0].Op = ORECV (appears as .Var of OCASE)
@ -249,14 +250,16 @@ const (
OSIZEOF // unsafe.Sizeof(X)
OUNSAFEADD // unsafe.Add(X, Y)
OUNSAFESLICE // unsafe.Slice(X, Y)
OMETHEXPR // method expression
OMETHEXPR // X(Args) (method expression T.Method(args), first argument is the method receiver)
OMETHVALUE // X.Sel (method expression t.Method, not called)
// statements
OBLOCK // { List } (block of code)
OBREAK // break [Label]
// OCASE: case List: Body (List==nil means default)
// For OTYPESW, List is a OTYPE node for the specified type (or OLITERAL
// for nil), and, if a type-switch variable is specified, Rlist is an
// for nil) or an ODYNAMICTYPE indicating a runtime type for generics.
// If a type-switch variable is specified, Var is an
// ONAME for the version of the type-switch variable with the specified
// type.
OCASE
@ -317,9 +320,16 @@ const (
OINLMARK // start of an inlined body, with file/line of caller. Xoffset is an index into the inline tree.
OLINKSYMOFFSET // offset within a name
// opcodes for generics
ODYNAMICDOTTYPE // x = i.(T) where T is a type parameter (or derived from a type parameter)
ODYNAMICDOTTYPE2 // x, ok = i.(T) where T is a type parameter (or derived from a type parameter)
ODYNAMICTYPE // a type node for type switches (represents a dynamic target type for a type switch)
// arch-specific opcodes
OTAILCALL // tail call to another function
OGETG // runtime.getg() (read g pointer)
OTAILCALL // tail call to another function
OGETG // runtime.getg() (read g pointer)
OGETCALLERPC // runtime.getcallerpc() (continuation PC in caller frame)
OGETCALLERSP // runtime.getcallersp() (stack pointer in caller frame)
OEND
)
@ -436,18 +446,19 @@ func (s NameSet) Sorted(less func(*Name, *Name) bool) []*Name {
return res
}
type PragmaFlag int16
type PragmaFlag uint16
const (
// Func pragmas.
Nointerface PragmaFlag = 1 << iota
Noescape // func parameters don't escape
Norace // func must not have race detector annotations
Nosplit // func should not execute on separate stack
Noinline // func should not be inlined
NoCheckPtr // func should not be instrumented by checkptr
CgoUnsafeArgs // treat a pointer to one arg as a pointer to them all
UintptrEscapes // pointers converted to uintptr escape
Nointerface PragmaFlag = 1 << iota
Noescape // func parameters don't escape
Norace // func must not have race detector annotations
Nosplit // func should not execute on separate stack
Noinline // func should not be inlined
NoCheckPtr // func should not be instrumented by checkptr
CgoUnsafeArgs // treat a pointer to one arg as a pointer to them all
UintptrKeepAlive // pointers converted to uintptr must be kept alive (compiler internal only)
UintptrEscapes // pointers converted to uintptr escape
// Runtime-only func pragmas.
// See ../../../../runtime/README.md for detailed descriptions.
@ -563,7 +574,7 @@ func OuterValue(n Node) Node {
for {
switch nn := n; nn.Op() {
case OXDOT:
base.Fatalf("OXDOT in walk")
base.FatalfAt(n.Pos(), "OXDOT in walk: %v", n)
case ODOT:
nn := nn.(*SelectorExpr)
n = nn.X

View file

@ -463,6 +463,62 @@ func (n *Decl) editChildren(edit func(Node) Node) {
}
}
func (n *DynamicType) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
func (n *DynamicType) copy() Node {
c := *n
c.init = copyNodes(c.init)
return &c
}
func (n *DynamicType) doChildren(do func(Node) bool) bool {
if doNodes(n.init, do) {
return true
}
if n.X != nil && do(n.X) {
return true
}
if n.ITab != nil && do(n.ITab) {
return true
}
return false
}
func (n *DynamicType) editChildren(edit func(Node) Node) {
editNodes(n.init, edit)
if n.X != nil {
n.X = edit(n.X).(Node)
}
if n.ITab != nil {
n.ITab = edit(n.ITab).(Node)
}
}
func (n *DynamicTypeAssertExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
func (n *DynamicTypeAssertExpr) copy() Node {
c := *n
c.init = copyNodes(c.init)
return &c
}
func (n *DynamicTypeAssertExpr) doChildren(do func(Node) bool) bool {
if doNodes(n.init, do) {
return true
}
if n.X != nil && do(n.X) {
return true
}
if n.T != nil && do(n.T) {
return true
}
return false
}
func (n *DynamicTypeAssertExpr) editChildren(edit func(Node) Node) {
editNodes(n.init, edit)
if n.X != nil {
n.X = edit(n.X).(Node)
}
if n.T != nil {
n.T = edit(n.T).(Node)
}
}
func (n *ForStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
func (n *ForStmt) copy() Node {
c := *n
@ -947,6 +1003,22 @@ func (n *RangeStmt) editChildren(edit func(Node) Node) {
}
}
func (n *RawOrigExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
func (n *RawOrigExpr) copy() Node {
c := *n
c.init = copyNodes(c.init)
return &c
}
func (n *RawOrigExpr) doChildren(do func(Node) bool) bool {
if doNodes(n.init, do) {
return true
}
return false
}
func (n *RawOrigExpr) editChildren(edit func(Node) Node) {
editNodes(n.init, edit)
}
func (n *ResultExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
func (n *ResultExpr) copy() Node {
c := *n

View file

@ -41,18 +41,18 @@ func _() {
_ = x[OCALLFUNC-30]
_ = x[OCALLMETH-31]
_ = x[OCALLINTER-32]
_ = x[OCALLPART-33]
_ = x[OCAP-34]
_ = x[OCLOSE-35]
_ = x[OCLOSURE-36]
_ = x[OCOMPLIT-37]
_ = x[OMAPLIT-38]
_ = x[OSTRUCTLIT-39]
_ = x[OARRAYLIT-40]
_ = x[OSLICELIT-41]
_ = x[OPTRLIT-42]
_ = x[OCONV-43]
_ = x[OCONVIFACE-44]
_ = x[OCAP-33]
_ = x[OCLOSE-34]
_ = x[OCLOSURE-35]
_ = x[OCOMPLIT-36]
_ = x[OMAPLIT-37]
_ = x[OSTRUCTLIT-38]
_ = x[OARRAYLIT-39]
_ = x[OSLICELIT-40]
_ = x[OPTRLIT-41]
_ = x[OCONV-42]
_ = x[OCONVIFACE-43]
_ = x[OCONVIDATA-44]
_ = x[OCONVNOP-45]
_ = x[OCOPY-46]
_ = x[ODCL-47]
@ -109,65 +109,72 @@ func _() {
_ = x[OSLICE3ARR-98]
_ = x[OSLICEHEADER-99]
_ = x[ORECOVER-100]
_ = x[ORECV-101]
_ = x[ORUNESTR-102]
_ = x[OSELRECV2-103]
_ = x[OIOTA-104]
_ = x[OREAL-105]
_ = x[OIMAG-106]
_ = x[OCOMPLEX-107]
_ = x[OALIGNOF-108]
_ = x[OOFFSETOF-109]
_ = x[OSIZEOF-110]
_ = x[OUNSAFEADD-111]
_ = x[OUNSAFESLICE-112]
_ = x[OMETHEXPR-113]
_ = x[OBLOCK-114]
_ = x[OBREAK-115]
_ = x[OCASE-116]
_ = x[OCONTINUE-117]
_ = x[ODEFER-118]
_ = x[OFALL-119]
_ = x[OFOR-120]
_ = x[OFORUNTIL-121]
_ = x[OGOTO-122]
_ = x[OIF-123]
_ = x[OLABEL-124]
_ = x[OGO-125]
_ = x[ORANGE-126]
_ = x[ORETURN-127]
_ = x[OSELECT-128]
_ = x[OSWITCH-129]
_ = x[OTYPESW-130]
_ = x[OFUNCINST-131]
_ = x[OTCHAN-132]
_ = x[OTMAP-133]
_ = x[OTSTRUCT-134]
_ = x[OTINTER-135]
_ = x[OTFUNC-136]
_ = x[OTARRAY-137]
_ = x[OTSLICE-138]
_ = x[OINLCALL-139]
_ = x[OEFACE-140]
_ = x[OITAB-141]
_ = x[OIDATA-142]
_ = x[OSPTR-143]
_ = x[OCFUNC-144]
_ = x[OCHECKNIL-145]
_ = x[OVARDEF-146]
_ = x[OVARKILL-147]
_ = x[OVARLIVE-148]
_ = x[ORESULT-149]
_ = x[OINLMARK-150]
_ = x[OLINKSYMOFFSET-151]
_ = x[OTAILCALL-152]
_ = x[OGETG-153]
_ = x[OEND-154]
_ = x[ORECOVERFP-101]
_ = x[ORECV-102]
_ = x[ORUNESTR-103]
_ = x[OSELRECV2-104]
_ = x[OIOTA-105]
_ = x[OREAL-106]
_ = x[OIMAG-107]
_ = x[OCOMPLEX-108]
_ = x[OALIGNOF-109]
_ = x[OOFFSETOF-110]
_ = x[OSIZEOF-111]
_ = x[OUNSAFEADD-112]
_ = x[OUNSAFESLICE-113]
_ = x[OMETHEXPR-114]
_ = x[OMETHVALUE-115]
_ = x[OBLOCK-116]
_ = x[OBREAK-117]
_ = x[OCASE-118]
_ = x[OCONTINUE-119]
_ = x[ODEFER-120]
_ = x[OFALL-121]
_ = x[OFOR-122]
_ = x[OFORUNTIL-123]
_ = x[OGOTO-124]
_ = x[OIF-125]
_ = x[OLABEL-126]
_ = x[OGO-127]
_ = x[ORANGE-128]
_ = x[ORETURN-129]
_ = x[OSELECT-130]
_ = x[OSWITCH-131]
_ = x[OTYPESW-132]
_ = x[OFUNCINST-133]
_ = x[OTCHAN-134]
_ = x[OTMAP-135]
_ = x[OTSTRUCT-136]
_ = x[OTINTER-137]
_ = x[OTFUNC-138]
_ = x[OTARRAY-139]
_ = x[OTSLICE-140]
_ = x[OINLCALL-141]
_ = x[OEFACE-142]
_ = x[OITAB-143]
_ = x[OIDATA-144]
_ = x[OSPTR-145]
_ = x[OCFUNC-146]
_ = x[OCHECKNIL-147]
_ = x[OVARDEF-148]
_ = x[OVARKILL-149]
_ = x[OVARLIVE-150]
_ = x[ORESULT-151]
_ = x[OINLMARK-152]
_ = x[OLINKSYMOFFSET-153]
_ = x[ODYNAMICDOTTYPE-154]
_ = x[ODYNAMICDOTTYPE2-155]
_ = x[ODYNAMICTYPE-156]
_ = x[OTAILCALL-157]
_ = x[OGETG-158]
_ = x[OGETCALLERPC-159]
_ = x[OGETCALLERSP-160]
_ = x[OEND-161]
}
const _Op_name = "XXXNAMENONAMETYPEPACKLITERALNILADDSUBORXORADDSTRADDRANDANDAPPENDBYTES2STRBYTES2STRTMPRUNES2STRSTR2BYTESSTR2BYTESTMPSTR2RUNESSLICE2ARRPTRASAS2AS2DOTTYPEAS2FUNCAS2MAPRAS2RECVASOPCALLCALLFUNCCALLMETHCALLINTERCALLPARTCAPCLOSECLOSURECOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVNOPCOPYDCLDCLFUNCDCLCONSTDCLTYPEDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTDEREFINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMAKESLICECOPYMULDIVMODLSHRSHANDANDNOTNEWNOTBITNOTPLUSNEGORORPANICPRINTPRINTNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRSLICEHEADERRECOVERRECVRUNESTRSELRECV2IOTAREALIMAGCOMPLEXALIGNOFOFFSETOFSIZEOFUNSAFEADDUNSAFESLICEMETHEXPRBLOCKBREAKCASECONTINUEDEFERFALLFORFORUNTILGOTOIFLABELGORANGERETURNSELECTSWITCHTYPESWFUNCINSTTCHANTMAPTSTRUCTTINTERTFUNCTARRAYTSLICEINLCALLEFACEITABIDATASPTRCFUNCCHECKNILVARDEFVARKILLVARLIVERESULTINLMARKLINKSYMOFFSETTAILCALLGETGEND"
const _Op_name = "XXXNAMENONAMETYPEPACKLITERALNILADDSUBORXORADDSTRADDRANDANDAPPENDBYTES2STRBYTES2STRTMPRUNES2STRSTR2BYTESSTR2BYTESTMPSTR2RUNESSLICE2ARRPTRASAS2AS2DOTTYPEAS2FUNCAS2MAPRAS2RECVASOPCALLCALLFUNCCALLMETHCALLINTERCAPCLOSECLOSURECOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVIDATACONVNOPCOPYDCLDCLFUNCDCLCONSTDCLTYPEDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTDEREFINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMAKESLICECOPYMULDIVMODLSHRSHANDANDNOTNEWNOTBITNOTPLUSNEGORORPANICPRINTPRINTNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRSLICEHEADERRECOVERRECOVERFPRECVRUNESTRSELRECV2IOTAREALIMAGCOMPLEXALIGNOFOFFSETOFSIZEOFUNSAFEADDUNSAFESLICEMETHEXPRMETHVALUEBLOCKBREAKCASECONTINUEDEFERFALLFORFORUNTILGOTOIFLABELGORANGERETURNSELECTSWITCHTYPESWFUNCINSTTCHANTMAPTSTRUCTTINTERTFUNCTARRAYTSLICEINLCALLEFACEITABIDATASPTRCFUNCCHECKNILVARDEFVARKILLVARLIVERESULTINLMARKLINKSYMOFFSETDYNAMICDOTTYPEDYNAMICDOTTYPE2DYNAMICTYPETAILCALLGETGGETCALLERPCGETCALLERSPEND"
var _Op_index = [...]uint16{0, 3, 7, 13, 17, 21, 28, 31, 34, 37, 39, 42, 48, 52, 58, 64, 73, 85, 94, 103, 115, 124, 136, 138, 141, 151, 158, 165, 172, 176, 180, 188, 196, 205, 213, 216, 221, 228, 235, 241, 250, 258, 266, 272, 276, 285, 292, 296, 299, 306, 314, 321, 327, 330, 336, 343, 351, 355, 362, 370, 372, 374, 376, 378, 380, 382, 387, 392, 400, 403, 412, 415, 419, 427, 434, 443, 456, 459, 462, 465, 468, 471, 474, 480, 483, 486, 492, 496, 499, 503, 508, 513, 519, 524, 528, 533, 541, 549, 555, 564, 575, 582, 586, 593, 601, 605, 609, 613, 620, 627, 635, 641, 650, 661, 669, 674, 679, 683, 691, 696, 700, 703, 711, 715, 717, 722, 724, 729, 735, 741, 747, 753, 761, 766, 770, 777, 783, 788, 794, 800, 807, 812, 816, 821, 825, 830, 838, 844, 851, 858, 864, 871, 884, 892, 896, 899}
var _Op_index = [...]uint16{0, 3, 7, 13, 17, 21, 28, 31, 34, 37, 39, 42, 48, 52, 58, 64, 73, 85, 94, 103, 115, 124, 136, 138, 141, 151, 158, 165, 172, 176, 180, 188, 196, 205, 208, 213, 220, 227, 233, 242, 250, 258, 264, 268, 277, 286, 293, 297, 300, 307, 315, 322, 328, 331, 337, 344, 352, 356, 363, 371, 373, 375, 377, 379, 381, 383, 388, 393, 401, 404, 413, 416, 420, 428, 435, 444, 457, 460, 463, 466, 469, 472, 475, 481, 484, 487, 493, 497, 500, 504, 509, 514, 520, 525, 529, 534, 542, 550, 556, 565, 576, 583, 592, 596, 603, 611, 615, 619, 623, 630, 637, 645, 651, 660, 671, 679, 688, 693, 698, 702, 710, 715, 719, 722, 730, 734, 736, 741, 743, 748, 754, 760, 766, 772, 780, 785, 789, 796, 802, 807, 813, 819, 826, 831, 835, 840, 844, 849, 857, 863, 870, 877, 883, 890, 903, 917, 932, 943, 951, 955, 966, 977, 980}
func (i Op) String() string {
if i >= Op(len(_Op_index)-1) {

View file

@ -32,7 +32,4 @@ type Package struct {
// Exported (or re-exported) symbols.
Exports []*Name
// Map from function names of stencils to already-created stencils.
Stencils map[*types.Sym]*Func
}

View file

@ -90,7 +90,7 @@ func (v *bottomUpVisitor) visit(n *Func) uint32 {
if n := n.(*Name); n.Class == PFUNC {
do(n.Defn)
}
case ODOTMETH, OCALLPART, OMETHEXPR:
case ODOTMETH, OMETHVALUE, OMETHEXPR:
if fn := MethodExprName(n); fn != nil {
do(fn.Defn)
}

View file

@ -300,11 +300,36 @@ func (n *typeNode) CanBeNtype() {}
// TypeNode returns the Node representing the type t.
func TypeNode(t *types.Type) Ntype {
return TypeNodeAt(src.NoXPos, t)
}
// TypeNodeAt is like TypeNode, but allows specifying the position
// information if a new OTYPE needs to be constructed.
//
// Deprecated: Use TypeNode instead. For typical use, the position for
// an anonymous OTYPE node should not matter. However, TypeNodeAt is
// available for use with toolstash -cmp to refactor existing code
// that is sensitive to OTYPE position.
func TypeNodeAt(pos src.XPos, t *types.Type) Ntype {
if n := t.Obj(); n != nil {
if n.Type() != t {
base.Fatalf("type skew: %v has type %v, but expected %v", n, n.Type(), t)
}
return n.(Ntype)
}
return newTypeNode(src.NoXPos, t)
return newTypeNode(pos, t)
}
// A DynamicType represents the target type in a type switch.
type DynamicType struct {
miniExpr
X Node // a *runtime._type for the targeted type
ITab Node // for type switches from nonempty interfaces to non-interfaces, this is the itab for that pair.
}
func NewDynamicType(pos src.XPos, x Node) *DynamicType {
n := &DynamicType{X: x}
n.pos = pos
n.op = ODYNAMICTYPE
return n
}

View file

@ -66,7 +66,7 @@ func Float64Val(v constant.Value) float64 {
func AssertValidTypeForConst(t *types.Type, v constant.Value) {
if !ValidTypeForConst(t, v) {
base.Fatalf("%v does not represent %v", t, v)
base.Fatalf("%v (%v) does not represent %v (%v)", t, t.Kind(), v, v.Kind())
}
}

View file

@ -1082,6 +1082,10 @@ func (lv *liveness) showlive(v *ssa.Value, live bitvec.BitVec) {
if base.Flag.Live == 0 || ir.FuncName(lv.fn) == "init" || strings.HasPrefix(ir.FuncName(lv.fn), ".") {
return
}
if lv.fn.Wrapper() || lv.fn.Dupok() {
// Skip reporting liveness information for compiler-generated wrappers.
return
}
if !(v == nil || v.Op.IsCall()) {
// Historically we only printed this information at
// calls. Keep doing so.

View file

@ -209,7 +209,7 @@ func s15a8(x *[15]int64) [15]int64 {
want(t, slogged, `{"range":{"start":{"line":11,"character":6},"end":{"line":11,"character":6}},"severity":3,"code":"isInBounds","source":"go compiler","message":""}`)
want(t, slogged, `{"range":{"start":{"line":7,"character":6},"end":{"line":7,"character":6}},"severity":3,"code":"canInlineFunction","source":"go compiler","message":"cost: 35"}`)
// escape analysis explanation
want(t, slogged, `{"range":{"start":{"line":7,"character":13},"end":{"line":7,"character":13}},"severity":3,"code":"leak","source":"go compiler","message":"parameter z leaks to ~r2 with derefs=0",`+
want(t, slogged, `{"range":{"start":{"line":7,"character":13},"end":{"line":7,"character":13}},"severity":3,"code":"leak","source":"go compiler","message":"parameter z leaks to ~r0 with derefs=0",`+
`"relatedInformation":[`+
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: flow: y = z:"},`+
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: from y := z (assign-pair)"},`+
@ -220,8 +220,8 @@ func s15a8(x *[15]int64) [15]int64 {
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: from \u0026y.b (address-of)"},`+
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":4,"character":9},"end":{"line":4,"character":9}}},"message":"inlineLoc"},`+
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: from ~R0 = \u0026y.b (assign-pair)"},`+
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":3},"end":{"line":9,"character":3}}},"message":"escflow: flow: ~r2 = ~R0:"},`+
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":3},"end":{"line":9,"character":3}}},"message":"escflow: from return (*int)(~R0) (return)"}]}`)
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":3},"end":{"line":9,"character":3}}},"message":"escflow: flow: ~r0 = ~R0:"},`+
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":3},"end":{"line":9,"character":3}}},"message":"escflow: from return ~R0 (return)"}]}`)
})
}

View file

@ -21,7 +21,6 @@ func Init(arch *ssagen.ArchInfo) {
arch.SoftFloat = (buildcfg.GOMIPS == "softfloat")
arch.ZeroRange = zerorange
arch.Ginsnop = ginsnop
arch.Ginsnopdefer = ginsnop
arch.SSAMarkMoves = func(s *ssagen.State, b *ssa.Block) {}
arch.SSAGenValue = ssaGenValue
arch.SSAGenBlock = ssaGenBlock

View file

@ -21,7 +21,6 @@ func Init(arch *ssagen.ArchInfo) {
arch.SoftFloat = buildcfg.GOMIPS64 == "softfloat"
arch.ZeroRange = zerorange
arch.Ginsnop = ginsnop
arch.Ginsnopdefer = ginsnop
arch.SSAMarkMoves = func(s *ssagen.State, b *ssa.Block) {}
arch.SSAGenValue = ssaGenValue

View file

@ -0,0 +1,124 @@
// UNREVIEWED
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package noder
type code interface {
marker() syncMarker
value() int
}
type codeVal int
func (c codeVal) marker() syncMarker { return syncVal }
func (c codeVal) value() int { return int(c) }
const (
valBool codeVal = iota
valString
valInt64
valBigInt
valBigRat
valBigFloat
)
type codeType int
func (c codeType) marker() syncMarker { return syncType }
func (c codeType) value() int { return int(c) }
const (
typeBasic codeType = iota
typeNamed
typePointer
typeSlice
typeArray
typeChan
typeMap
typeSignature
typeStruct
typeInterface
typeUnion
typeTypeParam
)
type codeObj int
func (c codeObj) marker() syncMarker { return syncCodeObj }
func (c codeObj) value() int { return int(c) }
const (
objAlias codeObj = iota
objConst
objType
objFunc
objVar
objStub
)
type codeStmt int
func (c codeStmt) marker() syncMarker { return syncStmt1 }
func (c codeStmt) value() int { return int(c) }
const (
stmtEnd codeStmt = iota
stmtLabel
stmtBlock
stmtExpr
stmtSend
stmtAssign
stmtAssignOp
stmtIncDec
stmtBranch
stmtCall
stmtReturn
stmtIf
stmtFor
stmtSwitch
stmtSelect
// TODO(mdempsky): Remove after we don't care about toolstash -cmp.
stmtTypeDeclHack
)
type codeExpr int
func (c codeExpr) marker() syncMarker { return syncExpr }
func (c codeExpr) value() int { return int(c) }
// TODO(mdempsky): Split expr into addr, for lvalues.
const (
exprNone codeExpr = iota
exprConst
exprType // type expression
exprLocal // local variable
exprName // global variable or function
exprBlank
exprCompLit
exprFuncLit
exprSelector
exprIndex
exprSlice
exprAssert
exprUnaryOp
exprBinaryOp
exprCall
exprConvert
)
type codeDecl int
func (c codeDecl) marker() syncMarker { return syncDecl }
func (c codeDecl) value() int { return int(c) }
const (
declEnd codeDecl = iota
declFunc
declMethod
declVar
declOther
)

View file

@ -41,20 +41,27 @@ func (g *irgen) decls(decls []syntax.Decl) []ir.Node {
}
func (g *irgen) importDecl(p *noder, decl *syntax.ImportDecl) {
// TODO(mdempsky): Merge with gcimports so we don't have to import
// packages twice.
g.pragmaFlags(decl.Pragma, 0)
ipkg := importfile(decl)
if ipkg == ir.Pkgs.Unsafe {
// Get the imported package's path, as resolved already by types2
// and gcimporter. This is the same path as would be computed by
// parseImportPath.
switch pkgNameOf(g.info, decl).Imported().Path() {
case "unsafe":
p.importedUnsafe = true
}
if ipkg.Path == "embed" {
case "embed":
p.importedEmbed = true
}
}
// pkgNameOf returns the PkgName associated with the given ImportDecl.
func pkgNameOf(info *types2.Info, decl *syntax.ImportDecl) *types2.PkgName {
if name := decl.LocalPkgName; name != nil {
return info.Defs[name].(*types2.PkgName)
}
return info.Implicits[decl].(*types2.PkgName)
}
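The exported go/types API mirrors types2 here: a renamed import defines its *ast.Ident in Info.Defs, while a plain import records only an implicit PkgName in Info.Implicits. A runnable sketch against go/types:

package main

import (
	"fmt"
	"go/ast"
	"go/importer"
	"go/parser"
	"go/token"
	"go/types"
)

const src = `package p
import f "fmt"
import "strings"
var _ = f.Sprint
var _ = strings.ToUpper
`

func main() {
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	info := &types.Info{
		Defs:      make(map[*ast.Ident]types.Object),
		Implicits: make(map[ast.Node]types.Object),
	}
	conf := types.Config{Importer: importer.Default()}
	if _, err := conf.Check("p", fset, []*ast.File{file}, info); err != nil {
		panic(err)
	}
	for _, decl := range file.Imports {
		if decl.Name != nil { // renamed import: PkgName lives in Defs
			fmt.Println("Defs:", info.Defs[decl.Name].(*types.PkgName).Imported().Path())
		} else { // plain import: PkgName lives in Implicits
			fmt.Println("Implicits:", info.Implicits[decl].(*types.PkgName).Imported().Path())
		}
	}
}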
func (g *irgen) constDecl(out *ir.Nodes, decl *syntax.ConstDecl) {
g.pragmaFlags(decl.Pragma, 0)
@ -95,7 +102,21 @@ func (g *irgen) funcDecl(out *ir.Nodes, decl *syntax.FuncDecl) {
g.target.Inits = append(g.target.Inits, fn)
}
if fn.Type().HasTParam() {
g.topFuncIsGeneric = true
}
g.funcBody(fn, decl.Recv, decl.Type, decl.Body)
g.topFuncIsGeneric = false
if fn.Type().HasTParam() && fn.Body != nil {
// Set pointers to the dcls/body of a generic function/method in
// the Inl struct, so it is marked for export, is available for
// stenciling, and works with Inline_Flood().
fn.Inl = &ir.Inline{
Cost: 1,
Dcl: fn.Dcl,
Body: fn.Body,
}
}
out.Append(fn)
}
@ -104,13 +125,7 @@ func (g *irgen) typeDecl(out *ir.Nodes, decl *syntax.TypeDecl) {
if decl.Alias {
name, _ := g.def(decl.Name)
g.pragmaFlags(decl.Pragma, 0)
// TODO(mdempsky): This matches how typecheckdef marks aliases for
// export, but this won't generalize to exporting function-scoped
// type aliases. We should maybe just use n.Alias() instead.
if ir.CurFunc == nil {
name.Sym().Def = ir.TypeNode(name.Type())
}
assert(name.Alias()) // should be set by irgen.obj
out.Append(ir.NewDecl(g.pos(decl), ir.ODCLTYPE, name))
return
@ -154,11 +169,15 @@ func (g *irgen) typeDecl(out *ir.Nodes, decl *syntax.TypeDecl) {
// [mdempsky: Subtleties like these are why I always vehemently
// object to new type pragmas.]
ntyp.SetUnderlying(g.typeExpr(decl.Type))
if len(decl.TParamList) > 0 {
// Set HasTParam if there are any tparams, even if no tparams are
// used in the type itself (e.g., if it is an empty struct, or no
// fields in the struct use the tparam).
ntyp.SetHasTParam(true)
tparams := otyp.(*types2.Named).TParams()
if n := tparams.Len(); n > 0 {
rparams := make([]*types.Type, n)
for i := range rparams {
rparams[i] = g.typ(tparams.At(i).Type())
}
// This will set hasTParam flag if any rparams are not concrete types.
ntyp.SetRParams(rparams)
}
types.ResumeCheckSize()

View file

@ -0,0 +1,301 @@
// UNREVIEWED
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package noder
import (
"encoding/binary"
"fmt"
"go/constant"
"go/token"
"math/big"
"os"
"runtime"
"strings"
"cmd/compile/internal/base"
)
type pkgDecoder struct {
pkgPath string
elemEndsEnds [numRelocs]uint32
elemEnds []uint32
elemData string
}
func newPkgDecoder(pkgPath, input string) pkgDecoder {
pr := pkgDecoder{
pkgPath: pkgPath,
}
// TODO(mdempsky): Implement direct indexing of input string to
// avoid copying the position information.
r := strings.NewReader(input)
assert(binary.Read(r, binary.LittleEndian, pr.elemEndsEnds[:]) == nil)
pr.elemEnds = make([]uint32, pr.elemEndsEnds[len(pr.elemEndsEnds)-1])
assert(binary.Read(r, binary.LittleEndian, pr.elemEnds[:]) == nil)
pos, err := r.Seek(0, os.SEEK_CUR)
assert(err == nil)
pr.elemData = input[pos:]
assert(len(pr.elemData) == int(pr.elemEnds[len(pr.elemEnds)-1]))
return pr
}
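The element table holds cumulative end offsets, so element i of a section occupies data[ends[i-1]:ends[i]]. A tiny standalone model of dataIdx's arithmetic:

package main

import "fmt"

// dataIdx slices element idx out of data using cumulative end offsets,
// the same per-section arithmetic pkgDecoder.dataIdx performs.
func dataIdx(ends []uint32, data string, idx int) string {
	var start uint32
	if idx > 0 {
		start = ends[idx-1]
	}
	return data[start:ends[idx]]
}

func main() {
	data := "foobarbaz"
	ends := []uint32{3, 6, 9} // "foo", "bar", "baz"
	for i := range ends {
		fmt.Println(dataIdx(ends, data, i))
	}
}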
func (pr *pkgDecoder) numElems(k reloc) int {
count := int(pr.elemEndsEnds[k])
if k > 0 {
count -= int(pr.elemEndsEnds[k-1])
}
return count
}
func (pr *pkgDecoder) totalElems() int {
return len(pr.elemEnds)
}
func (pr *pkgDecoder) absIdx(k reloc, idx int) int {
absIdx := idx
if k > 0 {
absIdx += int(pr.elemEndsEnds[k-1])
}
if absIdx >= int(pr.elemEndsEnds[k]) {
base.Fatalf("%v:%v is out of bounds; %v", k, idx, pr.elemEndsEnds)
}
return absIdx
}
func (pr *pkgDecoder) dataIdx(k reloc, idx int) string {
absIdx := pr.absIdx(k, idx)
var start uint32
if absIdx > 0 {
start = pr.elemEnds[absIdx-1]
}
end := pr.elemEnds[absIdx]
return pr.elemData[start:end]
}
func (pr *pkgDecoder) stringIdx(idx int) string {
return pr.dataIdx(relocString, idx)
}
func (pr *pkgDecoder) newDecoder(k reloc, idx int, marker syncMarker) decoder {
r := pr.newDecoderRaw(k, idx)
r.sync(marker)
return r
}
func (pr *pkgDecoder) newDecoderRaw(k reloc, idx int) decoder {
r := decoder{
common: pr,
k: k,
idx: idx,
}
// TODO(mdempsky) r.data.Reset(...) after #44505 is resolved.
r.data = *strings.NewReader(pr.dataIdx(k, idx))
r.sync(syncRelocs)
r.relocs = make([]relocEnt, r.len())
for i := range r.relocs {
r.sync(syncReloc)
r.relocs[i] = relocEnt{reloc(r.len()), r.len()}
}
return r
}
type decoder struct {
common *pkgDecoder
relocs []relocEnt
data strings.Reader
k reloc
idx int
}
func (r *decoder) checkErr(err error) {
if err != nil {
base.Fatalf("unexpected error: %v", err)
}
}
func (r *decoder) rawUvarint() uint64 {
x, err := binary.ReadUvarint(&r.data)
r.checkErr(err)
return x
}
func (r *decoder) rawVarint() int64 {
ux := r.rawUvarint()
// Zig-zag decode.
x := int64(ux >> 1)
if ux&1 != 0 {
x = ^x
}
return x
}
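Zig-zag maps small magnitudes of either sign to small unsigned codes (0->0, -1->1, 1->2, -2->3, ...), which keeps varints short. A runnable round-trip of the exact transform this decoder/encoder pair uses:

package main

import "fmt"

func zigzag(x int64) uint64 { // encoder.rawVarint's transform
	ux := uint64(x) << 1
	if x < 0 {
		ux = ^ux
	}
	return ux
}

func unzigzag(ux uint64) int64 { // decoder.rawVarint's inverse
	x := int64(ux >> 1)
	if ux&1 != 0 {
		x = ^x
	}
	return x
}

func main() {
	for _, x := range []int64{0, -1, 1, -2, 2, -64} {
		ux := zigzag(x)
		fmt.Printf("%d -> %d -> %d\n", x, ux, unzigzag(ux))
	}
}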
func (r *decoder) rawReloc(k reloc, idx int) int {
e := r.relocs[idx]
assert(e.kind == k)
return e.idx
}
func (r *decoder) sync(mWant syncMarker) {
if !enableSync {
return
}
pos, _ := r.data.Seek(0, os.SEEK_CUR) // TODO(mdempsky): io.SeekCurrent after #44505 is resolved
mHave := syncMarker(r.rawUvarint())
writerPCs := make([]int, r.rawUvarint())
for i := range writerPCs {
writerPCs[i] = int(r.rawUvarint())
}
if mHave == mWant {
return
}
// There's some tension here between printing:
//
// (1) full file paths that tools can recognize (e.g., so emacs
// hyperlinks the "file:line" text for easy navigation), or
//
// (2) short file paths that are easier for humans to read (e.g., by
// omitting redundant or irrelevant details, so it's easier to
// focus on the useful bits that remain).
//
// The current formatting favors the former, as it seems more
// helpful in practice. But perhaps the formatting could be improved
// to better address both concerns. For example, use relative file
// paths if they would be shorter, or rewrite file paths to contain
// "$GOROOT" (like objabi.AbsFile does) if tools can be taught how
// to reliably expand that again.
fmt.Printf("export data desync: package %q, section %v, index %v, offset %v\n", r.common.pkgPath, r.k, r.idx, pos)
fmt.Printf("\nfound %v, written at:\n", mHave)
if len(writerPCs) == 0 {
fmt.Printf("\t[stack trace unavailable; recompile package %q with -d=syncframes]\n", r.common.pkgPath)
}
for _, pc := range writerPCs {
fmt.Printf("\t%s\n", r.common.stringIdx(r.rawReloc(relocString, pc)))
}
fmt.Printf("\nexpected %v, reading at:\n", mWant)
var readerPCs [32]uintptr // TODO(mdempsky): Dynamically size?
n := runtime.Callers(2, readerPCs[:])
for _, pc := range fmtFrames(readerPCs[:n]...) {
fmt.Printf("\t%s\n", pc)
}
// We already printed a stack trace for the reader, so now we can
// simply exit. Printing a second one with panic or base.Fatalf
// would just be noise.
os.Exit(1)
}
func (r *decoder) bool() bool {
r.sync(syncBool)
x, err := r.data.ReadByte()
r.checkErr(err)
assert(x < 2)
return x != 0
}
func (r *decoder) int64() int64 {
r.sync(syncInt64)
return r.rawVarint()
}
func (r *decoder) uint64() uint64 {
r.sync(syncUint64)
return r.rawUvarint()
}
func (r *decoder) len() int { x := r.uint64(); v := int(x); assert(uint64(v) == x); return v }
func (r *decoder) int() int { x := r.int64(); v := int(x); assert(int64(v) == x); return v }
func (r *decoder) uint() uint { x := r.uint64(); v := uint(x); assert(uint64(v) == x); return v }
func (r *decoder) code(mark syncMarker) int {
r.sync(mark)
return r.len()
}
func (r *decoder) reloc(k reloc) int {
r.sync(syncUseReloc)
return r.rawReloc(k, r.len())
}
func (r *decoder) string() string {
r.sync(syncString)
return r.common.stringIdx(r.reloc(relocString))
}
func (r *decoder) strings() []string {
res := make([]string, r.len())
for i := range res {
res[i] = r.string()
}
return res
}
func (r *decoder) rawValue() constant.Value {
isComplex := r.bool()
val := r.scalar()
if isComplex {
val = constant.BinaryOp(val, token.ADD, constant.MakeImag(r.scalar()))
}
return val
}
func (r *decoder) scalar() constant.Value {
switch tag := codeVal(r.code(syncVal)); tag {
default:
panic(fmt.Sprintf("unexpected scalar tag: %v", tag))
case valBool:
return constant.MakeBool(r.bool())
case valString:
return constant.MakeString(r.string())
case valInt64:
return constant.MakeInt64(r.int64())
case valBigInt:
return constant.Make(r.bigInt())
case valBigRat:
num := r.bigInt()
denom := r.bigInt()
return constant.Make(new(big.Rat).SetFrac(num, denom))
case valBigFloat:
return constant.Make(r.bigFloat())
}
}
func (r *decoder) bigInt() *big.Int {
v := new(big.Int).SetBytes([]byte(r.string()))
if r.bool() {
v.Neg(v)
}
return v
}
func (r *decoder) bigFloat() *big.Float {
v := new(big.Float).SetPrec(512)
assert(v.UnmarshalText([]byte(r.string())) == nil)
return v
}

View file

@ -0,0 +1,284 @@
// UNREVIEWED
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package noder
import (
"bytes"
"encoding/binary"
"fmt"
"go/constant"
"io"
"math/big"
"runtime"
"cmd/compile/internal/base"
)
type pkgEncoder struct {
elems [numRelocs][]string
stringsIdx map[string]int
}
func newPkgEncoder() pkgEncoder {
return pkgEncoder{
stringsIdx: make(map[string]int),
}
}
func (pw *pkgEncoder) dump(out io.Writer) {
writeUint32 := func(x uint32) {
assert(binary.Write(out, binary.LittleEndian, x) == nil)
}
var sum uint32
for _, elems := range &pw.elems {
sum += uint32(len(elems))
writeUint32(sum)
}
sum = 0
for _, elems := range &pw.elems {
for _, elem := range elems {
sum += uint32(len(elem))
writeUint32(sum)
}
}
for _, elems := range &pw.elems {
for _, elem := range elems {
_, err := io.WriteString(out, elem)
assert(err == nil)
}
}
}
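So the stream is three runs: cumulative element counts per section, cumulative byte offsets per element, then the raw element bodies. A toy two-section writer producing the same wire layout (section contents invented):

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	sections := [2][]string{
		{"foo", "bar"}, // e.g. a string section
		{"baz"},        // e.g. a metadata section
	}
	var out bytes.Buffer
	w := func(x uint32) { binary.Write(&out, binary.LittleEndian, x) }

	var sum uint32
	for _, elems := range &sections { // cumulative element counts
		sum += uint32(len(elems))
		w(sum)
	}
	sum = 0
	for _, elems := range &sections { // cumulative byte offsets
		for _, e := range elems {
			sum += uint32(len(e))
			w(sum)
		}
	}
	for _, elems := range &sections { // raw element data
		for _, e := range elems {
			out.WriteString(e)
		}
	}
	fmt.Printf("% x\n", out.Bytes()) // counts 2,3; offsets 3,6,9; then "foobarbaz"
}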
func (pw *pkgEncoder) stringIdx(s string) int {
if idx, ok := pw.stringsIdx[s]; ok {
assert(pw.elems[relocString][idx] == s)
return idx
}
idx := len(pw.elems[relocString])
pw.elems[relocString] = append(pw.elems[relocString], s)
pw.stringsIdx[s] = idx
return idx
}
func (pw *pkgEncoder) newEncoder(k reloc, marker syncMarker) encoder {
e := pw.newEncoderRaw(k)
e.sync(marker)
return e
}
func (pw *pkgEncoder) newEncoderRaw(k reloc) encoder {
idx := len(pw.elems[k])
pw.elems[k] = append(pw.elems[k], "") // placeholder
return encoder{
p: pw,
k: k,
idx: idx,
}
}
// Encoders
type encoder struct {
p *pkgEncoder
relocs []relocEnt
data bytes.Buffer
encodingRelocHeader bool
k reloc
idx int
}
func (w *encoder) flush() int {
var sb bytes.Buffer // TODO(mdempsky): strings.Builder after #44505 is resolved
// Back up the data so we write the relocations at the front.
var tmp bytes.Buffer
io.Copy(&tmp, &w.data)
// TODO(mdempsky): Consider writing these out separately so they're
// easier to strip, along with function bodies, so that we can prune
// down to just the data that's relevant to go/types.
if w.encodingRelocHeader {
base.Fatalf("encodingRelocHeader already true; recursive flush?")
}
w.encodingRelocHeader = true
w.sync(syncRelocs)
w.len(len(w.relocs))
for _, rent := range w.relocs {
w.sync(syncReloc)
w.len(int(rent.kind))
w.len(rent.idx)
}
io.Copy(&sb, &w.data)
io.Copy(&sb, &tmp)
w.p.elems[w.k][w.idx] = sb.String()
return w.idx
}
func (w *encoder) checkErr(err error) {
if err != nil {
base.Fatalf("unexpected error: %v", err)
}
}
func (w *encoder) rawUvarint(x uint64) {
var buf [binary.MaxVarintLen64]byte
n := binary.PutUvarint(buf[:], x)
_, err := w.data.Write(buf[:n])
w.checkErr(err)
}
func (w *encoder) rawVarint(x int64) {
// Zig-zag encode.
ux := uint64(x) << 1
if x < 0 {
ux = ^ux
}
w.rawUvarint(ux)
}
func (w *encoder) rawReloc(r reloc, idx int) int {
// TODO(mdempsky): Use map for lookup.
for i, rent := range w.relocs {
if rent.kind == r && rent.idx == idx {
return i
}
}
i := len(w.relocs)
w.relocs = append(w.relocs, relocEnt{r, idx})
return i
}
func (w *encoder) sync(m syncMarker) {
if !enableSync {
return
}
// Writing out stack frame string references requires working
// relocations, but writing out the relocations themselves involves
// sync markers. To prevent infinite recursion, we simply trim the
// stack frame for sync markers within the relocation header.
var frames []string
if !w.encodingRelocHeader && base.Debug.SyncFrames > 0 {
pcs := make([]uintptr, base.Debug.SyncFrames)
n := runtime.Callers(2, pcs)
frames = fmtFrames(pcs[:n]...)
}
// TODO(mdempsky): Save space by writing out stack frames as a
// linked list so we can share common stack frames.
w.rawUvarint(uint64(m))
w.rawUvarint(uint64(len(frames)))
for _, frame := range frames {
w.rawUvarint(uint64(w.rawReloc(relocString, w.p.stringIdx(frame))))
}
}
func (w *encoder) bool(b bool) bool {
w.sync(syncBool)
var x byte
if b {
x = 1
}
err := w.data.WriteByte(x)
w.checkErr(err)
return b
}
func (w *encoder) int64(x int64) {
w.sync(syncInt64)
w.rawVarint(x)
}
func (w *encoder) uint64(x uint64) {
w.sync(syncUint64)
w.rawUvarint(x)
}
func (w *encoder) len(x int) { assert(x >= 0); w.uint64(uint64(x)) }
func (w *encoder) int(x int) { w.int64(int64(x)) }
func (w *encoder) uint(x uint) { w.uint64(uint64(x)) }
func (w *encoder) reloc(r reloc, idx int) {
w.sync(syncUseReloc)
w.len(w.rawReloc(r, idx))
}
func (w *encoder) code(c code) {
w.sync(c.marker())
w.len(c.value())
}
func (w *encoder) string(s string) {
w.sync(syncString)
w.reloc(relocString, w.p.stringIdx(s))
}
func (w *encoder) strings(ss []string) {
w.len(len(ss))
for _, s := range ss {
w.string(s)
}
}
func (w *encoder) rawValue(val constant.Value) {
if w.bool(val.Kind() == constant.Complex) {
w.scalar(constant.Real(val))
w.scalar(constant.Imag(val))
} else {
w.scalar(val)
}
}
func (w *encoder) scalar(val constant.Value) {
switch v := constant.Val(val).(type) {
default:
panic(fmt.Sprintf("unhandled %v (%v)", val, val.Kind()))
case bool:
w.code(valBool)
w.bool(v)
case string:
w.code(valString)
w.string(v)
case int64:
w.code(valInt64)
w.int64(v)
case *big.Int:
w.code(valBigInt)
w.bigInt(v)
case *big.Rat:
w.code(valBigRat)
w.bigInt(v.Num())
w.bigInt(v.Denom())
case *big.Float:
w.code(valBigFloat)
w.bigFloat(v)
}
}
func (w *encoder) bigInt(v *big.Int) {
b := v.Bytes()
w.string(string(b)) // TODO: More efficient encoding.
w.bool(v.Sign() < 0)
}
func (w *encoder) bigFloat(v *big.Float) {
b := v.Append(nil, 'p', -1)
w.string(string(b)) // TODO: More efficient encoding.
}
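Format 'p' with precision -1 renders a hexadecimal mantissa and binary exponent exactly, so the 512-bit value round-trips losslessly through UnmarshalText. A quick standalone check:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	v := new(big.Float).SetPrec(512)
	v.SetString("0.1") // not exactly representable in binary
	b := v.Append(nil, 'p', -1)
	fmt.Printf("%s\n", b) // hex mantissa, binary exponent, e.g. 0x.ccc...p-3

	u := new(big.Float).SetPrec(512)
	if err := u.UnmarshalText(b); err != nil {
		panic(err)
	}
	fmt.Println(v.Cmp(u) == 0) // true: exact round-trip
}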

View file

@ -0,0 +1,65 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package noder
import (
"bytes"
"fmt"
"io"
"cmd/compile/internal/base"
"cmd/compile/internal/typecheck"
"cmd/internal/bio"
)
// writeNewExportFunc is a hook that can be added to append extra
// export data after the normal export data section. It allows
// experimenting with new export data format designs without requiring
// immediate support in the go/internal or x/tools importers.
var writeNewExportFunc func(out io.Writer)
func WriteExports(out *bio.Writer) {
// When unified IR exports are enabled, we simply append it to the
// end of the normal export data (with compiler extensions
// disabled), and write an extra header giving its size.
//
// If the compiler sees this header, it knows to read the new data
// instead; meanwhile the go/types importers will silently ignore it
// and continue processing the old export data.
//
// This allows us to experiment with changes to the new export data
// format without needing to update the go/internal/gcimporter or
// (worse) x/tools/go/gcexportdata.
useNewExport := writeNewExportFunc != nil
var old, new bytes.Buffer
typecheck.WriteExports(&old, !useNewExport)
if useNewExport {
writeNewExportFunc(&new)
}
oldLen := old.Len()
newLen := new.Len()
if useNewExport {
fmt.Fprintf(out, "\nnewexportsize %v\n", newLen)
}
// The linker also looks for the $$ marker - use char after $$ to distinguish format.
out.WriteString("\n$$B\n") // indicate binary export format
io.Copy(out, &old)
out.WriteString("\n$$\n")
io.Copy(out, &new)
if base.Debug.Export != 0 {
fmt.Printf("BenchmarkExportSize:%s 1 %d bytes\n", base.Ctxt.Pkgpath, oldLen)
if useNewExport {
fmt.Printf("BenchmarkNewExportSize:%s 1 %d bytes\n", base.Ctxt.Pkgpath, newLen)
}
}
}
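// The resulting layout after the object file header is (sketch):
//
//	newexportsize <n>       (only when unified IR data is appended)
//	$$B
//	<old indexed export data>
//	$$
//	<n bytes of new export data>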

View file

@ -5,6 +5,8 @@
package noder
import (
"fmt"
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/syntax"
@ -15,6 +17,8 @@ import (
)
func (g *irgen) expr(expr syntax.Expr) ir.Node {
expr = unparen(expr) // skip parens; unneeded after parse+typecheck
if expr == nil {
return nil
}
@ -67,14 +71,16 @@ func (g *irgen) expr(expr syntax.Expr) ir.Node {
// Constant expression.
if tv.Value != nil {
return Const(g.pos(expr), g.typ(typ), tv.Value)
typ := g.typ(typ)
value := FixValue(typ, tv.Value)
return OrigConst(g.pos(expr), typ, value, constExprOp(expr), syntax.String(expr))
}
n := g.expr0(typ, expr)
if n.Typecheck() != 1 && n.Typecheck() != 3 {
base.FatalfAt(g.pos(expr), "missed typecheck: %+v", n)
}
if !g.match(n.Type(), typ, tv.HasOk()) {
if n.Op() != ir.OFUNCINST && !g.match(n.Type(), typ, tv.HasOk()) {
base.FatalfAt(g.pos(expr), "expected %L to have type %v", n, typ)
}
return n
@ -82,6 +88,11 @@ func (g *irgen) expr(expr syntax.Expr) ir.Node {
func (g *irgen) expr0(typ types2.Type, expr syntax.Expr) ir.Node {
pos := g.pos(expr)
assert(pos.IsKnown())
// Set base.Pos for transformation code that still uses base.Pos, rather than
// the pos of the node being converted.
base.Pos = pos
switch expr := expr.(type) {
case *syntax.Name:
@ -105,23 +116,30 @@ func (g *irgen) expr0(typ types2.Type, expr syntax.Expr) ir.Node {
// The key for the Inferred map is the CallExpr (if inferring
// types required the function arguments) or the IndexExpr below
// (if types could be inferred without the function arguments).
if inferred, ok := g.info.Inferred[expr]; ok && len(inferred.Targs) > 0 {
if inferred, ok := g.info.Inferred[expr]; ok && len(inferred.TArgs) > 0 {
// This is the case where inferring types required the
// types of the function arguments.
targs := make([]ir.Node, len(inferred.Targs))
for i, targ := range inferred.Targs {
targs := make([]ir.Node, len(inferred.TArgs))
for i, targ := range inferred.TArgs {
targs[i] = ir.TypeNode(g.typ(targ))
}
if fun.Op() == ir.OFUNCINST {
// Replace explicit type args with the full list that
// includes the additional inferred type args
// includes the additional inferred type args.
// Substitute the type args for the type params in
// the generic function's type.
fun.(*ir.InstExpr).Targs = targs
newt := g.substType(fun.Type(), fun.Type().TParams(), targs)
typed(newt, fun)
} else {
// Create a function instantiation here, given
// there are only inferred type args (e.g.
// min(5,6), where min is a generic function)
// Create a function instantiation here, given there
// are only inferred type args (e.g. min(5,6), where
// min is a generic function). Substitute the type
// args for the type params in the generic function's
// type.
inst := ir.NewInstExpr(pos, ir.OFUNCINST, fun, targs)
typed(fun.Type(), inst)
newt := g.substType(fun.Type(), fun.Type().TParams(), targs)
typed(newt, inst)
fun = inst
}
@ -131,12 +149,12 @@ func (g *irgen) expr0(typ types2.Type, expr syntax.Expr) ir.Node {
case *syntax.IndexExpr:
var targs []ir.Node
if inferred, ok := g.info.Inferred[expr]; ok && len(inferred.Targs) > 0 {
if inferred, ok := g.info.Inferred[expr]; ok && len(inferred.TArgs) > 0 {
// This is the partial type inference case where the types
// can be inferred from other type arguments without using
// the types of the function arguments.
targs = make([]ir.Node, len(inferred.Targs))
for i, targ := range inferred.Targs {
targs = make([]ir.Node, len(inferred.TArgs))
for i, targ := range inferred.TArgs {
targs[i] = ir.TypeNode(g.typ(targ))
}
} else if _, ok := expr.Index.(*syntax.ListExpr); ok {
@ -158,12 +176,16 @@ func (g *irgen) expr0(typ types2.Type, expr syntax.Expr) ir.Node {
panic("Incorrect argument for generic func instantiation")
}
n := ir.NewInstExpr(pos, ir.OFUNCINST, x, targs)
typed(g.typ(typ), n)
newt := g.typ(typ)
// Substitute the type args for the type params in the uninstantiated
// function's type. If there aren't enough type args, then the rest
// will be inferred at the call node, so don't try the substitution yet.
if x.Type().TParams().NumFields() == len(targs) {
newt = g.substType(g.typ(typ), x.Type().TParams(), targs)
}
typed(newt, n)
return n
case *syntax.ParenExpr:
return g.expr(expr.X) // skip parens; unneeded after parse+typecheck
case *syntax.SelectorExpr:
// Qualified identifier.
if name, ok := expr.X.(*syntax.Name); ok {
@ -193,7 +215,29 @@ func (g *irgen) expr0(typ types2.Type, expr syntax.Expr) ir.Node {
}
}
// selectorExpr resolves the choice of ODOT, ODOTPTR, OCALLPART (eventually
// substType does a normal type substitution, but tparams is in the form of a field
// list, and targs is a slice of type nodes. substType records any newly
// instantiated types into g.instTypeList.
func (g *irgen) substType(typ *types.Type, tparams *types.Type, targs []ir.Node) *types.Type {
fields := tparams.FieldSlice()
tparams1 := make([]*types.Type, len(fields))
for i, f := range fields {
tparams1[i] = f.Type
}
targs1 := make([]*types.Type, len(targs))
for i, n := range targs {
targs1[i] = n.Type()
}
ts := typecheck.Tsubster{
Tparams: tparams1,
Targs: targs1,
}
newt := ts.Typ(typ)
g.instTypeList = append(g.instTypeList, ts.InstTypeList...)
return newt
}
// selectorExpr resolves the choice of ODOT, ODOTPTR, OMETHVALUE (eventually
// ODOTMETH & ODOTINTER), and OMETHEXPR and deals with embedded fields here rather
// than in typecheck.go.
func (g *irgen) selectorExpr(pos src.XPos, typ types2.Type, expr *syntax.SelectorExpr) ir.Node {
@ -203,6 +247,44 @@ func (g *irgen) selectorExpr(pos src.XPos, typ types2.Type, expr *syntax.Selecto
// only be fully transformed once it has an instantiated type.
n := ir.NewSelectorExpr(pos, ir.OXDOT, x, typecheck.Lookup(expr.Sel.Value))
typed(g.typ(typ), n)
// Fill in n.Selection for a generic method reference or a bound
// interface method, even though we won't use it directly, since it
// is useful for analysis. Specifically, do not fill it in for fields or
// other interface methods (a method call on an interface value), so
// n.Selection being non-nil means a method reference for a generic
// type or a method reference due to a bound.
obj2 := g.info.Selections[expr].Obj()
sig := types2.AsSignature(obj2.Type())
if sig == nil || sig.Recv() == nil {
return n
}
index := g.info.Selections[expr].Index()
last := index[len(index)-1]
// recvType is the receiver of the method being called. Because of the
// way methods are imported, g.obj(obj2) doesn't work across
// packages, so we have to look up the method via the receiver type.
recvType := deref2(sig.Recv().Type())
if types2.AsInterface(recvType.Underlying()) != nil {
fieldType := n.X.Type()
for _, ix := range index[:len(index)-1] {
fieldType = fieldType.Field(ix).Type
}
if fieldType.Kind() == types.TTYPEPARAM {
n.Selection = fieldType.Bound().AllMethods().Index(last)
//fmt.Printf(">>>>> %v: Bound call %v\n", base.FmtPos(pos), n.Sel)
} else {
assert(fieldType.Kind() == types.TINTER)
//fmt.Printf(">>>>> %v: Interface call %v\n", base.FmtPos(pos), n.Sel)
}
return n
}
recvObj := types2.AsNamed(recvType).Obj()
recv := g.pkg(recvObj.Pkg()).Lookup(recvObj.Name()).Def
n.Selection = recv.Type().Methods().Index(last)
//fmt.Printf(">>>>> %v: Method call %v\n", base.FmtPos(pos), n.Sel)
return n
}
@ -259,14 +341,18 @@ func (g *irgen) selectorExpr(pos src.XPos, typ types2.Type, expr *syntax.Selecto
if wantPtr {
recvType2Base = types2.AsPointer(recvType2).Elem()
}
if len(types2.AsNamed(recvType2Base).TParams()) > 0 {
if types2.AsNamed(recvType2Base).TParams().Len() > 0 {
// recvType2 is the original generic type that is
// instantiated for this method call.
// selinfo.Recv() is the instantiated type
recvType2 = recvType2Base
// method is the generic method associated with the gen type
method := g.obj(types2.AsNamed(recvType2).Method(last))
n = ir.NewSelectorExpr(pos, ir.OCALLPART, x, method.Sym())
recvTypeSym := g.pkg(method2.Pkg()).Lookup(recvType2.(*types2.Named).Obj().Name())
recvType := recvTypeSym.Def.(*ir.Name).Type()
// method is the generic method associated with
// the base generic type. The instantiated type may not
// have method bodies filled in, if it was imported.
method := recvType.Methods().Index(last).Nname.(*ir.Name)
n = ir.NewSelectorExpr(pos, ir.OMETHVALUE, x, typecheck.Lookup(expr.Sel.Value))
n.(*ir.SelectorExpr).Selection = types.NewField(pos, method.Sym(), method.Type())
n.(*ir.SelectorExpr).Selection.Nname = method
typed(method.Type(), n)
@ -301,10 +387,7 @@ func (g *irgen) selectorExpr(pos src.XPos, typ types2.Type, expr *syntax.Selecto
// getTargs gets the targs associated with the receiver of a selected method
func getTargs(selinfo *types2.Selection) []types2.Type {
r := selinfo.Recv()
if p := types2.AsPointer(r); p != nil {
r = p.Elem()
}
r := deref2(selinfo.Recv())
n := types2.AsNamed(r)
if n == nil {
base.Fatalf("Incorrect type for selinfo %v", selinfo)
@ -313,13 +396,17 @@ func getTargs(selinfo *types2.Selection) []types2.Type {
}
func (g *irgen) exprList(expr syntax.Expr) []ir.Node {
return g.exprs(unpackListExpr(expr))
}
func unpackListExpr(expr syntax.Expr) []syntax.Expr {
switch expr := expr.(type) {
case nil:
return nil
case *syntax.ListExpr:
return g.exprs(expr.ElemList)
return expr.ElemList
default:
return []ir.Node{g.expr(expr)}
return []syntax.Expr{expr}
}
}
@ -344,11 +431,13 @@ func (g *irgen) compLit(typ types2.Type, lit *syntax.CompositeLit) ir.Node {
for i, elem := range lit.ElemList {
switch elem := elem.(type) {
case *syntax.KeyValueExpr:
var key ir.Node
if isStruct {
exprs[i] = ir.NewStructKeyExpr(g.pos(elem), g.name(elem.Key.(*syntax.Name)), g.expr(elem.Value))
key = ir.NewIdent(g.pos(elem.Key), g.name(elem.Key.(*syntax.Name)))
} else {
exprs[i] = ir.NewKeyExpr(g.pos(elem), g.expr(elem.Key), g.expr(elem.Value))
key = g.expr(elem.Key)
}
exprs[i] = ir.NewKeyExpr(g.pos(elem), key, g.expr(elem.Value))
default:
exprs[i] = g.expr(elem)
}
@ -360,19 +449,13 @@ func (g *irgen) compLit(typ types2.Type, lit *syntax.CompositeLit) ir.Node {
}
func (g *irgen) funcLit(typ2 types2.Type, expr *syntax.FuncLit) ir.Node {
fn := ir.NewFunc(g.pos(expr))
fn.SetIsHiddenClosure(ir.CurFunc != nil)
fn := ir.NewClosureFunc(g.pos(expr), ir.CurFunc != nil)
ir.NameClosure(fn.OClosure, ir.CurFunc)
fn.Nname = ir.NewNameAt(g.pos(expr), typecheck.ClosureName(ir.CurFunc))
ir.MarkFunc(fn.Nname)
typ := g.typ(typ2)
fn.Nname.Func = fn
fn.Nname.Defn = fn
typed(typ, fn.Nname)
fn.SetTypecheck(1)
fn.OClosure = ir.NewClosureExpr(g.pos(expr), fn)
typed(typ, fn.OClosure)
fn.SetTypecheck(1)
g.funcBody(fn, nil, expr.Type, expr.Body)
@ -386,9 +469,14 @@ func (g *irgen) funcLit(typ2 types2.Type, expr *syntax.FuncLit) ir.Node {
cv.SetWalkdef(1)
}
g.target.Decls = append(g.target.Decls, fn)
return fn.OClosure
if g.topFuncIsGeneric {
// Don't add any closure inside a generic function/method to the
// g.target.Decls list, even though it may not be generic itself.
// See issue #47514.
return ir.UseClosure(fn.OClosure, nil)
} else {
return ir.UseClosure(fn.OClosure, g.target)
}
}
func (g *irgen) typeExpr(typ syntax.Expr) *types.Type {
@ -398,3 +486,35 @@ func (g *irgen) typeExpr(typ syntax.Expr) *types.Type {
}
return n.Type()
}
// constExprOp returns an ir.Op that represents the outermost
// operation of the given constant expression. It's intended for use
// with ir.RawOrigExpr.
func constExprOp(expr syntax.Expr) ir.Op {
switch expr := expr.(type) {
default:
panic(fmt.Sprintf("%s: unexpected expression: %T", expr.Pos(), expr))
case *syntax.BasicLit:
return ir.OLITERAL
case *syntax.Name, *syntax.SelectorExpr:
return ir.ONAME
case *syntax.CallExpr:
return ir.OCALL
case *syntax.Operation:
if expr.Y == nil {
return unOps[expr.Op]
}
return binOps[expr.Op]
}
}
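// For example, for the constant expression "1 + 2" constExprOp
// returns binOps[syntax.Add] (ir.OADD), and for "len(x)" it returns
// ir.OCALL.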
func unparen(expr syntax.Expr) syntax.Expr {
for {
paren, ok := expr.(*syntax.ParenExpr)
if !ok {
return expr
}
expr = paren.X
}
}
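// For example, given the syntax tree for "((f(x)))", unparen returns
// the *syntax.CallExpr for "f(x)".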

View file

@ -0,0 +1,20 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !go1.7
// TODO(mdempsky): Remove after #44505 is resolved
package noder
import "runtime"
func walkFrames(pcs []uintptr, visit frameVisitor) {
for _, pc := range pcs {
fn := runtime.FuncForPC(pc)
file, line := fn.FileLine(pc)
visit(file, line, fn.Name(), pc-fn.Entry())
}
}

View file

@ -0,0 +1,24 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.7
package noder
import "runtime"
func walkFrames(pcs []uintptr, visit frameVisitor) {
if len(pcs) == 0 {
return
}
frames := runtime.CallersFrames(pcs)
for {
frame, more := frames.Next()
visit(frame.File, frame.Line, frame.Function, frame.PC-frame.Entry)
if !more {
return
}
}
}
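// Hypothetical usage (sketch), assuming frameVisitor has the shape
// implied by the call above, func(file string, line int, name string,
// offset uintptr), with fmt imported for illustration:
//
//	pcs := make([]uintptr, 16)
//	n := runtime.Callers(2, pcs)
//	walkFrames(pcs[:n], func(file string, line int, name string, offset uintptr) {
//		fmt.Printf("%s:%d %s +0x%x\n", file, line, name, offset)
//	})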

View file

@ -43,6 +43,32 @@ func Const(pos src.XPos, typ *types.Type, val constant.Value) ir.Node {
return typed(typ, ir.NewBasicLit(pos, val))
}
func OrigConst(pos src.XPos, typ *types.Type, val constant.Value, op ir.Op, raw string) ir.Node {
orig := ir.NewRawOrigExpr(pos, op, raw)
return ir.NewConstExpr(val, typed(typ, orig))
}
// FixValue returns val after converting and truncating it as
// appropriate for typ.
func FixValue(typ *types.Type, val constant.Value) constant.Value {
assert(typ.Kind() != types.TFORW)
switch {
case typ.IsInteger():
val = constant.ToInt(val)
case typ.IsFloat():
val = constant.ToFloat(val)
case typ.IsComplex():
val = constant.ToComplex(val)
}
if !typ.IsUntyped() {
val = typecheck.DefaultLit(ir.NewBasicLit(src.NoXPos, val), typ).Val()
}
if !typ.IsTypeParam() {
ir.AssertValidTypeForConst(typ, val)
}
return val
}
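// For example (illustrative): with typ = float64, an untyped integer
// constant 1 is first converted by constant.ToFloat and then
// defaulted to a typed float64 constant, so the value stored in the
// IR agrees with the node's type.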
func Nil(pos src.XPos, typ *types.Type) ir.Node {
return typed(typ, ir.NewNilExpr(pos))
}
@ -87,15 +113,15 @@ func Binary(pos src.XPos, op ir.Op, typ *types.Type, x, y ir.Node) ir.Node {
func Call(pos src.XPos, typ *types.Type, fun ir.Node, args []ir.Node, dots bool) ir.Node {
n := ir.NewCallExpr(pos, ir.OCALL, fun, args)
n.IsDDD = dots
// n.Use will be changed to ir.CallUseStmt in g.stmt() if this call is
// just a statement (any return values are ignored).
n.Use = ir.CallUseExpr
if fun.Op() == ir.OTYPE {
// Actually a type conversion, not a function call.
if fun.Type().HasTParam() || args[0].Type().HasTParam() {
// For type params, don't typecheck until we actually know
// the type.
if !fun.Type().IsInterface() &&
(fun.Type().HasTParam() || args[0].Type().HasTParam()) {
// For type params, we can transform if fun.Type() is known
// to be an interface (in which case a CONVIFACE node will be
// inserted). Otherwise, don't typecheck until we actually
// know the type.
return typed(typ, n)
}
typed(typ, n)
@ -103,22 +129,17 @@ func Call(pos src.XPos, typ *types.Type, fun ir.Node, args []ir.Node, dots bool)
}
if fun, ok := fun.(*ir.Name); ok && fun.BuiltinOp != 0 {
// For Builtin ops, we currently stay with using the old
// typechecker to transform the call to a more specific expression
// and possibly use more specific ops. However, for a bunch of the
// ops, we delay doing the old typechecker if any of the args have
// type params, for a variety of reasons:
// For most Builtin ops, we delay doing transformBuiltin if any of the
// args have type params, for a variety of reasons:
//
// OMAKE: hard to choose specific ops OMAKESLICE, etc. until arg type is known
// OREAL/OIMAG: can't determine type float32/float64 until arg type known
// OLEN/OCAP: old typechecker will complain if arg is not obviously a slice/array.
// OAPPEND: old typechecker will complain if arg is not obviously slice, etc.
//
// We will eventually break out the transforming functionality
// needed for builtin's, and call it here or during stenciling, as
// appropriate.
// OMAKE: transformMake can't choose specific ops OMAKESLICE, etc.
// until arg type is known
// OREAL/OIMAG: transformRealImag can't determine type float32/float64
// until arg type known
// OAPPEND: transformAppend requires that the arg is a slice
// ODELETE: transformDelete requires that the arg is a map
switch fun.BuiltinOp {
case ir.OMAKE, ir.OREAL, ir.OIMAG, ir.OLEN, ir.OCAP, ir.OAPPEND:
case ir.OMAKE, ir.OREAL, ir.OIMAG, ir.OAPPEND, ir.ODELETE:
hasTParam := false
for _, arg := range args {
if arg.Type().HasTParam() {
@ -137,10 +158,8 @@ func Call(pos src.XPos, typ *types.Type, fun ir.Node, args []ir.Node, dots bool)
// Add information, now that we know that fun is actually being called.
switch fun := fun.(type) {
case *ir.ClosureExpr:
fun.Func.SetClosureCalled(true)
case *ir.SelectorExpr:
if fun.Op() == ir.OCALLPART {
if fun.Op() == ir.OMETHVALUE {
op := ir.ODOTMETH
if fun.X.Type().IsInterface() {
op = ir.ODOTINTER
@ -152,46 +171,52 @@ func Call(pos src.XPos, typ *types.Type, fun ir.Node, args []ir.Node, dots bool)
}
}
if fun.Type().HasTParam() {
// If the fun arg is or has a type param, don't do any extra
if fun.Type().HasTParam() || fun.Op() == ir.OXDOT || fun.Op() == ir.OFUNCINST {
// If the fun arg is or has a type param, we can't do all the
// transformations, since we may not have needed properties yet
// (e.g. number of return values, etc). The type param is probably
// described by a structural constraint that requires it to be a
// certain function type, etc., but we don't want to analyze that.
// (e.g. number of return values, etc). The same applies if a fun
// which is an XDOT could not be transformed yet because of a generic
// type in the X of the selector expression.
//
// A function instantiation (even if fully concrete) shouldn't be
// transformed yet, because we need to add the dictionary during the
// transformation.
//
// However, if we have a function type (even though it is
// parameterized), then we can add in any needed CONVIFACE nodes via
// typecheckaste(). We need to call transformArgs() first, to deal
// with the f(g()) case where g returns multiple return values. We
// can't do anything if fun is a type param (which is probably
// described by a structural constraint).
if fun.Type().Kind() == types.TFUNC {
transformArgs(n)
typecheckaste(ir.OCALL, fun, n.IsDDD, fun.Type().Params(), n.Args, true)
}
return typed(typ, n)
}
if fun.Op() == ir.OXDOT {
if !fun.(*ir.SelectorExpr).X.Type().HasTParam() {
base.FatalfAt(pos, "Expecting type param receiver in %v", fun)
}
// For methods called in a generic function, don't do any extra
// transformations. We will do those later when we create the
// instantiated function and have the correct receiver type.
typed(typ, n)
return n
}
if fun.Op() != ir.OFUNCINST {
// If no type params, do the normal call transformations. This
// will convert OCALL to OCALLFUNC.
typed(typ, n)
transformCall(n)
return n
}
// Leave the op as OCALL, which indicates the call still needs typechecking.
// If no type params, do the normal call transformations. This
// will convert OCALL to OCALLFUNC.
typed(typ, n)
transformCall(n)
return n
}
func Compare(pos src.XPos, typ *types.Type, op ir.Op, x, y ir.Node) ir.Node {
n := ir.NewBinaryExpr(pos, op, x, y)
if x.Type().HasTParam() || y.Type().HasTParam() {
// Delay transformCompare() if either arg has a type param, since
// it needs to know the exact types to decide on any needed conversions.
n.SetType(typ)
n.SetTypecheck(3)
return n
xIsInt := x.Type().IsInterface()
yIsInt := y.Type().IsInterface()
if !(xIsInt && !yIsInt || !xIsInt && yIsInt) {
// If either arg is a type param, then we can still do the
// transformCompare() if we know that one arg is an interface
// and the other is not. Otherwise, we delay
// transformCompare(), since it needs to know the exact types
// to decide on any needed conversions.
n.SetType(typ)
n.SetTypecheck(3)
return n
}
}
typed(typ, n)
transformCompare(n)
@ -225,7 +250,7 @@ func DotMethod(pos src.XPos, x ir.Node, index int) *ir.SelectorExpr {
// Method value.
typ := typecheck.NewMethodType(method.Type, nil)
return dot(pos, typ, ir.OCALLPART, x, method)
return dot(pos, typ, ir.OMETHVALUE, x, method)
}
// MethodExpr returns a OMETHEXPR node with the indicated index into the methods
@ -321,5 +346,15 @@ var one = constant.MakeInt64(1)
func IncDec(pos src.XPos, op ir.Op, x ir.Node) *ir.AssignOpStmt {
assert(x.Type() != nil)
return ir.NewAssignOpStmt(pos, op, x, typecheck.DefaultLit(ir.NewBasicLit(pos, one), x.Type()))
bl := ir.NewBasicLit(pos, one)
if x.Type().HasTParam() {
// If the operand is generic, then types2 will have proved it must be
// a type that fits with increment/decrement, so just set the type of
// "one" to n.Type(). This works even for types that are eventually
// float or complex.
typed(x.Type(), bl)
} else {
bl = typecheck.DefaultLit(bl, x.Type())
}
return ir.NewAssignOpStmt(pos, op, x, bl)
}

View file

@ -8,7 +8,6 @@ import (
"errors"
"fmt"
"internal/buildcfg"
"io"
"os"
pathpkg "path"
"runtime"
@ -32,8 +31,24 @@ import (
"cmd/internal/src"
)
// Temporary import helper to get types2-based type-checking going.
// haveLegacyImports records whether we've imported any packages
// without a new export data section. This is useful for experimenting
// with new export data format designs, when you need to support
// existing tests that manually compile files with inconsistent
// compiler flags.
var haveLegacyImports = false
// newReadImportFunc is an extension hook for experimenting with new
// export data formats. If a new export data payload was written out
// for an imported package by overloading writeNewExportFunc, then
// that payload will be mapped into memory and passed to
// newReadImportFunc.
var newReadImportFunc = func(data string, pkg1 *types.Pkg, check *types2.Checker, packages map[string]*types2.Package) (pkg2 *types2.Package, err error) {
panic("unexpected new export data payload")
}
type gcimports struct {
check *types2.Checker
packages map[string]*types2.Package
}
@ -46,13 +61,8 @@ func (m *gcimports) ImportFrom(path, srcDir string, mode types2.ImportMode) (*ty
panic("mode must be 0")
}
path, err := resolveImportPath(path)
if err != nil {
return nil, err
}
lookup := func(path string) (io.ReadCloser, error) { return openPackage(path) }
return importer.Import(m.packages, path, srcDir, lookup)
_, pkg, err := readImportFile(path, typecheck.Target, m.check, m.packages)
return pkg, err
}
func isDriveLetter(b byte) bool {
@ -175,160 +185,242 @@ func resolveImportPath(path string) (string, error) {
return path, nil
}
// TODO(mdempsky): Return an error instead.
func importfile(decl *syntax.ImportDecl) *types.Pkg {
if decl.Path.Kind != syntax.StringLit {
base.Errorf("import path must be a string")
return nil
}
path, err := strconv.Unquote(decl.Path.Value)
if err != nil {
base.Errorf("import path must be a string")
return nil
}
if err := checkImportPath(path, false); err != nil {
base.Errorf("%s", err.Error())
return nil
}
path, err = resolveImportPath(path)
path, err := parseImportPath(decl.Path)
if err != nil {
base.Errorf("%s", err)
return nil
}
importpkg := types.NewPkg(path, "")
if importpkg.Direct {
return importpkg // already fully loaded
pkg, _, err := readImportFile(path, typecheck.Target, nil, nil)
if err != nil {
base.Errorf("%s", err)
return nil
}
if pkg != ir.Pkgs.Unsafe && pkg.Height >= myheight {
myheight = pkg.Height + 1
}
return pkg
}
func parseImportPath(pathLit *syntax.BasicLit) (string, error) {
if pathLit.Kind != syntax.StringLit {
return "", errors.New("import path must be a string")
}
path, err := strconv.Unquote(pathLit.Value)
if err != nil {
return "", errors.New("import path must be a string")
}
if err := checkImportPath(path, false); err != nil {
return "", err
}
return path, nil
}
// readImportFile reads the import file for the given package path and
// returns its types.Pkg representation. If packages is non-nil, the
// types2.Package representation is also returned.
func readImportFile(path string, target *ir.Package, check *types2.Checker, packages map[string]*types2.Package) (pkg1 *types.Pkg, pkg2 *types2.Package, err error) {
path, err = resolveImportPath(path)
if err != nil {
return
}
importpkg.Direct = true
typecheck.Target.Imports = append(typecheck.Target.Imports, importpkg)
if path == "unsafe" {
return importpkg // initialized with universe
pkg1, pkg2 = ir.Pkgs.Unsafe, types2.Unsafe
// TODO(mdempsky): Investigate if this actually matters. Why would
// the linker or runtime care whether a package imported unsafe?
if !pkg1.Direct {
pkg1.Direct = true
target.Imports = append(target.Imports, pkg1)
}
return
}
pkg1 = types.NewPkg(path, "")
if packages != nil {
pkg2 = packages[path]
assert(pkg1.Direct == (pkg2 != nil && pkg2.Complete()))
}
if pkg1.Direct {
return
}
pkg1.Direct = true
target.Imports = append(target.Imports, pkg1)
f, err := openPackage(path)
if err != nil {
base.Errorf("could not import %q: %v", path, err)
base.ErrorExit()
return
}
imp := bio.NewReader(f)
defer imp.Close()
file := f.Name()
defer f.Close()
// check object header
p, err := imp.ReadString('\n')
r, end, newsize, err := findExportData(f)
if err != nil {
base.Errorf("import %s: reading input: %v", file, err)
base.ErrorExit()
return
}
if p == "!<arch>\n" { // package archive
// package export block should be first
sz := archive.ReadHeader(imp.Reader, "__.PKGDEF")
if sz <= 0 {
base.Errorf("import %s: not a package file", file)
base.ErrorExit()
}
p, err = imp.ReadString('\n')
if base.Debug.Export != 0 {
fmt.Printf("importing %s (%s)\n", path, f.Name())
}
if newsize != 0 {
// We have unified IR data. Map it, and feed it to the importers.
end -= newsize
var data string
data, err = base.MapFile(r.File(), end, newsize)
if err != nil {
base.Errorf("import %s: reading input: %v", file, err)
base.ErrorExit()
return
}
}
if !strings.HasPrefix(p, "go object ") {
base.Errorf("import %s: not a go object file: %s", file, p)
base.ErrorExit()
}
q := objabi.HeaderString()
if p != q {
base.Errorf("import %s: object is [%s] expected [%s]", file, p, q)
base.ErrorExit()
}
pkg2, err = newReadImportFunc(data, pkg1, check, packages)
} else {
// We only have old data. Oh well, fall back to the legacy importers.
haveLegacyImports = true
// process header lines
for {
p, err = imp.ReadString('\n')
var c byte
switch c, err = r.ReadByte(); {
case err != nil:
return
case c != 'i':
// Indexed format is distinguished by an 'i' byte,
// whereas previous export formats started with 'c', 'd', or 'v'.
err = fmt.Errorf("unexpected package format byte: %v", c)
return
}
pos := r.Offset()
// Map string (and data) section into memory as a single large
// string. This reduces heap fragmentation and allows
// returning individual substrings very efficiently.
var data string
data, err = base.MapFile(r.File(), pos, end-pos)
if err != nil {
base.Errorf("import %s: reading input: %v", file, err)
base.ErrorExit()
return
}
if p == "\n" {
break // header ends with blank line
}
}
// Expect $$B\n to signal binary import format.
typecheck.ReadImports(pkg1, data)
// look for $$
var c byte
for {
c, err = imp.ReadByte()
if err != nil {
break
}
if c == '$' {
c, err = imp.ReadByte()
if c == '$' || err != nil {
break
if packages != nil {
pkg2, err = importer.ImportData(packages, data, path)
if err != nil {
return
}
}
}
// get character after $$
if err == nil {
c, _ = imp.ReadByte()
err = addFingerprint(path, f, end)
return
}
// findExportData returns a *bio.Reader positioned at the start of the
// binary export data section, the file offset at which that section
// ends, and, if the file carries unified IR data, the size in bytes
// of that trailing payload.
func findExportData(f *os.File) (r *bio.Reader, end, newsize int64, err error) {
r = bio.NewReader(f)
// check object header
line, err := r.ReadString('\n')
if err != nil {
return
}
var fingerprint goobj.FingerprintType
switch c {
case '\n':
base.Errorf("cannot import %s: old export format no longer supported (recompile library)", path)
return nil
case 'B':
if base.Debug.Export != 0 {
fmt.Printf("importing %s (%s)\n", path, file)
if line == "!<arch>\n" { // package archive
// package export block should be first
sz := int64(archive.ReadHeader(r.Reader, "__.PKGDEF"))
if sz <= 0 {
err = errors.New("not a package file")
return
}
imp.ReadByte() // skip \n after $$B
c, err = imp.ReadByte()
end = r.Offset() + sz
line, err = r.ReadString('\n')
if err != nil {
base.Errorf("import %s: reading input: %v", file, err)
base.ErrorExit()
return
}
// Indexed format is distinguished by an 'i' byte,
// whereas previous export formats started with 'c', 'd', or 'v'.
if c != 'i' {
base.Errorf("import %s: unexpected package format byte: %v", file, c)
base.ErrorExit()
} else {
// Not an archive; provide end of file instead.
// TODO(mdempsky): I don't think this happens anymore.
var fi os.FileInfo
fi, err = f.Stat()
if err != nil {
return
}
fingerprint = typecheck.ReadImports(importpkg, imp)
default:
base.Errorf("no import in %q", path)
base.ErrorExit()
end = fi.Size()
}
if !strings.HasPrefix(line, "go object ") {
err = fmt.Errorf("not a go object file: %s", line)
return
}
if expect := objabi.HeaderString(); line != expect {
err = fmt.Errorf("object is [%s] expected [%s]", line, expect)
return
}
// process header lines
for !strings.HasPrefix(line, "$$") {
if strings.HasPrefix(line, "newexportsize ") {
fields := strings.Fields(line)
newsize, err = strconv.ParseInt(fields[1], 10, 64)
if err != nil {
return
}
}
line, err = r.ReadString('\n')
if err != nil {
return
}
}
// Expect $$B\n to signal binary import format.
if line != "$$B\n" {
err = errors.New("old export format no longer supported (recompile library)")
return
}
return
}
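// In other words, findExportData accepts either a package archive (in
// which case it seeks to the __.PKGDEF member) or a bare object file.
// In both cases it expects a "go object ..." header line, optional
// header lines such as "newexportsize <n>", and finally the "$$B"
// line that starts the binary export data.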
// addFingerprint reads the linker fingerprint included at the end of
// the exportdata.
func addFingerprint(path string, f *os.File, end int64) error {
const eom = "\n$$\n"
var fingerprint goobj.FingerprintType
var buf [len(fingerprint) + len(eom)]byte
if _, err := f.ReadAt(buf[:], end-int64(len(buf))); err != nil {
return err
}
// Caller should have given us the end position of the export data,
// which should end with the "\n$$\n" marker. As a consistency check
// that we're reading at the right offset, verify that we found the
// marker.
if s := string(buf[len(fingerprint):]); s != eom {
return fmt.Errorf("expected $$ marker, but found %q", s)
}
copy(fingerprint[:], buf[:])
// assume files move (get installed) so don't record the full path
if base.Flag.Cfg.PackageFile != nil {
// If using a packageFile map, assume path can be recorded directly.
base.Ctxt.AddImport(path, fingerprint)
} else {
// For file "/Users/foo/go/pkg/darwin_amd64/math.a" record "math.a".
file := f.Name()
base.Ctxt.AddImport(file[len(file)-len(path)-len(".a"):], fingerprint)
}
if importpkg.Height >= myheight {
myheight = importpkg.Height + 1
}
return importpkg
return nil
}
// The linker uses the magic symbol prefixes "go." and "type."
@ -431,7 +523,7 @@ func clearImports() {
s.Def = nil
continue
}
if types.IsDotAlias(s) {
if s.Def != nil && s.Def.Sym() != s {
// throw away top-level name left over
// from previous import . "x"
// We'll report errors after type checking in CheckDotImports.

View file

@ -18,9 +18,9 @@ import (
"cmd/internal/src"
)
// check2 type checks a Go package using types2, and then generates IR
// using the results.
func check2(noders []*noder) {
// checkFiles configures and runs the types2 checker on the given
// parsed source files and then returns the result.
func checkFiles(noders []*noder) (posMap, *types2.Package, *types2.Info) {
if base.SyntaxErrors() != 0 {
base.ErrorExit()
}
@ -34,20 +34,22 @@ func check2(noders []*noder) {
}
// typechecking
importer := gcimports{
packages: make(map[string]*types2.Package),
}
conf := types2.Config{
GoVersion: base.Flag.Lang,
IgnoreLabels: true, // parser already checked via syntax.CheckBranches mode
CompilerErrorMessages: true, // use error strings matching existing compiler errors
AllowTypeLists: true, // remove this line once all tests use type set syntax
Error: func(err error) {
terr := err.(types2.Error)
base.ErrorfAt(m.makeXPos(terr.Pos), "%s", terr.Msg)
},
Importer: &gcimports{
packages: make(map[string]*types2.Package),
},
Sizes: &gcSizes{},
Importer: &importer,
Sizes: &gcSizes{},
}
info := types2.Info{
info := &types2.Info{
Types: make(map[syntax.Expr]types2.TypeAndValue),
Defs: make(map[*syntax.Name]types2.Object),
Uses: make(map[*syntax.Name]types2.Object),
@ -57,12 +59,24 @@ func check2(noders []*noder) {
Inferred: make(map[syntax.Expr]types2.Inferred),
// expand as needed
}
pkg, err := conf.Check(base.Ctxt.Pkgpath, files, &info)
files = nil
pkg := types2.NewPackage(base.Ctxt.Pkgpath, "")
importer.check = types2.NewChecker(&conf, pkg, info)
err := importer.check.Files(files)
base.ExitIfErrors()
if err != nil {
base.FatalfAt(src.NoXPos, "conf.Check error: %v", err)
}
return m, pkg, info
}
// check2 type checks a Go package using types2, and then generates IR
// using the results.
func check2(noders []*noder) {
m, pkg, info := checkFiles(noders)
if base.Flag.G < 2 {
os.Exit(0)
}
@ -70,7 +84,7 @@ func check2(noders []*noder) {
g := irgen{
target: typecheck.Target,
self: pkg,
info: &info,
info: info,
posMap: m,
objs: make(map[types2.Object]*ir.Name),
typs: make(map[types2.Type]*types.Type),
@ -82,6 +96,41 @@ func check2(noders []*noder) {
}
}
// gfInfo is information gathered on a generic function.
type gfInfo struct {
tparams []*types.Type
derivedTypes []*types.Type
// Nodes in the generic function that require a subdictionary. Includes
// method and function calls (OCALL), function values (OFUNCINST), method
// values/expressions (OXDOT).
subDictCalls []ir.Node
// Nodes in the generic function that convert a typeparam/derived
// type to a specific interface.
itabConvs []ir.Node
// For type switches on nonempty interfaces, a map from OTYPE entries
// with HasTParam type to the interface type we're switching from.
// TODO: what if the type we're switching from is a shape type?
type2switchType map[ir.Node]*types.Type
}
// instInfo is information gathered on a gcshape (or fully concrete)
// instantiation of a function.
type instInfo struct {
fun *ir.Func // The instantiated function (with body)
dictParam *ir.Name // The node inside fun that refers to the dictionary param
gf *ir.Name // The associated generic function
gfInfo *gfInfo
startSubDict int // Start of dict entries for subdictionaries
startItabConv int // Start of dict entries for itab conversions
dictLen int // Total number of entries in dictionary
// Map from nodes in instantiated fun (OCALL, OCALLMETHOD, OFUNCINST, and
// OMETHEXPR) to the associated dictionary entry for a sub-dictionary
dictEntryMap map[ir.Node]int
}
type irgen struct {
target *ir.Package
self *types2.Package
@ -94,10 +143,38 @@ type irgen struct {
// Fully-instantiated generic types whose methods should be instantiated
instTypeList []*types.Type
dnum int // for generating unique dictionary variables
// Map from generic function to information about its type params, derived
// types, and subdictionaries.
gfInfoMap map[*types.Sym]*gfInfo
// Map from the name of a function that has been instantiated to information about
// its instantiated function, associated generic function/method, and the
// mapping from IR nodes to dictionary entries.
instInfoMap map[*types.Sym]*instInfo
// dictionary syms which we need to finish, by writing out any itabconv
// entries.
dictSymsToFinalize []*delayInfo
// True when we are compiling a top-level generic function or method. Used to
// avoid adding closures of generic functions/methods to the target.Decls
// list.
topFuncIsGeneric bool
}
type delayInfo struct {
gf *ir.Name
targs []*types.Type
sym *types.Sym
off int
}
func (g *irgen) generate(noders []*noder) {
types.LocalPkg.Name = g.self.Name()
types.LocalPkg.Height = g.self.Height()
typecheck.TypecheckAllowed = true
// Prevent size calculations until we set the underlying type
@ -132,7 +209,6 @@ Outer:
}
}
}
types.LocalPkg.Height = myheight
// 2. Process all package-block type declarations. As with imports,
// we need to make sure all types are properly instantiated before
@ -167,6 +243,10 @@ Outer:
}
}
// Check for the unusual case where noder2 encounters a type error that types2
// doesn't check for (e.g. notinheap incompatibility).
base.ExitIfErrors()
typecheck.DeclareUniverse()
for _, p := range noders {
@ -175,7 +255,7 @@ Outer:
// Double check for any type-checking inconsistencies. This can be
// removed once we're confident in IR generation results.
syntax.Walk(p.file, func(n syntax.Node) bool {
syntax.Crawl(p.file, func(n syntax.Node) bool {
g.validate(n)
return false
})
@ -184,9 +264,9 @@ Outer:
// Create any needed stencils of generic functions
g.stencil()
// For now, remove all generic functions from g.target.Decl, since they
// have been used for stenciling, but don't compile. TODO: We will
// eventually export any exportable generic functions.
// Remove all generic functions from g.target.Decl, since they have been
// used for stenciling, but don't compile. Generic functions will already
// have been marked for export as appropriate.
j := 0
for i, decl := range g.target.Decls {
if decl.Op() != ir.ODCLFUNC || !decl.Type().HasTParam() {

View file

@ -0,0 +1,296 @@
// UNREVIEWED
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package noder
import (
"io"
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/reflectdata"
"cmd/compile/internal/types"
"cmd/internal/goobj"
"cmd/internal/obj"
)
// This file implements the unified IR linker, which combines the
// local package's stub data with imported package data to produce a
// complete export data file. It also rewrites the compiler's
// extension data sections based on the results of compilation (e.g.,
// the function inlining cost and linker symbol index assignments).
//
// TODO(mdempsky): Using the name "linker" here is confusing, because
// readers are likely to mistake references to it for cmd/link. But
// there's a shortage of good names for "something that combines
// multiple parts into a cohesive whole"... e.g., "assembler" and
// "compiler" are also already taken.
type linker struct {
pw pkgEncoder
pkgs map[string]int
decls map[*types.Sym]int
}
func (l *linker) relocAll(pr *pkgReader, relocs []relocEnt) []relocEnt {
res := make([]relocEnt, len(relocs))
for i, rent := range relocs {
rent.idx = l.relocIdx(pr, rent.kind, rent.idx)
res[i] = rent
}
return res
}
func (l *linker) relocIdx(pr *pkgReader, k reloc, idx int) int {
assert(pr != nil)
absIdx := pr.absIdx(k, idx)
if newidx := pr.newindex[absIdx]; newidx != 0 {
return ^newidx
}
var newidx int
switch k {
case relocString:
newidx = l.relocString(pr, idx)
case relocPkg:
newidx = l.relocPkg(pr, idx)
case relocObj:
newidx = l.relocObj(pr, idx)
default:
// Generic relocations.
//
// TODO(mdempsky): Deduplicate more sections? In fact, I think
// every section could be deduplicated. This would also be easier
// if we did external relocations.
w := l.pw.newEncoderRaw(k)
l.relocCommon(pr, &w, k, idx)
newidx = w.idx
}
pr.newindex[absIdx] = ^newidx
return newidx
}
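// Note the bitwise complement: entries in pr.newindex are stored as
// ^newidx so that a relocated index of 0 remains distinguishable from
// the zero value meaning "not yet relocated".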
func (l *linker) relocString(pr *pkgReader, idx int) int {
return l.pw.stringIdx(pr.stringIdx(idx))
}
func (l *linker) relocPkg(pr *pkgReader, idx int) int {
path := pr.peekPkgPath(idx)
if newidx, ok := l.pkgs[path]; ok {
return newidx
}
r := pr.newDecoder(relocPkg, idx, syncPkgDef)
w := l.pw.newEncoder(relocPkg, syncPkgDef)
l.pkgs[path] = w.idx
// TODO(mdempsky): We end up leaving an empty string reference here
// from when the package was originally written as "". Probably not
// a big deal, but a little annoying. Maybe relocating
// cross-references in place is the way to go after all.
w.relocs = l.relocAll(pr, r.relocs)
_ = r.string() // original path
w.string(path)
io.Copy(&w.data, &r.data)
return w.flush()
}
func (l *linker) relocObj(pr *pkgReader, idx int) int {
path, name, tag := pr.peekObj(idx)
sym := types.NewPkg(path, "").Lookup(name)
if newidx, ok := l.decls[sym]; ok {
return newidx
}
if tag == objStub && path != "builtin" && path != "unsafe" {
pri, ok := objReader[sym]
if !ok {
base.Fatalf("missing reader for %q.%v", path, name)
}
assert(ok)
pr = pri.pr
idx = pri.idx
path2, name2, tag2 := pr.peekObj(idx)
sym2 := types.NewPkg(path2, "").Lookup(name2)
assert(sym == sym2)
assert(tag2 != objStub)
}
w := l.pw.newEncoderRaw(relocObj)
wext := l.pw.newEncoderRaw(relocObjExt)
wname := l.pw.newEncoderRaw(relocName)
wdict := l.pw.newEncoderRaw(relocObjDict)
l.decls[sym] = w.idx
assert(wext.idx == w.idx)
assert(wname.idx == w.idx)
assert(wdict.idx == w.idx)
l.relocCommon(pr, &w, relocObj, idx)
l.relocCommon(pr, &wname, relocName, idx)
l.relocCommon(pr, &wdict, relocObjDict, idx)
var obj *ir.Name
if path == "" {
var ok bool
obj, ok = sym.Def.(*ir.Name)
// Generic types, functions, and declared constraint types won't
// have definitions.
// For now, just generically copy their extension data.
// TODO(mdempsky): Restore assertion.
if !ok && false {
base.Fatalf("missing definition for %v", sym)
}
}
if obj != nil {
wext.sync(syncObject1)
switch tag {
case objFunc:
l.relocFuncExt(&wext, obj)
case objType:
l.relocTypeExt(&wext, obj)
case objVar:
l.relocVarExt(&wext, obj)
}
wext.flush()
} else {
l.relocCommon(pr, &wext, relocObjExt, idx)
}
return w.idx
}
func (l *linker) relocCommon(pr *pkgReader, w *encoder, k reloc, idx int) {
r := pr.newDecoderRaw(k, idx)
w.relocs = l.relocAll(pr, r.relocs)
io.Copy(&w.data, &r.data)
w.flush()
}
func (l *linker) pragmaFlag(w *encoder, pragma ir.PragmaFlag) {
w.sync(syncPragma)
w.int(int(pragma))
}
func (l *linker) relocFuncExt(w *encoder, name *ir.Name) {
w.sync(syncFuncExt)
l.pragmaFlag(w, name.Func.Pragma)
l.linkname(w, name)
// Relocated extension data.
w.bool(true)
// Record definition ABI so cross-ABI calls can be direct.
// This is important for the performance of calling some
// common functions implemented in assembly (e.g., bytealg).
w.uint64(uint64(name.Func.ABI))
// Escape analysis.
for _, fs := range &types.RecvsParams {
for _, f := range fs(name.Type()).FieldSlice() {
w.string(f.Note)
}
}
if inl := name.Func.Inl; w.bool(inl != nil) {
w.len(int(inl.Cost))
w.bool(inl.CanDelayResults)
pri, ok := bodyReader[name.Func]
assert(ok)
w.reloc(relocBody, l.relocIdx(pri.pr, relocBody, pri.idx))
}
w.sync(syncEOF)
}
func (l *linker) relocTypeExt(w *encoder, name *ir.Name) {
w.sync(syncTypeExt)
typ := name.Type()
l.pragmaFlag(w, name.Pragma())
// For type T, export the index of type descriptor symbols of T and *T.
l.lsymIdx(w, "", reflectdata.TypeLinksym(typ))
l.lsymIdx(w, "", reflectdata.TypeLinksym(typ.PtrTo()))
if typ.Kind() != types.TINTER {
for _, method := range typ.Methods().Slice() {
l.relocFuncExt(w, method.Nname.(*ir.Name))
}
}
}
func (l *linker) relocVarExt(w *encoder, name *ir.Name) {
w.sync(syncVarExt)
l.linkname(w, name)
}
func (l *linker) linkname(w *encoder, name *ir.Name) {
w.sync(syncLinkname)
linkname := name.Sym().Linkname
if !l.lsymIdx(w, linkname, name.Linksym()) {
w.string(linkname)
}
}
func (l *linker) lsymIdx(w *encoder, linkname string, lsym *obj.LSym) bool {
if lsym.PkgIdx > goobj.PkgIdxSelf || (lsym.PkgIdx == goobj.PkgIdxInvalid && !lsym.Indexed()) || linkname != "" {
w.int64(-1)
return false
}
// For a defined symbol, export its index.
// For re-exporting an imported symbol, pass its index through.
w.int64(int64(lsym.SymIdx))
return true
}
// @@@ Helpers
// TODO(mdempsky): These should probably be removed. I think they're a
// smell that the export data format is not yet quite right.
func (pr *pkgDecoder) peekPkgPath(idx int) string {
r := pr.newDecoder(relocPkg, idx, syncPkgDef)
path := r.string()
if path == "" {
path = pr.pkgPath
}
return path
}
func (pr *pkgDecoder) peekObj(idx int) (string, string, codeObj) {
r := pr.newDecoder(relocName, idx, syncObject1)
r.sync(syncSym)
r.sync(syncPkg)
path := pr.peekPkgPath(r.reloc(relocPkg))
name := r.string()
assert(name != "")
tag := codeObj(r.code(syncCodeObj))
return path, name, tag
}

View file

@ -5,9 +5,11 @@
package noder
import (
"errors"
"fmt"
"go/constant"
"go/token"
"internal/buildcfg"
"os"
"path/filepath"
"runtime"
@ -29,8 +31,11 @@ import (
func LoadPackage(filenames []string) {
base.Timer.Start("fe", "parse")
// -G=3 and unified expect generics syntax, but -G=0 does not.
supportsGenerics := base.Flag.G != 0 || buildcfg.Experiment.Unified
mode := syntax.CheckBranches
if base.Flag.G != 0 {
if supportsGenerics && types.AllowsGoVersion(types.LocalPkg, 1, 18) {
mode |= syntax.AllowGenerics
}
@ -75,6 +80,11 @@ func LoadPackage(filenames []string) {
}
base.Timer.AddEvent(int64(lines), "lines")
if base.Debug.Unified != 0 {
unified(noders)
return
}
if base.Flag.G != 0 {
// Use types2 to type-check and possibly generate IR.
check2(noders)
@ -109,25 +119,35 @@ func LoadPackage(filenames []string) {
// We also defer type alias declarations until phase 2
// to avoid cycles like #18640.
// TODO(gri) Remove this again once we have a fix for #25838.
// Don't use range--typecheck can add closures to Target.Decls.
base.Timer.Start("fe", "typecheck", "top1")
for i := 0; i < len(typecheck.Target.Decls); i++ {
n := typecheck.Target.Decls[i]
if op := n.Op(); op != ir.ODCL && op != ir.OAS && op != ir.OAS2 && (op != ir.ODCLTYPE || !n.(*ir.Decl).X.Alias()) {
typecheck.Target.Decls[i] = typecheck.Stmt(n)
}
}
//
// Phase 2: Variable assignments.
// To check interface assignments, depends on phase 1.
// Don't use range--typecheck can add closures to Target.Decls.
base.Timer.Start("fe", "typecheck", "top2")
for i := 0; i < len(typecheck.Target.Decls); i++ {
n := typecheck.Target.Decls[i]
if op := n.Op(); op == ir.ODCL || op == ir.OAS || op == ir.OAS2 || op == ir.ODCLTYPE && n.(*ir.Decl).X.Alias() {
typecheck.Target.Decls[i] = typecheck.Stmt(n)
for phase, name := range []string{"top1", "top2"} {
base.Timer.Start("fe", "typecheck", name)
for i := 0; i < len(typecheck.Target.Decls); i++ {
n := typecheck.Target.Decls[i]
op := n.Op()
// Closure function declarations are typechecked as part of the
// closure expression.
if fn, ok := n.(*ir.Func); ok && fn.OClosure != nil {
continue
}
// We don't actually add ir.ODCL nodes to Target.Decls. Make sure of that.
if op == ir.ODCL {
base.FatalfAt(n.Pos(), "unexpected top declaration: %v", op)
}
// Identify declarations that should be deferred to the second
// iteration.
late := op == ir.OAS || op == ir.OAS2 || op == ir.ODCLTYPE && n.(*ir.Decl).X.Alias()
if late == (phase == 1) {
typecheck.Target.Decls[i] = typecheck.Stmt(n)
}
}
}
@ -136,16 +156,15 @@ func LoadPackage(filenames []string) {
base.Timer.Start("fe", "typecheck", "func")
var fcount int64
for i := 0; i < len(typecheck.Target.Decls); i++ {
n := typecheck.Target.Decls[i]
if n.Op() == ir.ODCLFUNC {
if fn, ok := typecheck.Target.Decls[i].(*ir.Func); ok {
if base.Flag.W > 1 {
s := fmt.Sprintf("\nbefore typecheck %v", n)
ir.Dump(s, n)
s := fmt.Sprintf("\nbefore typecheck %v", fn)
ir.Dump(s, fn)
}
typecheck.FuncBody(n.(*ir.Func))
typecheck.FuncBody(fn)
if base.Flag.W > 1 {
s := fmt.Sprintf("\nafter typecheck %v", n)
ir.Dump(s, n)
s := fmt.Sprintf("\nafter typecheck %v", fn)
ir.Dump(s, fn)
}
fcount++
}
@ -449,7 +468,7 @@ func (p *noder) varDecl(decl *syntax.VarDecl) []ir.Node {
type constState struct {
group *syntax.Group
typ ir.Ntype
values []ir.Node
values syntax.Expr
iota int64
}
@ -467,16 +486,15 @@ func (p *noder) constDecl(decl *syntax.ConstDecl, cs *constState) []ir.Node {
names := p.declNames(ir.OLITERAL, decl.NameList)
typ := p.typeExprOrNil(decl.Type)
var values []ir.Node
if decl.Values != nil {
values = p.exprList(decl.Values)
cs.typ, cs.values = typ, values
cs.typ, cs.values = typ, decl.Values
} else {
if typ != nil {
base.Errorf("const declaration cannot have type without expression")
}
typ, values = cs.typ, cs.values
typ = cs.typ
}
values := p.exprList(cs.values)
nn := make([]ir.Node, 0, len(names))
for i, n := range names {
@ -484,10 +502,16 @@ func (p *noder) constDecl(decl *syntax.ConstDecl, cs *constState) []ir.Node {
base.Errorf("missing value in const declaration")
break
}
v := values[i]
if decl.Values == nil {
v = ir.DeepCopy(n.Pos(), v)
ir.Visit(v, func(v ir.Node) {
if ir.HasUniquePos(v) {
v.SetPos(n.Pos())
}
})
}
typecheck.Declare(n, typecheck.DeclContext)
n.Ntype = typ
@ -625,6 +649,9 @@ func (p *noder) params(params []*syntax.Field, dddOk bool) []*ir.Field {
for i, param := range params {
p.setlineno(param)
nodes = append(nodes, p.param(param, dddOk, i+1 == len(params)))
if i > 0 && params[i].Type == params[i-1].Type {
nodes[i].Ntype = nodes[i-1].Ntype
}
}
return nodes
}
@ -914,6 +941,9 @@ func (p *noder) structType(expr *syntax.StructType) ir.Node {
} else {
n = ir.NewField(p.pos(field), p.name(field.Name), p.typeExpr(field.Type), nil)
}
if i > 0 && expr.FieldList[i].Type == expr.FieldList[i-1].Type {
n.Ntype = l[i-1].Ntype
}
if i < len(expr.TagList) && expr.TagList[i] != nil {
n.Note = constant.StringVal(p.basicLit(expr.TagList[i]))
}
@ -977,6 +1007,8 @@ func (p *noder) packname(expr syntax.Expr) *types.Sym {
}
func (p *noder) embedded(typ syntax.Expr) *ir.Field {
pos := p.pos(syntax.StartPos(typ))
op, isStar := typ.(*syntax.Operation)
if isStar {
if op.Op != syntax.Mul || op.Y != nil {
@ -986,11 +1018,11 @@ func (p *noder) embedded(typ syntax.Expr) *ir.Field {
}
sym := p.packname(typ)
n := ir.NewField(p.pos(typ), typecheck.Lookup(sym.Name), importName(sym).(ir.Ntype), nil)
n := ir.NewField(pos, typecheck.Lookup(sym.Name), importName(sym).(ir.Ntype), nil)
n.Embedded = true
if isStar {
n.Ntype = ir.NewStarExpr(p.pos(op), n.Ntype)
n.Ntype = ir.NewStarExpr(pos, n.Ntype)
}
return n
}
@ -1780,24 +1812,14 @@ func fakeRecv() *ir.Field {
}
func (p *noder) funcLit(expr *syntax.FuncLit) ir.Node {
xtype := p.typeExpr(expr.Type)
fn := ir.NewFunc(p.pos(expr))
fn.SetIsHiddenClosure(ir.CurFunc != nil)
fn.Nname = ir.NewNameAt(p.pos(expr), ir.BlankNode.Sym()) // filled in by tcClosure
fn.Nname.Func = fn
fn.Nname.Ntype = xtype
fn.Nname.Defn = fn
clo := ir.NewClosureExpr(p.pos(expr), fn)
fn.OClosure = clo
fn := ir.NewClosureFunc(p.pos(expr), ir.CurFunc != nil)
fn.Nname.Ntype = p.typeExpr(expr.Type)
p.funcBody(fn, expr.Body)
ir.FinishCaptureNames(base.Pos, ir.CurFunc, fn)
return clo
return fn.OClosure
}
// A function named init is a special case.
@ -1841,33 +1863,14 @@ func oldname(s *types.Sym) ir.Node {
}
func varEmbed(makeXPos func(syntax.Pos) src.XPos, name *ir.Name, decl *syntax.VarDecl, pragma *pragmas, haveEmbed bool) {
if pragma.Embeds == nil {
return
}
pragmaEmbeds := pragma.Embeds
pragma.Embeds = nil
pos := makeXPos(pragmaEmbeds[0].Pos)
if len(pragmaEmbeds) == 0 {
return
}
if !haveEmbed {
base.ErrorfAt(pos, "go:embed only allowed in Go files that import \"embed\"")
return
}
if len(decl.NameList) > 1 {
base.ErrorfAt(pos, "go:embed cannot apply to multiple vars")
return
}
if decl.Values != nil {
base.ErrorfAt(pos, "go:embed cannot apply to var with initializer")
return
}
if decl.Type == nil {
// Should not happen, since Values == nil now.
base.ErrorfAt(pos, "go:embed cannot apply to var without type")
return
}
if typecheck.DeclContext != ir.PEXTERN {
base.ErrorfAt(pos, "go:embed cannot apply to var inside func")
if err := checkEmbed(decl, haveEmbed, typecheck.DeclContext != ir.PEXTERN); err != nil {
base.ErrorfAt(makeXPos(pragmaEmbeds[0].Pos), "%s", err)
return
}
@ -1878,3 +1881,24 @@ func varEmbed(makeXPos func(syntax.Pos) src.XPos, name *ir.Name, decl *syntax.Va
typecheck.Target.Embeds = append(typecheck.Target.Embeds, name)
name.Embed = &embeds
}
func checkEmbed(decl *syntax.VarDecl, haveEmbed, withinFunc bool) error {
switch {
case !haveEmbed:
return errors.New("go:embed only allowed in Go files that import \"embed\"")
case len(decl.NameList) > 1:
return errors.New("go:embed cannot apply to multiple vars")
case decl.Values != nil:
return errors.New("go:embed cannot apply to var with initializer")
case decl.Type == nil:
// Should not happen, since Values == nil now.
return errors.New("go:embed cannot apply to var without type")
case withinFunc:
return errors.New("go:embed cannot apply to var inside func")
case !types.AllowsGoVersion(types.LocalPkg, 1, 16):
return fmt.Errorf("go:embed requires go1.16 or later (-lang was set to %s; check go.mod)", base.Flag.Lang)
default:
return nil
}
}

View file

@ -29,7 +29,7 @@ func (g *irgen) use(name *syntax.Name) *ir.Name {
if !ok {
base.FatalfAt(g.pos(name), "unknown name %v", name)
}
obj := ir.CaptureName(g.pos(obj2), ir.CurFunc, g.obj(obj2))
obj := ir.CaptureName(g.pos(name), ir.CurFunc, g.obj(obj2))
if obj.Defn != nil && obj.Defn.Op() == ir.ONAME {
// If CaptureName created a closure variable, then transfer the
// type of the captured name to the new closure variable.
@ -49,6 +49,11 @@ func (g *irgen) obj(obj types2.Object) *ir.Name {
// For imported objects, we use iimport directly instead of mapping
// the types2 representation.
if obj.Pkg() != g.self {
if sig, ok := obj.Type().(*types2.Signature); ok && sig.Recv() != nil {
// We can't import a method by name; we must import the type
// and access the method from it.
base.FatalfAt(g.pos(obj), "tried to import a method directly")
}
sym := g.sym(obj)
if sym.Def != nil {
return sym.Def.(*ir.Name)
@ -101,25 +106,28 @@ func (g *irgen) obj(obj types2.Object) *ir.Name {
case *types2.TypeName:
if obj.IsAlias() {
name = g.objCommon(pos, ir.OTYPE, g.sym(obj), class, g.typ(obj.Type()))
name.SetAlias(true)
} else {
name = ir.NewDeclNameAt(pos, ir.OTYPE, g.sym(obj))
g.objFinish(name, class, types.NewNamed(name))
}
case *types2.Var:
var sym *types.Sym
if class == ir.PPARAMOUT {
sym := g.sym(obj)
if class == ir.PPARAMOUT && (sym == nil || sym.IsBlank()) {
// Backend needs names for result parameters,
// even if they're anonymous or blank.
switch obj.Name() {
case "":
sym = typecheck.LookupNum("~r", len(ir.CurFunc.Dcl)) // 'r' for "result"
case "_":
sym = typecheck.LookupNum("~b", len(ir.CurFunc.Dcl)) // 'b' for "blank"
nresults := 0
for _, n := range ir.CurFunc.Dcl {
if n.Class == ir.PPARAMOUT {
nresults++
}
}
if sym == nil {
sym = typecheck.LookupNum("~r", nresults) // 'r' for "result"
} else {
sym = typecheck.LookupNum("~b", nresults) // 'b' for "blank"
}
}
if sym == nil {
sym = g.sym(obj)
}
name = g.objCommon(pos, ir.ONAME, sym, class, g.typ(obj.Type()))
@ -164,9 +172,8 @@ func (g *irgen) objFinish(name *ir.Name, class ir.Class, typ *types.Type) {
break // methods are exported with their receiver type
}
if types.IsExported(sym.Name) {
if name.Class == ir.PFUNC && name.Type().NumTParams() > 0 {
base.FatalfAt(name.Pos(), "Cannot export a generic function (yet): %v", name)
}
// Generic functions can be marked for export here, even
// though they will not be compiled until instantiated.
typecheck.Export(name)
}
if base.Flag.AsmHdr != "" && !name.Sym().Asm() {

View file

@ -0,0 +1,450 @@
// UNREVIEWED
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package noder
import (
"fmt"
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/syntax"
"cmd/compile/internal/types2"
"cmd/internal/src"
)
// This file defines helper functions useful for satisfying toolstash
// -cmp against the legacy frontend behavior; these helpers can be
// removed once that's no longer a concern.
// quirksMode controls whether behavior specific to satisfying
// toolstash -cmp is used.
func quirksMode() bool {
return base.Debug.UnifiedQuirks != 0
}
// posBasesOf returns all of the position bases in the source files,
// as seen in a straightforward traversal.
//
// This is necessary to ensure position bases (and thus file names)
// get registered in the same order as noder would visit them.
func posBasesOf(noders []*noder) []*syntax.PosBase {
seen := make(map[*syntax.PosBase]bool)
var bases []*syntax.PosBase
for _, p := range noders {
syntax.Crawl(p.file, func(n syntax.Node) bool {
if b := n.Pos().Base(); !seen[b] {
bases = append(bases, b)
seen[b] = true
}
return false
})
}
return bases
}
// importedObjsOf returns the imported objects (i.e., referenced
// objects not declared by curpkg) from the parsed source files, in
// the order that typecheck used to load their definitions.
//
// This is needed because loading the definitions for imported objects
// can also add file names.
func importedObjsOf(curpkg *types2.Package, info *types2.Info, noders []*noder) []types2.Object {
// This code is complex because it matches the precise order that
// typecheck recursively and repeatedly traverses the IR. It's meant
// to be thrown away eventually anyway.
seen := make(map[types2.Object]bool)
var objs []types2.Object
var phase int
decls := make(map[types2.Object]syntax.Decl)
assoc := func(decl syntax.Decl, names ...*syntax.Name) {
for _, name := range names {
obj, ok := info.Defs[name]
assert(ok)
decls[obj] = decl
}
}
for _, p := range noders {
syntax.Crawl(p.file, func(n syntax.Node) bool {
switch n := n.(type) {
case *syntax.ConstDecl:
assoc(n, n.NameList...)
case *syntax.FuncDecl:
assoc(n, n.Name)
case *syntax.TypeDecl:
assoc(n, n.Name)
case *syntax.VarDecl:
assoc(n, n.NameList...)
case *syntax.BlockStmt:
return true
}
return false
})
}
var visited map[syntax.Decl]bool
var resolveDecl func(n syntax.Decl)
var resolveNode func(n syntax.Node, top bool)
resolveDecl = func(n syntax.Decl) {
if visited[n] {
return
}
visited[n] = true
switch n := n.(type) {
case *syntax.ConstDecl:
resolveNode(n.Type, true)
resolveNode(n.Values, true)
case *syntax.FuncDecl:
if n.Recv != nil {
resolveNode(n.Recv, true)
}
resolveNode(n.Type, true)
case *syntax.TypeDecl:
resolveNode(n.Type, true)
case *syntax.VarDecl:
if n.Type != nil {
resolveNode(n.Type, true)
} else {
resolveNode(n.Values, true)
}
}
}
resolveObj := func(pos syntax.Pos, obj types2.Object) {
switch obj.Pkg() {
case nil:
// builtin; nothing to do
case curpkg:
if decl, ok := decls[obj]; ok {
resolveDecl(decl)
}
default:
if obj.Parent() == obj.Pkg().Scope() && !seen[obj] {
seen[obj] = true
objs = append(objs, obj)
}
}
}
checkdefat := func(pos syntax.Pos, n *syntax.Name) {
if n.Value == "_" {
return
}
obj, ok := info.Uses[n]
if !ok {
obj, ok = info.Defs[n]
if !ok {
return
}
}
if obj == nil {
return
}
resolveObj(pos, obj)
}
checkdef := func(n *syntax.Name) { checkdefat(n.Pos(), n) }
var later []syntax.Node
resolveNode = func(n syntax.Node, top bool) {
if n == nil {
return
}
syntax.Crawl(n, func(n syntax.Node) bool {
switch n := n.(type) {
case *syntax.Name:
checkdef(n)
case *syntax.SelectorExpr:
if name, ok := n.X.(*syntax.Name); ok {
if _, isPkg := info.Uses[name].(*types2.PkgName); isPkg {
checkdefat(n.X.Pos(), n.Sel)
return true
}
}
case *syntax.AssignStmt:
resolveNode(n.Rhs, top)
resolveNode(n.Lhs, top)
return true
case *syntax.VarDecl:
resolveNode(n.Values, top)
case *syntax.FuncLit:
if top {
resolveNode(n.Type, top)
later = append(later, n.Body)
return true
}
case *syntax.BlockStmt:
if phase >= 3 {
for _, stmt := range n.List {
resolveNode(stmt, false)
}
}
return true
}
return false
})
}
for phase = 1; phase <= 5; phase++ {
visited = map[syntax.Decl]bool{}
for _, p := range noders {
for _, decl := range p.file.DeclList {
switch decl := decl.(type) {
case *syntax.ConstDecl:
resolveDecl(decl)
case *syntax.FuncDecl:
resolveDecl(decl)
if phase >= 3 && decl.Body != nil {
resolveNode(decl.Body, true)
}
case *syntax.TypeDecl:
if !decl.Alias || phase >= 2 {
resolveDecl(decl)
}
case *syntax.VarDecl:
if phase >= 2 {
resolveNode(decl.Values, true)
resolveDecl(decl)
}
}
}
if phase >= 5 {
syntax.Crawl(p.file, func(n syntax.Node) bool {
if name, ok := n.(*syntax.Name); ok {
if obj, ok := info.Uses[name]; ok {
resolveObj(name.Pos(), obj)
}
}
return false
})
}
}
for i := 0; i < len(later); i++ {
resolveNode(later[i], true)
}
later = nil
}
return objs
}
// typeExprEndPos returns the position that noder would leave base.Pos
// after parsing the given type expression.
func typeExprEndPos(expr0 syntax.Expr) syntax.Pos {
for {
switch expr := expr0.(type) {
case *syntax.Name:
return expr.Pos()
case *syntax.SelectorExpr:
return expr.X.Pos()
case *syntax.ParenExpr:
expr0 = expr.X
case *syntax.Operation:
assert(expr.Op == syntax.Mul)
assert(expr.Y == nil)
expr0 = expr.X
case *syntax.ArrayType:
expr0 = expr.Elem
case *syntax.ChanType:
expr0 = expr.Elem
case *syntax.DotsType:
expr0 = expr.Elem
case *syntax.MapType:
expr0 = expr.Value
case *syntax.SliceType:
expr0 = expr.Elem
case *syntax.StructType:
return expr.Pos()
case *syntax.InterfaceType:
expr0 = lastFieldType(expr.MethodList)
if expr0 == nil {
return expr.Pos()
}
case *syntax.FuncType:
expr0 = lastFieldType(expr.ResultList)
if expr0 == nil {
expr0 = lastFieldType(expr.ParamList)
if expr0 == nil {
return expr.Pos()
}
}
case *syntax.IndexExpr: // explicit type instantiation
targs := unpackListExpr(expr.Index)
expr0 = targs[len(targs)-1]
default:
panic(fmt.Sprintf("%s: unexpected type expression %v", expr.Pos(), syntax.String(expr)))
}
}
}
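// Illustrative example (not part of the original change): for the type
// expression `map[string][]*Foo`, the walk above goes MapType -> Value
// (`[]*Foo`), SliceType -> Elem (`*Foo`), Operation(*) -> X (`Foo`), and
// returns the position of the Name `Foo`:
//
//	end := typeExprEndPos(expr) // position of "Foo"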
func lastFieldType(fields []*syntax.Field) syntax.Expr {
if len(fields) == 0 {
return nil
}
return fields[len(fields)-1].Type
}
// sumPos returns the position that noder.sum would produce for
// constant expression x.
func sumPos(x syntax.Expr) syntax.Pos {
orig := x
for {
switch x1 := x.(type) {
case *syntax.BasicLit:
assert(x1.Kind == syntax.StringLit)
return x1.Pos()
case *syntax.Operation:
assert(x1.Op == syntax.Add && x1.Y != nil)
if r, ok := x1.Y.(*syntax.BasicLit); ok {
assert(r.Kind == syntax.StringLit)
x = x1.X
continue
}
}
return orig.Pos()
}
}
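// Illustrative example (not part of the original change): for x representing
// `"a" + "b" + "c"`, the loop strips the rightmost string literal operand on
// each iteration and ends at the leftmost term, so sumPos returns the
// position of `"a"`. For a sum such as `f() + "suffix"`, the walk stops at
// the non-literal term f() and the position of the whole original expression
// is returned instead.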
// funcParamsEndPos returns the value of base.Pos left by noder after
// processing a function signature.
func funcParamsEndPos(fn *ir.Func) src.XPos {
sig := fn.Nname.Type()
fields := sig.Results().FieldSlice()
if len(fields) == 0 {
fields = sig.Params().FieldSlice()
if len(fields) == 0 {
fields = sig.Recvs().FieldSlice()
if len(fields) == 0 {
if fn.OClosure != nil {
return fn.Nname.Ntype.Pos()
}
return fn.Pos()
}
}
}
return fields[len(fields)-1].Pos
}
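// Illustrative example (not part of the original change): for
// `func (r T) f(a int) (b bool)` the results are non-empty, so the position
// of the last result `b` is returned; for `func (r T) f(a int)` it is the
// position of `a`; and for `func (r T) f()` it is the position of the
// receiver `r`.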
type dupTypes struct {
origs map[types2.Type]types2.Type
}
func (d *dupTypes) orig(t types2.Type) types2.Type {
if orig, ok := d.origs[t]; ok {
return orig
}
return t
}
func (d *dupTypes) add(t, orig types2.Type) {
if t == orig {
return
}
if d.origs == nil {
d.origs = make(map[types2.Type]types2.Type)
}
assert(d.origs[t] == nil)
d.origs[t] = orig
switch t := t.(type) {
case *types2.Pointer:
orig := orig.(*types2.Pointer)
d.add(t.Elem(), orig.Elem())
case *types2.Slice:
orig := orig.(*types2.Slice)
d.add(t.Elem(), orig.Elem())
case *types2.Map:
orig := orig.(*types2.Map)
d.add(t.Key(), orig.Key())
d.add(t.Elem(), orig.Elem())
case *types2.Array:
orig := orig.(*types2.Array)
assert(t.Len() == orig.Len())
d.add(t.Elem(), orig.Elem())
case *types2.Chan:
orig := orig.(*types2.Chan)
assert(t.Dir() == orig.Dir())
d.add(t.Elem(), orig.Elem())
case *types2.Struct:
orig := orig.(*types2.Struct)
assert(t.NumFields() == orig.NumFields())
for i := 0; i < t.NumFields(); i++ {
d.add(t.Field(i).Type(), orig.Field(i).Type())
}
case *types2.Interface:
orig := orig.(*types2.Interface)
assert(t.NumExplicitMethods() == orig.NumExplicitMethods())
assert(t.NumEmbeddeds() == orig.NumEmbeddeds())
for i := 0; i < t.NumExplicitMethods(); i++ {
d.add(t.ExplicitMethod(i).Type(), orig.ExplicitMethod(i).Type())
}
for i := 0; i < t.NumEmbeddeds(); i++ {
d.add(t.EmbeddedType(i), orig.EmbeddedType(i))
}
case *types2.Signature:
orig := orig.(*types2.Signature)
assert((t.Recv() == nil) == (orig.Recv() == nil))
if t.Recv() != nil {
d.add(t.Recv().Type(), orig.Recv().Type())
}
d.add(t.Params(), orig.Params())
d.add(t.Results(), orig.Results())
case *types2.Tuple:
orig := orig.(*types2.Tuple)
assert(t.Len() == orig.Len())
for i := 0; i < t.Len(); i++ {
d.add(t.At(i).Type(), orig.At(i).Type())
}
default:
assert(types2.Identical(t, orig))
}
}
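// Illustrative usage (a sketch, not part of the original change): given two
// structurally identical but distinct types2 values t and orig, e.g. two
// separately constructed `[]map[string]*T` types, add records t -> orig and
// then recursively records the same mapping for the key, element, field,
// and signature subterms, so orig can later canonicalize any of them:
//
//	var d dupTypes
//	d.add(t, orig)
//	_ = d.orig(t) // == orig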

File diff suppressed because it is too large

View file

@@ -0,0 +1,510 @@
// UNREVIEWED
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package noder
import (
"go/constant"
"cmd/compile/internal/base"
"cmd/compile/internal/syntax"
"cmd/compile/internal/types2"
"cmd/internal/src"
)
type pkgReader2 struct {
pkgDecoder
check *types2.Checker
imports map[string]*types2.Package
posBases []*syntax.PosBase
pkgs []*types2.Package
typs []types2.Type
}
func readPackage2(check *types2.Checker, imports map[string]*types2.Package, input pkgDecoder) *types2.Package {
pr := pkgReader2{
pkgDecoder: input,
check: check,
imports: imports,
posBases: make([]*syntax.PosBase, input.numElems(relocPosBase)),
pkgs: make([]*types2.Package, input.numElems(relocPkg)),
typs: make([]types2.Type, input.numElems(relocType)),
}
r := pr.newReader(relocMeta, publicRootIdx, syncPublic)
pkg := r.pkg()
r.bool() // has init
for i, n := 0, r.len(); i < n; i++ {
r.obj()
}
r.sync(syncEOF)
pkg.MarkComplete()
return pkg
}
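// For reference, the public root element decoded above has this layout (a
// sketch inferred from the reads in readPackage2, not a normative spec):
//
//	pkg ref | bool (has init) | len N | N object refs | syncEOF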
type reader2 struct {
decoder
p *pkgReader2
dict *reader2Dict
}
type reader2Dict struct {
bounds []typeInfo
tparams []*types2.TypeParam
derived []derivedInfo
derivedTypes []types2.Type
}
type reader2TypeBound struct {
derived bool
boundIdx int
}
func (pr *pkgReader2) newReader(k reloc, idx int, marker syncMarker) *reader2 {
return &reader2{
decoder: pr.newDecoder(k, idx, marker),
p: pr,
}
}
// @@@ Positions
func (r *reader2) pos() syntax.Pos {
r.sync(syncPos)
if !r.bool() {
return syntax.Pos{}
}
// TODO(mdempsky): Delta encoding.
posBase := r.posBase()
line := r.uint()
col := r.uint()
return syntax.MakePos(posBase, line, col)
}
func (r *reader2) posBase() *syntax.PosBase {
return r.p.posBaseIdx(r.reloc(relocPosBase))
}
func (pr *pkgReader2) posBaseIdx(idx int) *syntax.PosBase {
if b := pr.posBases[idx]; b != nil {
return b
}
r := pr.newReader(relocPosBase, idx, syncPosBase)
var b *syntax.PosBase
filename := r.string()
_ = r.string() // absolute file name
if r.bool() {
b = syntax.NewFileBase(filename)
} else {
pos := r.pos()
line := r.uint()
col := r.uint()
b = syntax.NewLineBase(pos, filename, line, col)
}
pr.posBases[idx] = b
return b
}
// @@@ Packages
func (r *reader2) pkg() *types2.Package {
r.sync(syncPkg)
return r.p.pkgIdx(r.reloc(relocPkg))
}
func (pr *pkgReader2) pkgIdx(idx int) *types2.Package {
// TODO(mdempsky): Consider using some non-nil pointer to indicate
// the universe scope, so we don't need to keep re-reading it.
if pkg := pr.pkgs[idx]; pkg != nil {
return pkg
}
pkg := pr.newReader(relocPkg, idx, syncPkgDef).doPkg()
pr.pkgs[idx] = pkg
return pkg
}
func (r *reader2) doPkg() *types2.Package {
path := r.string()
if path == "builtin" {
return nil // universe
}
if path == "" {
path = r.p.pkgPath
}
if pkg := r.p.imports[path]; pkg != nil {
return pkg
}
name := r.string()
height := r.len()
pkg := types2.NewPackageHeight(path, name, height)
r.p.imports[path] = pkg
// TODO(mdempsky): The list of imported packages is important for
// go/types, but we could probably skip populating it for types2.
imports := make([]*types2.Package, r.len())
for i := range imports {
imports[i] = r.pkg()
}
pkg.SetImports(imports)
return pkg
}
// @@@ Types
func (r *reader2) typ() types2.Type {
return r.p.typIdx(r.typInfo(), r.dict)
}
func (r *reader2) typInfo() typeInfo {
r.sync(syncType)
if r.bool() {
return typeInfo{idx: r.len(), derived: true}
}
return typeInfo{idx: r.reloc(relocType), derived: false}
}
func (pr *pkgReader2) typIdx(info typeInfo, dict *reader2Dict) types2.Type {
idx := info.idx
var where *types2.Type
if info.derived {
where = &dict.derivedTypes[idx]
idx = dict.derived[idx].idx
} else {
where = &pr.typs[idx]
}
if typ := *where; typ != nil {
return typ
}
r := pr.newReader(relocType, idx, syncTypeIdx)
r.dict = dict
typ := r.doTyp()
assert(typ != nil)
// See comment in pkgReader.typIdx explaining how this happens.
if prev := *where; prev != nil {
return prev
}
*where = typ
return typ
}
func (r *reader2) doTyp() (res types2.Type) {
switch tag := codeType(r.code(syncType)); tag {
default:
base.FatalfAt(src.NoXPos, "unhandled type tag: %v", tag)
panic("unreachable")
case typeBasic:
return types2.Typ[r.len()]
case typeNamed:
obj, targs := r.obj()
name := obj.(*types2.TypeName)
if len(targs) != 0 {
return r.p.check.InstantiateLazy(syntax.Pos{}, name.Type(), targs, nil, false)
}
return name.Type()
case typeTypeParam:
return r.dict.tparams[r.len()]
case typeArray:
len := int64(r.uint64())
return types2.NewArray(r.typ(), len)
case typeChan:
dir := types2.ChanDir(r.len())
return types2.NewChan(dir, r.typ())
case typeMap:
return types2.NewMap(r.typ(), r.typ())
case typePointer:
return types2.NewPointer(r.typ())
case typeSignature:
return r.signature(nil)
case typeSlice:
return types2.NewSlice(r.typ())
case typeStruct:
return r.structType()
case typeInterface:
return r.interfaceType()
case typeUnion:
return r.unionType()
}
}
func (r *reader2) structType() *types2.Struct {
fields := make([]*types2.Var, r.len())
var tags []string
for i := range fields {
pos := r.pos()
pkg, name := r.selector()
ftyp := r.typ()
tag := r.string()
embedded := r.bool()
fields[i] = types2.NewField(pos, pkg, name, ftyp, embedded)
if tag != "" {
for len(tags) < i {
tags = append(tags, "")
}
tags = append(tags, tag)
}
}
return types2.NewStruct(fields, tags)
}
func (r *reader2) unionType() *types2.Union {
terms := make([]*types2.Term, r.len())
for i := range terms {
terms[i] = types2.NewTerm(r.bool(), r.typ())
}
return types2.NewUnion(terms)
}
func (r *reader2) interfaceType() *types2.Interface {
methods := make([]*types2.Func, r.len())
embeddeds := make([]types2.Type, r.len())
for i := range methods {
pos := r.pos()
pkg, name := r.selector()
mtyp := r.signature(nil)
methods[i] = types2.NewFunc(pos, pkg, name, mtyp)
}
for i := range embeddeds {
embeddeds[i] = r.typ()
}
return types2.NewInterfaceType(methods, embeddeds)
}
func (r *reader2) signature(recv *types2.Var) *types2.Signature {
r.sync(syncSignature)
params := r.params()
results := r.params()
variadic := r.bool()
return types2.NewSignature(recv, params, results, variadic)
}
func (r *reader2) params() *types2.Tuple {
r.sync(syncParams)
params := make([]*types2.Var, r.len())
for i := range params {
params[i] = r.param()
}
return types2.NewTuple(params...)
}
func (r *reader2) param() *types2.Var {
r.sync(syncParam)
pos := r.pos()
pkg, name := r.localIdent()
typ := r.typ()
return types2.NewParam(pos, pkg, name, typ)
}
// @@@ Objects
func (r *reader2) obj() (types2.Object, []types2.Type) {
r.sync(syncObject)
assert(!r.bool())
pkg, name := r.p.objIdx(r.reloc(relocObj))
obj := pkg.Scope().Lookup(name)
targs := make([]types2.Type, r.len())
for i := range targs {
targs[i] = r.typ()
}
return obj, targs
}
func (pr *pkgReader2) objIdx(idx int) (*types2.Package, string) {
rname := pr.newReader(relocName, idx, syncObject1)
objPkg, objName := rname.qualifiedIdent()
assert(objName != "")
tag := codeObj(rname.code(syncCodeObj))
if tag == objStub {
assert(objPkg == nil)
return objPkg, objName
}
dict := pr.objDictIdx(idx)
r := pr.newReader(relocObj, idx, syncObject1)
r.dict = dict
objPkg.Scope().InsertLazy(objName, func() types2.Object {
switch tag {
default:
panic("weird")
case objAlias:
pos := r.pos()
typ := r.typ()
return types2.NewTypeName(pos, objPkg, objName, typ)
case objConst:
pos := r.pos()
typ, val := r.value()
return types2.NewConst(pos, objPkg, objName, typ, val)
case objFunc:
pos := r.pos()
tparams := r.typeParamNames()
sig := r.signature(nil)
sig.SetTParams(tparams)
return types2.NewFunc(pos, objPkg, objName, sig)
case objType:
pos := r.pos()
return types2.NewTypeNameLazy(pos, objPkg, objName, func(named *types2.Named) (tparams []*types2.TypeName, underlying types2.Type, methods []*types2.Func) {
tparams = r.typeParamNames()
// TODO(mdempsky): Rewrite receiver types so that the underlying
// type is an Interface? The go/types importer does this (I think because
// unit tests expected that), but cmd/compile doesn't care
// about it, so maybe we can avoid worrying about that here.
underlying = r.typ().Underlying()
methods = make([]*types2.Func, r.len())
for i := range methods {
methods[i] = r.method()
}
return
})
case objVar:
pos := r.pos()
typ := r.typ()
return types2.NewVar(pos, objPkg, objName, typ)
}
})
return objPkg, objName
}
func (r *reader2) value() (types2.Type, constant.Value) {
r.sync(syncValue)
return r.typ(), r.rawValue()
}
func (pr *pkgReader2) objDictIdx(idx int) *reader2Dict {
r := pr.newReader(relocObjDict, idx, syncObject1)
var dict reader2Dict
if implicits := r.len(); implicits != 0 {
base.Fatalf("unexpected object with %v implicit type parameter(s)", implicits)
}
dict.bounds = make([]typeInfo, r.len())
for i := range dict.bounds {
dict.bounds[i] = r.typInfo()
}
dict.derived = make([]derivedInfo, r.len())
dict.derivedTypes = make([]types2.Type, len(dict.derived))
for i := range dict.derived {
dict.derived[i] = derivedInfo{r.reloc(relocType), r.bool()}
}
// function references follow, but reader2 doesn't need those
return &dict
}
func (r *reader2) typeParamNames() []*types2.TypeName {
r.sync(syncTypeParamNames)
// Note: This code assumes it only processes objects without
// implicit type parameters. This is currently fine, because
// reader2 is only used to read in exported declarations, which are
// always package scoped.
if len(r.dict.bounds) == 0 {
return nil
}
// Careful: Type parameter lists may have cycles. To allow for this,
// we construct the type parameter list in two passes: first we
// create all the TypeNames and TypeParams, then we construct and
// set the bound type.
names := make([]*types2.TypeName, len(r.dict.bounds))
r.dict.tparams = make([]*types2.TypeParam, len(r.dict.bounds))
for i := range r.dict.bounds {
pos := r.pos()
pkg, name := r.localIdent()
names[i] = types2.NewTypeName(pos, pkg, name, nil)
r.dict.tparams[i] = r.p.check.NewTypeParam(names[i], nil)
}
for i, bound := range r.dict.bounds {
r.dict.tparams[i].SetConstraint(r.p.typIdx(bound, r.dict))
}
return names
}
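// An illustrative example (not from the original change) of why two passes
// are needed: in a declaration such as
//
//	type S[A interface{ M(B) }, B interface{ N(A) }] struct{}
//
// each bound mentions the other type parameter, so every TypeParam must
// exist before any constraint can be read and set.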
func (r *reader2) method() *types2.Func {
r.sync(syncMethod)
pos := r.pos()
pkg, name := r.selector()
rparams := r.typeParamNames()
sig := r.signature(r.param())
sig.SetRParams(rparams)
_ = r.pos() // TODO(mdempsky): Remove; this is a hack for linker.go.
return types2.NewFunc(pos, pkg, name, sig)
}
func (r *reader2) qualifiedIdent() (*types2.Package, string) { return r.ident(syncSym) }
func (r *reader2) localIdent() (*types2.Package, string) { return r.ident(syncLocalIdent) }
func (r *reader2) selector() (*types2.Package, string) { return r.ident(syncSelector) }
func (r *reader2) ident(marker syncMarker) (*types2.Package, string) {
r.sync(marker)
return r.pkg(), r.string()
}

View file

@@ -0,0 +1,42 @@
// UNREVIEWED
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package noder
// A reloc indicates a particular section within a unified IR export.
//
// TODO(mdempsky): Rename to "section" or something similar?
type reloc int
// A relocEnt (relocation entry) is an entry in an atom's local
// reference table.
//
// TODO(mdempsky): Rename this too.
type relocEnt struct {
kind reloc
idx int
}
// Reserved indices within the meta relocation section.
const (
publicRootIdx = 0
privateRootIdx = 1
)
const (
relocString reloc = iota
relocMeta
relocPosBase
relocPkg
relocName
relocType
relocObj
relocObjExt
relocObjDict
relocBody
numRelocs = iota
)
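// An illustrative sketch (not part of the original change): an element in
// one section refers to an element in another section through a relocEnt in
// its local reference table; e.g.
//
//	relocEnt{kind: relocType, idx: 3}
//
// names the element at index 3 of the type section.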

File diff suppressed because it is too large

View file

@@ -35,11 +35,7 @@ func (g *irgen) stmt(stmt syntax.Stmt) ir.Node {
case *syntax.BlockStmt:
return ir.NewBlockStmt(g.pos(stmt), g.blockStmt(stmt))
case *syntax.ExprStmt:
x := g.expr(stmt.X)
if call, ok := x.(*ir.CallExpr); ok {
call.Use = ir.CallUseStmt
}
return x
return g.expr(stmt.X)
case *syntax.SendStmt:
n := ir.NewSendStmt(g.pos(stmt), g.expr(stmt.Chan), g.expr(stmt.Value))
if n.Chan.Type().HasTParam() || n.Value.Type().HasTParam() {
@@ -61,7 +57,10 @@ func (g *irgen) stmt(stmt syntax.Stmt) ir.Node {
if stmt.Rhs == nil {
n = IncDec(g.pos(stmt), op, g.expr(stmt.Lhs))
} else {
n = ir.NewAssignOpStmt(g.pos(stmt), op, g.expr(stmt.Lhs), g.expr(stmt.Rhs))
// Eval rhs before lhs, for compatibility with noder1
rhs := g.expr(stmt.Rhs)
lhs := g.expr(stmt.Lhs)
n = ir.NewAssignOpStmt(g.pos(stmt), op, lhs, rhs)
}
if n.X.Typecheck() == 3 {
n.SetTypecheck(3)
@@ -72,8 +71,9 @@ func (g *irgen) stmt(stmt syntax.Stmt) ir.Node {
return n
}
names, lhs := g.assignList(stmt.Lhs, stmt.Op == syntax.Def)
// Eval rhs before lhs, for compatibility with noder1
rhs := g.exprList(stmt.Rhs)
names, lhs := g.assignList(stmt.Lhs, stmt.Op == syntax.Def)
// We must delay transforming the assign statement if any of the
// lhs or rhs nodes are also delayed, since transformAssign needs
@@ -128,6 +128,12 @@ func (g *irgen) stmt(stmt syntax.Stmt) ir.Node {
if e.Type().HasTParam() {
// Delay transforming the return statement if any of the
// return values have a type param.
if !ir.HasNamedResults(ir.CurFunc) {
transformArgs(n)
// But add CONVIFACE nodes where needed if
// any of the return values have interface type.
typecheckaste(ir.ORETURN, nil, false, ir.CurFunc.Type().Results(), n.Results, true)
}
n.SetTypecheck(3)
return n
}
@@ -266,6 +272,12 @@ func (g *irgen) forStmt(stmt *syntax.ForStmt) ir.Node {
key, value := unpackTwo(lhs)
n := ir.NewRangeStmt(g.pos(r), key, value, g.expr(r.X), g.blockStmt(stmt.Body))
n.Def = initDefn(n, names)
if key != nil {
transformCheckAssign(n, key)
}
if value != nil {
transformCheckAssign(n, value)
}
return n
}

View file

@@ -0,0 +1,187 @@
// UNREVIEWED
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package noder
import (
"fmt"
"strings"
)
// enableSync controls whether sync markers are written into unified
// IR's export data format and also whether they're expected when
// reading them back in. They're inessential to the correct
// functioning of unified IR, but are helpful during development to
// detect mistakes.
//
// When sync is enabled, writer stack frames will also be included in
// the export data. Currently, a fixed number of frames are included,
// controlled by -d=syncframes (default 0).
const enableSync = true
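// An illustrative sketch (not part of the original change) of how the
// markers are used: the writer emits the marker immediately before a datum
// and the reader re-checks it, so any encode/decode drift is caught at the
// first mismatched marker instead of surfacing as silent data corruption:
//
//	w.sync(syncBool); w.bool(v)     // writer side
//	r.sync(syncBool); v := r.bool() // reader side; a mismatch is fatal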
// fmtFrames formats a backtrace for reporting reader/writer desyncs.
func fmtFrames(pcs ...uintptr) []string {
res := make([]string, 0, len(pcs))
walkFrames(pcs, func(file string, line int, name string, offset uintptr) {
// Trim package from function name. It's just redundant noise.
name = strings.TrimPrefix(name, "cmd/compile/internal/noder.")
res = append(res, fmt.Sprintf("%s:%v: %s +0x%v", file, line, name, offset))
})
return res
}
type frameVisitor func(file string, line int, name string, offset uintptr)
// syncMarker is an enum type that represents markers that may be
// written to export data to ensure the reader and writer stay
// synchronized.
type syncMarker int
//go:generate stringer -type=syncMarker -trimprefix=sync
// TODO(mdempsky): Cleanup unneeded sync markers.
// TODO(mdempsky): Split these markers into public/stable markers, and
// private ones. Also, trim unused ones.
const (
_ syncMarker = iota
syncNode
syncBool
syncInt64
syncUint64
syncString
syncPos
syncPkg
syncSym
syncSelector
syncKind
syncType
syncTypePkg
syncSignature
syncParam
syncOp
syncObject
syncExpr
syncStmt
syncDecl
syncConstDecl
syncFuncDecl
syncTypeDecl
syncVarDecl
syncPragma
syncValue
syncEOF
syncMethod
syncFuncBody
syncUse
syncUseObj
syncObjectIdx
syncTypeIdx
syncBOF
syncEntry
syncOpenScope
syncCloseScope
syncGlobal
syncLocal
syncDefine
syncDefLocal
syncUseLocal
syncDefGlobal
syncUseGlobal
syncTypeParams
syncUseLabel
syncDefLabel
syncFuncLit
syncCommonFunc
syncBodyRef
syncLinksymExt
syncHack
syncSetlineno
syncName
syncImportDecl
syncDeclNames
syncDeclName
syncExprList
syncExprs
syncWrapname
syncTypeExpr
syncTypeExprOrNil
syncChanDir
syncParams
syncCloseAnotherScope
syncSum
syncUnOp
syncBinOp
syncStructType
syncInterfaceType
syncPackname
syncEmbedded
syncStmts
syncStmtsFall
syncStmtFall
syncBlockStmt
syncIfStmt
syncForStmt
syncSwitchStmt
syncRangeStmt
syncCaseClause
syncCommClause
syncSelectStmt
syncDecls
syncLabeledStmt
syncCompLit
sync1
sync2
sync3
sync4
syncN
syncDefImplicit
syncUseName
syncUseObjLocal
syncAddLocal
syncBothSignature
syncSetUnderlying
syncLinkname
syncStmt1
syncStmtsEnd
syncDeclare
syncTopDecls
syncTopConstDecl
syncTopFuncDecl
syncTopTypeDecl
syncTopVarDecl
syncObject1
syncAddBody
syncLabel
syncFuncExt
syncMethExt
syncOptLabel
syncScalar
syncStmtDecls
syncDeclLocal
syncObjLocal
syncObjLocal1
syncDeclareLocal
syncPublic
syncPrivate
syncRelocs
syncReloc
syncUseReloc
syncVarExt
syncPkgDef
syncTypeExt
syncVal
syncCodeObj
syncPosBase
syncLocalIdent
syncTypeParamNames
syncTypeParamBounds
syncImplicitTypes
syncObjectName
)

View file

@@ -0,0 +1,155 @@
// Code generated by "stringer -type=syncMarker -trimprefix=sync"; DO NOT EDIT.
package noder
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[syncNode-1]
_ = x[syncBool-2]
_ = x[syncInt64-3]
_ = x[syncUint64-4]
_ = x[syncString-5]
_ = x[syncPos-6]
_ = x[syncPkg-7]
_ = x[syncSym-8]
_ = x[syncSelector-9]
_ = x[syncKind-10]
_ = x[syncType-11]
_ = x[syncTypePkg-12]
_ = x[syncSignature-13]
_ = x[syncParam-14]
_ = x[syncOp-15]
_ = x[syncObject-16]
_ = x[syncExpr-17]
_ = x[syncStmt-18]
_ = x[syncDecl-19]
_ = x[syncConstDecl-20]
_ = x[syncFuncDecl-21]
_ = x[syncTypeDecl-22]
_ = x[syncVarDecl-23]
_ = x[syncPragma-24]
_ = x[syncValue-25]
_ = x[syncEOF-26]
_ = x[syncMethod-27]
_ = x[syncFuncBody-28]
_ = x[syncUse-29]
_ = x[syncUseObj-30]
_ = x[syncObjectIdx-31]
_ = x[syncTypeIdx-32]
_ = x[syncBOF-33]
_ = x[syncEntry-34]
_ = x[syncOpenScope-35]
_ = x[syncCloseScope-36]
_ = x[syncGlobal-37]
_ = x[syncLocal-38]
_ = x[syncDefine-39]
_ = x[syncDefLocal-40]
_ = x[syncUseLocal-41]
_ = x[syncDefGlobal-42]
_ = x[syncUseGlobal-43]
_ = x[syncTypeParams-44]
_ = x[syncUseLabel-45]
_ = x[syncDefLabel-46]
_ = x[syncFuncLit-47]
_ = x[syncCommonFunc-48]
_ = x[syncBodyRef-49]
_ = x[syncLinksymExt-50]
_ = x[syncHack-51]
_ = x[syncSetlineno-52]
_ = x[syncName-53]
_ = x[syncImportDecl-54]
_ = x[syncDeclNames-55]
_ = x[syncDeclName-56]
_ = x[syncExprList-57]
_ = x[syncExprs-58]
_ = x[syncWrapname-59]
_ = x[syncTypeExpr-60]
_ = x[syncTypeExprOrNil-61]
_ = x[syncChanDir-62]
_ = x[syncParams-63]
_ = x[syncCloseAnotherScope-64]
_ = x[syncSum-65]
_ = x[syncUnOp-66]
_ = x[syncBinOp-67]
_ = x[syncStructType-68]
_ = x[syncInterfaceType-69]
_ = x[syncPackname-70]
_ = x[syncEmbedded-71]
_ = x[syncStmts-72]
_ = x[syncStmtsFall-73]
_ = x[syncStmtFall-74]
_ = x[syncBlockStmt-75]
_ = x[syncIfStmt-76]
_ = x[syncForStmt-77]
_ = x[syncSwitchStmt-78]
_ = x[syncRangeStmt-79]
_ = x[syncCaseClause-80]
_ = x[syncCommClause-81]
_ = x[syncSelectStmt-82]
_ = x[syncDecls-83]
_ = x[syncLabeledStmt-84]
_ = x[syncCompLit-85]
_ = x[sync1-86]
_ = x[sync2-87]
_ = x[sync3-88]
_ = x[sync4-89]
_ = x[syncN-90]
_ = x[syncDefImplicit-91]
_ = x[syncUseName-92]
_ = x[syncUseObjLocal-93]
_ = x[syncAddLocal-94]
_ = x[syncBothSignature-95]
_ = x[syncSetUnderlying-96]
_ = x[syncLinkname-97]
_ = x[syncStmt1-98]
_ = x[syncStmtsEnd-99]
_ = x[syncDeclare-100]
_ = x[syncTopDecls-101]
_ = x[syncTopConstDecl-102]
_ = x[syncTopFuncDecl-103]
_ = x[syncTopTypeDecl-104]
_ = x[syncTopVarDecl-105]
_ = x[syncObject1-106]
_ = x[syncAddBody-107]
_ = x[syncLabel-108]
_ = x[syncFuncExt-109]
_ = x[syncMethExt-110]
_ = x[syncOptLabel-111]
_ = x[syncScalar-112]
_ = x[syncStmtDecls-113]
_ = x[syncDeclLocal-114]
_ = x[syncObjLocal-115]
_ = x[syncObjLocal1-116]
_ = x[syncDeclareLocal-117]
_ = x[syncPublic-118]
_ = x[syncPrivate-119]
_ = x[syncRelocs-120]
_ = x[syncReloc-121]
_ = x[syncUseReloc-122]
_ = x[syncVarExt-123]
_ = x[syncPkgDef-124]
_ = x[syncTypeExt-125]
_ = x[syncVal-126]
_ = x[syncCodeObj-127]
_ = x[syncPosBase-128]
_ = x[syncLocalIdent-129]
_ = x[syncTypeParamNames-130]
_ = x[syncTypeParamBounds-131]
_ = x[syncImplicitTypes-132]
}
const _syncMarker_name = "NodeBoolInt64Uint64StringPosPkgSymSelectorKindTypeTypePkgSignatureParamOpObjectExprStmtDeclConstDeclFuncDeclTypeDeclVarDeclPragmaValueEOFMethodFuncBodyUseUseObjObjectIdxTypeIdxBOFEntryOpenScopeCloseScopeGlobalLocalDefineDefLocalUseLocalDefGlobalUseGlobalTypeParamsUseLabelDefLabelFuncLitCommonFuncBodyRefLinksymExtHackSetlinenoNameImportDeclDeclNamesDeclNameExprListExprsWrapnameTypeExprTypeExprOrNilChanDirParamsCloseAnotherScopeSumUnOpBinOpStructTypeInterfaceTypePacknameEmbeddedStmtsStmtsFallStmtFallBlockStmtIfStmtForStmtSwitchStmtRangeStmtCaseClauseCommClauseSelectStmtDeclsLabeledStmtCompLit1234NDefImplicitUseNameUseObjLocalAddLocalBothSignatureSetUnderlyingLinknameStmt1StmtsEndDeclareTopDeclsTopConstDeclTopFuncDeclTopTypeDeclTopVarDeclObject1AddBodyLabelFuncExtMethExtOptLabelScalarStmtDeclsDeclLocalObjLocalObjLocal1DeclareLocalPublicPrivateRelocsRelocUseRelocVarExtPkgDefTypeExtValCodeObjPosBaseLocalIdentTypeParamNamesTypeParamBoundsImplicitTypes"
var _syncMarker_index = [...]uint16{0, 4, 8, 13, 19, 25, 28, 31, 34, 42, 46, 50, 57, 66, 71, 73, 79, 83, 87, 91, 100, 108, 116, 123, 129, 134, 137, 143, 151, 154, 160, 169, 176, 179, 184, 193, 203, 209, 214, 220, 228, 236, 245, 254, 264, 272, 280, 287, 297, 304, 314, 318, 327, 331, 341, 350, 358, 366, 371, 379, 387, 400, 407, 413, 430, 433, 437, 442, 452, 465, 473, 481, 486, 495, 503, 512, 518, 525, 535, 544, 554, 564, 574, 579, 590, 597, 598, 599, 600, 601, 602, 613, 620, 631, 639, 652, 665, 673, 678, 686, 693, 701, 713, 724, 735, 745, 752, 759, 764, 771, 778, 786, 792, 801, 810, 818, 827, 839, 845, 852, 858, 863, 871, 877, 883, 890, 893, 900, 907, 917, 931, 946, 959}
func (i syncMarker) String() string {
i -= 1
if i < 0 || i >= syncMarker(len(_syncMarker_index)-1) {
return "syncMarker(" + strconv.FormatInt(int64(i+1), 10) + ")"
}
return _syncMarker_name[_syncMarker_index[i]:_syncMarker_index[i+1]]
}

View file

@@ -85,7 +85,15 @@ func stringtoruneslit(n *ir.ConvExpr) ir.Node {
// etc. Corresponds to typecheck.tcConv.
func transformConv(n *ir.ConvExpr) ir.Node {
t := n.X.Type()
op, _ := typecheck.Convertop(n.X.Op() == ir.OLITERAL, t, n.Type())
op, why := typecheck.Convertop(n.X.Op() == ir.OLITERAL, t, n.Type())
if op == ir.OXXX {
// types2 currently ignores pragmas, so a 'notinheap' mismatch is the
// one type-related error that it does not catch. This error will be
// caught here by Convertop (see two checks near beginning of
// Convertop) and reported at the end of noding.
base.ErrorfAt(n.Pos(), "cannot convert %L to type %v%s", n.X, n.Type(), why)
return n
}
n.SetOp(op)
switch n.Op() {
case ir.OCONVNOP:
@@ -122,7 +130,8 @@ func transformConvCall(n *ir.CallExpr) ir.Node {
}
// transformCall transforms a normal function/method call. Corresponds to last half
// (non-conversion, non-builtin part) of typecheck.tcCall.
// (non-conversion, non-builtin part) of typecheck.tcCall. This code should work even
// in the case of OCALL/OFUNCINST.
func transformCall(n *ir.CallExpr) {
// n.Type() can be nil for calls with no return value
assert(n.Typecheck() == 1)
@@ -148,10 +157,11 @@ func transformCall(n *ir.CallExpr) {
n.SetOp(ir.OCALLFUNC)
}
typecheckaste(ir.OCALL, n.X, n.IsDDD, t.Params(), n.Args)
typecheckaste(ir.OCALL, n.X, n.IsDDD, t.Params(), n.Args, false)
if l.Op() == ir.ODOTMETH && len(deref(n.X.Type().Recv().Type).RParams()) == 0 {
typecheck.FixMethodCall(n)
}
if t.NumResults() == 1 {
n.SetType(l.Type().Results().Field(0).Type)
if n.Op() == ir.OCALLFUNC && n.X.Op() == ir.ONAME {
if sym := n.X.(*ir.Name).Sym(); types.IsRuntimePkg(sym.Pkg) && sym.Name == "getg" {
// Emit code for runtime.getg() directly instead of calling function.
@@ -185,7 +195,7 @@ func transformCompare(n *ir.BinaryExpr) {
aop, _ := typecheck.Assignop(lt, rt)
if aop != ir.OXXX {
types.CalcSize(lt)
if rt.IsInterface() == lt.IsInterface() || lt.Width >= 1<<16 {
if lt.HasTParam() || rt.IsInterface() == lt.IsInterface() || lt.Width >= 1<<16 {
l = ir.NewConvExpr(base.Pos, aop, rt, l)
l.SetTypecheck(1)
}
@@ -198,7 +208,7 @@ func transformCompare(n *ir.BinaryExpr) {
aop, _ := typecheck.Assignop(rt, lt)
if aop != ir.OXXX {
types.CalcSize(rt)
if rt.IsInterface() == lt.IsInterface() || rt.Width >= 1<<16 {
if rt.HasTParam() || rt.IsInterface() == lt.IsInterface() || rt.Width >= 1<<16 {
r = ir.NewConvExpr(base.Pos, aop, lt, r)
r.SetTypecheck(1)
}
@@ -303,6 +313,10 @@ assignOK:
r := r.(*ir.TypeAssertExpr)
stmt.SetOp(ir.OAS2DOTTYPE)
r.SetOp(ir.ODOTTYPE2)
case ir.ODYNAMICDOTTYPE:
r := r.(*ir.DynamicTypeAssertExpr)
stmt.SetOp(ir.OAS2DOTTYPE)
r.SetOp(ir.ODYNAMICDOTTYPE2)
default:
break assignOK
}
@@ -323,11 +337,22 @@ assignOK:
stmt := stmt.(*ir.AssignListStmt)
stmt.SetOp(ir.OAS2FUNC)
r := rhs[0].(*ir.CallExpr)
r.Use = ir.CallUseList
rtyp := r.Type()
mismatched := false
failed := false
for i := range lhs {
checkLHS(i, rtyp.Field(i).Type)
result := rtyp.Field(i).Type
checkLHS(i, result)
if lhs[i].Type() == nil || result == nil {
failed = true
} else if lhs[i] != ir.BlankNode && !types.Identical(lhs[i].Type(), result) {
mismatched = true
}
}
if mismatched && !failed {
typecheck.RewriteMultiValueCall(stmt, r)
}
return
}
@@ -340,12 +365,12 @@ assignOK:
}
}
// Corresponds to typecheck.typecheckargs.
// Corresponds to typecheck.typecheckargs. Really just deals with multi-value calls.
func transformArgs(n ir.InitNode) {
var list []ir.Node
switch n := n.(type) {
default:
base.Fatalf("typecheckargs %+v", n.Op())
base.Fatalf("transformArgs %+v", n.Op())
case *ir.CallExpr:
list = n.Args
if n.IsDDD {
@@ -363,46 +388,13 @@ func transformArgs(n ir.InitNode) {
return
}
// Rewrite f(g()) into t1, t2, ... = g(); f(t1, t2, ...).
// Save n as n.Orig for fmt.go.
if ir.Orig(n) == n {
n.(ir.OrigNode).SetOrig(ir.SepCopy(n))
}
as := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil)
as.Rhs.Append(list...)
// If we're outside of function context, then this call will
// be executed during the generated init function. However,
// init.go hasn't yet created it. Instead, associate the
// temporary variables with InitTodoFunc for now, and init.go
// will reassociate them later when it's appropriate.
static := ir.CurFunc == nil
if static {
ir.CurFunc = typecheck.InitTodoFunc
}
list = nil
for _, f := range t.FieldSlice() {
t := typecheck.Temp(f.Type)
as.PtrInit().Append(ir.NewDecl(base.Pos, ir.ODCL, t))
as.Lhs.Append(t)
list = append(list, t)
}
if static {
ir.CurFunc = nil
}
switch n := n.(type) {
case *ir.CallExpr:
n.Args = list
case *ir.ReturnStmt:
n.Results = list
}
transformAssign(as, as.Lhs, as.Rhs)
as.SetTypecheck(1)
n.PtrInit().Append(as)
// Rewrite f(g()) into t1, t2, ... = g(); f(t1, t2, ...).
typecheck.RewriteMultiValueCall(n, list[0])
}
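// Illustrative example (not part of the original change): given
// `func g() (int, string)`, a call `f(g())` reaches the rewrite above and
// becomes, in effect,
//
//	t1, t2 := g()
//	f(t1, t2)
//
// with the multi-value assignment attached to the call's init list.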
// assignconvfn converts node n for assignment to type t. Corresponds to
@@ -416,7 +408,10 @@ func assignconvfn(n ir.Node, t *types.Type) ir.Node {
return n
}
op, _ := typecheck.Assignop(n.Type(), t)
op, why := typecheck.Assignop(n.Type(), t)
if op == ir.OXXX {
base.Fatalf("found illegal assignment %+v -> %+v; %s", n.Type(), t, why)
}
r := ir.NewConvExpr(base.Pos, op, t, n)
r.SetTypecheck(1)
@@ -424,8 +419,11 @@ func assignconvfn(n ir.Node, t *types.Type) ir.Node {
return r
}
// Corresponds to typecheck.typecheckaste.
func typecheckaste(op ir.Op, call ir.Node, isddd bool, tstruct *types.Type, nl ir.Nodes) {
// Corresponds to typecheck.typecheckaste, but with an extra flag,
// convifaceOnly. If convifaceOnly is true, we only do interface
// conversions. We use this for early insertion of CONVIFACE nodes during
// noder2, when the function or args may have type params.
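//
// For example (an illustrative sketch, not part of the original change):
// with results `(x int, e error)` and `return 5, myErr`, a CONVIFACE
// conversion is inserted for assigning myErr to the interface-typed result
// e, while the non-interface assignment of 5 to x is left alone when
// convifaceOnly is true.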
func typecheckaste(op ir.Op, call ir.Node, isddd bool, tstruct *types.Type, nl ir.Nodes, convifaceOnly bool) {
var t *types.Type
var i int
@@ -444,7 +442,7 @@ func typecheckaste(op ir.Op, call ir.Node, isddd bool, tstruct *types.Type, nl i
if isddd {
n = nl[i]
ir.SetPos(n)
if n.Type() != nil {
if n.Type() != nil && (!convifaceOnly || t.IsInterface()) {
nl[i] = assignconvfn(n, t)
}
return
@@ -454,7 +452,7 @@ func typecheckaste(op ir.Op, call ir.Node, isddd bool, tstruct *types.Type, nl i
for ; i < len(nl); i++ {
n = nl[i]
ir.SetPos(n)
if n.Type() != nil {
if n.Type() != nil && (!convifaceOnly || t.IsInterface()) {
nl[i] = assignconvfn(n, t.Elem())
}
}
@@ -463,7 +461,7 @@ func typecheckaste(op ir.Op, call ir.Node, isddd bool, tstruct *types.Type, nl i
n = nl[i]
ir.SetPos(n)
if n.Type() != nil {
if n.Type() != nil && (!convifaceOnly || t.IsInterface()) {
nl[i] = assignconvfn(n, t)
}
i++
@@ -485,7 +483,7 @@ func transformReturn(rs *ir.ReturnStmt) {
return
}
typecheckaste(ir.ORETURN, nil, false, ir.CurFunc.Type().Results(), nl)
typecheckaste(ir.ORETURN, nil, false, ir.CurFunc.Type().Results(), nl, false)
}
// transformSelect transforms a select node, creating an assignment list as needed
@@ -537,13 +535,31 @@ func transformAsOp(n *ir.AssignOpStmt) {
}
// transformDot transforms an OXDOT (or ODOT) or ODOT, ODOTPTR, ODOTMETH,
// ODOTINTER, or OCALLPART, as appropriate. It adds in extra nodes as needed to
// ODOTINTER, or OMETHVALUE, as appropriate. It adds in extra nodes as needed to
// access embedded fields. Corresponds to typecheck.tcDot.
func transformDot(n *ir.SelectorExpr, isCall bool) ir.Node {
assert(n.Type() != nil && n.Typecheck() == 1)
if n.Op() == ir.OXDOT {
n = typecheck.AddImplicitDots(n)
n.SetOp(ir.ODOT)
// Set the Selection field and typecheck flag for any new ODOT nodes
// added by AddImplicitDots(), and also transform to ODOTPTR if
// needed. Equivalent to 'n.X = typecheck(n.X, ctxExpr|ctxType)' in
// tcDot.
for n1 := n; n1.X.Op() == ir.ODOT; {
n1 = n1.X.(*ir.SelectorExpr)
if !n1.Implicit() {
break
}
t1 := n1.X.Type()
if t1.IsPtr() && !t1.Elem().IsInterface() {
t1 = t1.Elem()
n1.SetOp(ir.ODOTPTR)
}
typecheck.Lookdot(n1, t1, 0)
n1.SetTypecheck(1)
}
}
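// For example (an illustrative sketch, not part of the original change):
// for `var v struct{ E }` where E is an embedded struct with a field F,
// the access v.F arrives as an OXDOT; AddImplicitDots rewrites it to
// v.E.F, and the loop above fills in the Selection/typecheck info for the
// implicit v.E node (as ODOT, or ODOTPTR if E is reached through a pointer).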
t := n.X.Type()
@@ -561,8 +577,13 @@ func transformDot(n *ir.SelectorExpr, isCall bool) ir.Node {
assert(f != nil)
if (n.Op() == ir.ODOTINTER || n.Op() == ir.ODOTMETH) && !isCall {
n.SetOp(ir.OCALLPART)
n.SetType(typecheck.MethodValueWrapper(n).Type())
n.SetOp(ir.OMETHVALUE)
if len(n.X.Type().RParams()) > 0 || n.X.Type().IsPtr() && len(n.X.Type().Elem().RParams()) > 0 {
// TODO: MethodValueWrapper needed for generics?
// Or did we successfully desugar all that at stencil time?
return n
}
n.SetType(typecheck.NewMethodType(n.Type(), nil))
}
return n
}
@@ -594,7 +615,11 @@ func transformMethodExpr(n *ir.SelectorExpr) (res ir.Node) {
s := n.Sel
m := typecheck.Lookdot1(n, s, t, ms, 0)
assert(m != nil)
if !t.HasShape() {
// It's OK to not find the method if t is instantiated by shape types,
// because we will use the methods on the generic type anyway.
assert(m != nil)
}
n.SetOp(ir.OMETHEXPR)
n.Selection = m
@@ -911,9 +936,7 @@ func transformCompLit(n *ir.CompLitExpr) (res ir.Node) {
f := t.Field(i)
n1 = assignconvfn(n1, f.Type)
sk := ir.NewStructKeyExpr(base.Pos, f.Sym, n1)
sk.Offset = f.Offset
ls[i] = sk
ls[i] = ir.NewStructKeyExpr(base.Pos, f, n1)
}
assert(len(ls) >= t.NumFields())
} else {
@@ -922,33 +945,26 @@ func transformCompLit(n *ir.CompLitExpr) (res ir.Node) {
for i, l := range ls {
ir.SetPos(l)
if l.Op() == ir.OKEY {
kv := l.(*ir.KeyExpr)
key := kv.Key
kv := l.(*ir.KeyExpr)
key := kv.Key
// Sym might have resolved to name in other top-level
// package, because of import dot. Redirect to correct sym
// before we do the lookup.
s := key.Sym()
if id, ok := key.(*ir.Ident); ok && typecheck.DotImportRefs[id] != nil {
s = typecheck.Lookup(s.Name)
}
// An OXDOT uses the Sym field to hold
// the field to the right of the dot,
// so s will be non-nil, but an OXDOT
// is never a valid struct literal key.
assert(!(s == nil || s.Pkg != types.LocalPkg || key.Op() == ir.OXDOT || s.IsBlank()))
l = ir.NewStructKeyExpr(l.Pos(), s, kv.Value)
ls[i] = l
// Sym might have resolved to name in other top-level
// package, because of import dot. Redirect to correct sym
// before we do the lookup.
s := key.Sym()
if id, ok := key.(*ir.Ident); ok && typecheck.DotImportRefs[id] != nil {
s = typecheck.Lookup(s.Name)
}
assert(l.Op() == ir.OSTRUCTKEY)
l := l.(*ir.StructKeyExpr)
// An OXDOT uses the Sym field to hold
// the field to the right of the dot,
// so s will be non-nil, but an OXDOT
// is never a valid struct literal key.
assert(!(s == nil || s.Pkg != types.LocalPkg || key.Op() == ir.OXDOT || s.IsBlank()))
f := typecheck.Lookdot1(nil, l.Field, t, t.Fields(), 0)
l.Offset = f.Offset
f := typecheck.Lookdot1(nil, s, t, t.Fields(), 0)
l := ir.NewStructKeyExpr(l.Pos(), f, kv.Value)
ls[i] = l
l.Value = assignconvfn(l.Value, f.Type)
}

View file

@@ -39,6 +39,11 @@ func (g *irgen) typ(typ types2.Type) *types.Type {
// recursive types have been fully constructed before we call CheckSize.
if res != nil && !res.IsUntyped() && !res.IsFuncArgStruct() && !res.HasTParam() {
types.CheckSize(res)
if res.IsPtr() {
// Pointers always have their size set, even though their element
// may not have its size set.
types.CheckSize(res.Elem())
}
}
return res
}
@@ -68,8 +73,10 @@ func instTypeName2(name string, targs []types2.Type) string {
if i > 0 {
b.WriteByte(',')
}
// Include package names for all types, including typeparams, to
// make sure type arguments are uniquely specified.
tname := types2.TypeString(targ,
func(*types2.Package) string { return "" })
func(pkg *types2.Package) string { return pkg.Name() })
if strings.Index(tname, ", ") >= 0 {
// types2.TypeString puts spaces after a comma in a type
// list, but we don't want spaces in our actual type names
@@ -89,50 +96,49 @@ func (g *irgen) typ0(typ types2.Type) *types.Type {
case *types2.Basic:
return g.basic(typ)
case *types2.Named:
if typ.TParams() != nil {
// If tparams is set, but targs is not, typ is a base generic
// type. typ is appearing as part of the source type of an alias,
// since that is the only use of a generic type that doesn't
// involve instantiation. We just translate the named type in the
// normal way below using g.obj().
if typ.TParams() != nil && typ.TArgs() != nil {
// typ is an instantiation of a defined (named) generic type.
// This instantiation should also be a defined (named) type.
// types2 gives us the substituted type in t.Underlying()
// The substituted type may or may not still have type
// params. We might, for example, be substituting one type
// param for another type param.
if typ.TArgs() == nil {
base.Fatalf("In typ0, Targs should be set if TParams is set")
}
// When converted to types.Type, typ must have a name,
// based on the names of the type arguments. We need a
// name to deal with recursive generic types (and it also
// looks better when printing types).
//
// When converted to types.Type, typ has a unique name,
// based on the names of the type arguments.
instName := instTypeName2(typ.Obj().Name(), typ.TArgs())
s := g.pkg(typ.Obj().Pkg()).Lookup(instName)
if s.Def != nil {
// We have already encountered this instantiation,
// so use the type we previously created, since there
// We have already encountered this instantiation.
// Use the type we previously created, since there
// must be exactly one instance of a defined type.
return s.Def.Type()
}
// Create a forwarding type first and put it in the g.typs
// map, in order to deal with recursive generic types.
// Fully set up the extra ntyp information (Def, RParams,
// which may set HasTParam) before translating the
// underlying type itself, so we handle recursion
// correctly, including via method signatures.
ntyp := newIncompleteNamedType(g.pos(typ.Obj().Pos()), s)
// map, in order to deal with recursive generic types
// (including via method signatures).. Set up the extra
// ntyp information (Def, RParams, which may set
// HasTParam) before translating the underlying type
// itself, so we handle recursion correctly.
ntyp := typecheck.NewIncompleteNamedType(g.pos(typ.Obj().Pos()), s)
g.typs[typ] = ntyp
// If ntyp still has type params, then we must be
// referencing something like 'value[T2]', as when
// specifying the generic receiver of a method,
// where value was defined as "type value[T any]
// ...". Save the type args, which will now be the
// new type of the current type.
// specifying the generic receiver of a method, where
// value was defined as "type value[T any] ...". Save the
// type args, which will now be the new typeparams of the
// current type.
//
// If ntyp does not have type params, we are saving the
// concrete types used to instantiate this type. We'll use
// these when instantiating the methods of the
// non-generic types used to instantiate this type. We'll
// use these when instantiating the methods of the
// instantiated type.
rparams := make([]*types.Type, len(typ.TArgs()))
for i, targ := range typ.TArgs() {
@@ -143,6 +149,8 @@ func (g *irgen) typ0(typ types2.Type) *types.Type {
ntyp.SetUnderlying(g.typ1(typ.Underlying()))
g.fillinMethods(typ, ntyp)
// Save the symbol for the base generic type.
ntyp.OrigSym = g.pkg(typ.Obj().Pkg()).Lookup(typ.Obj().Name())
return ntyp
}
obj := g.obj(typ.Obj())
@@ -183,12 +191,9 @@ func (g *irgen) typ0(typ types2.Type) *types.Type {
for i := range embeddeds {
// TODO(mdempsky): Get embedding position.
e := typ.EmbeddedType(i)
if t := types2.AsInterface(e); t != nil && t.IsComparable() {
// Ignore predefined type 'comparable', since it
// doesn't resolve and it doesn't have any
// relevant methods.
continue
}
// With Go 1.18, an embedded element can be any type, not
// just an interface.
embeddeds[j] = types.NewField(src.NoXPos, nil, g.typ1(e))
j++
}
@@ -204,20 +209,39 @@ func (g *irgen) typ0(typ types2.Type) *types.Type {
return types.NewInterface(g.tpkg(typ), append(embeddeds, methods...))
case *types2.TypeParam:
tp := types.NewTypeParam(g.tpkg(typ))
// Save the name of the type parameter in the sym of the type.
// Include the types2 subscript in the sym name
sym := g.pkg(typ.Obj().Pkg()).Lookup(types2.TypeString(typ, func(*types2.Package) string { return "" }))
tp.SetSym(sym)
pkg := g.tpkg(typ)
sym := pkg.Lookup(types2.TypeString(typ, func(*types2.Package) string { return "" }))
if sym.Def != nil {
// Make sure we use the same type param type for the same
// name, whether it is created during types1-import or
// this types2-to-types1 translation.
return sym.Def.Type()
}
tp := types.NewTypeParam(sym, typ.Index())
nname := ir.NewDeclNameAt(g.pos(typ.Obj().Pos()), ir.OTYPE, sym)
sym.Def = nname
nname.SetType(tp)
tp.SetNod(nname)
// Set g.typs[typ] in case the bound methods reference typ.
g.typs[typ] = tp
// TODO(danscales): we don't currently need to use the bounds
// anywhere, so eventually we can probably remove them.
bound := g.typ1(typ.Bound())
*tp.Methods() = *bound.Methods()
bound := g.typ1(typ.Constraint())
tp.SetBound(bound)
return tp
case *types2.Union:
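// Illustrative example (a sketch, not part of the original change): for
// the constraint union `int | ~string`, Len() is 2 and the loop below
// produces tlist = [int, string] and tildes = [false, true].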
nt := typ.Len()
tlist := make([]*types.Type, nt)
tildes := make([]bool, nt)
for i := range tlist {
t := typ.Term(i)
tlist[i] = g.typ1(t.Type())
tildes[i] = t.Tilde()
}
return types.NewUnion(tlist, tildes)
case *types2.Tuple:
// Tuples are used for the type of a function call (i.e. the
// return value of the function).
@ -243,19 +267,23 @@ func (g *irgen) typ0(typ types2.Type) *types.Type {
// and for actually generating the methods for instantiated types.
func (g *irgen) fillinMethods(typ *types2.Named, ntyp *types.Type) {
if typ.NumMethods() != 0 {
targs := make([]ir.Node, len(typ.TArgs()))
targs := make([]*types.Type, len(typ.TArgs()))
for i, targ := range typ.TArgs() {
targs[i] = ir.TypeNode(g.typ1(targ))
targs[i] = g.typ1(targ)
}
methods := make([]*types.Field, typ.NumMethods())
for i := range methods {
m := typ.Method(i)
meth := g.obj(m)
recvType := types2.AsSignature(m.Type()).Recv().Type()
ptr := types2.AsPointer(recvType)
if ptr != nil {
recvType = ptr.Elem()
recvType := deref2(types2.AsSignature(m.Type()).Recv().Type())
var meth *ir.Name
if m.Pkg() != g.self {
// Imported methods cannot be loaded by name (what
// g.obj() does) - they must be loaded via their
// type.
meth = g.obj(recvType.(*types2.Named).Obj()).Type().Methods().Index(i).Nname.(*ir.Name)
} else {
meth = g.obj(m)
}
if recvType != types2.Type(typ) {
// Unfortunately, meth is the type of the method of the
@@ -276,18 +304,21 @@ func (g *irgen) fillinMethods(typ *types2.Named, ntyp *types.Type) {
} else {
meth2 = ir.NewNameAt(meth.Pos(), newsym)
rparams := types2.AsSignature(m.Type()).RParams()
tparams := make([]*types.Field, len(rparams))
for i, rparam := range rparams {
tparams[i] = types.NewField(src.NoXPos, nil, g.typ1(rparam.Type()))
tparams := make([]*types.Type, rparams.Len())
for i := range tparams {
tparams[i] = g.typ1(rparams.At(i).Type())
}
assert(len(tparams) == len(targs))
subst := &subster{
g: g,
tparams: tparams,
targs: targs,
ts := typecheck.Tsubster{
Tparams: tparams,
Targs: targs,
}
// Do the substitution of the type
meth2.SetType(subst.typ(meth.Type()))
meth2.SetType(ts.Typ(meth.Type()))
// Add any new fully instantiated types
// seen during the substitution to
// g.instTypeList.
g.instTypeList = append(g.instTypeList, ts.InstTypeList...)
newsym.Def = meth2
}
meth = meth2
@@ -296,7 +327,7 @@ func (g *irgen) fillinMethods(typ *types2.Named, ntyp *types.Type) {
methods[i].Nname = meth
}
ntyp.Methods().Set(methods)
if !ntyp.HasTParam() {
if !ntyp.HasTParam() && !ntyp.HasShape() {
// Generate all the methods for a new fully-instantiated type.
g.instTypeList = append(g.instTypeList, ntyp)
}
@ -305,9 +336,9 @@ func (g *irgen) fillinMethods(typ *types2.Named, ntyp *types.Type) {
func (g *irgen) signature(recv *types.Field, sig *types2.Signature) *types.Type {
tparams2 := sig.TParams()
tparams := make([]*types.Field, len(tparams2))
tparams := make([]*types.Field, tparams2.Len())
for i := range tparams {
tp := tparams2[i]
tp := tparams2.At(i)
tparams[i] = types.NewField(g.pos(tp), g.sym(tp), g.typ1(tp.Type()))
}
@@ -346,7 +377,7 @@ func (g *irgen) selector(obj types2.Object) *types.Sym {
return pkg.Lookup(name)
}
// tpkg returns the package that a function, interface, or struct type
// tpkg returns the package that a function, interface, struct, or typeparam type
// expression appeared in.
//
// Caveat: For the degenerate types "func()", "interface{}", and
@@ -356,36 +387,39 @@ func (g *irgen) selector(obj types2.Object) *types.Sym {
// particular types is because go/types does *not* report it for
// them. So in practice this limitation is probably moot.
func (g *irgen) tpkg(typ types2.Type) *types.Pkg {
anyObj := func() types2.Object {
switch typ := typ.(type) {
case *types2.Signature:
if recv := typ.Recv(); recv != nil {
return recv
}
if params := typ.Params(); params.Len() > 0 {
return params.At(0)
}
if results := typ.Results(); results.Len() > 0 {
return results.At(0)
}
case *types2.Struct:
if typ.NumFields() > 0 {
return typ.Field(0)
}
case *types2.Interface:
if typ.NumExplicitMethods() > 0 {
return typ.ExplicitMethod(0)
}
}
return nil
}
if obj := anyObj(); obj != nil {
if obj := anyObj(typ); obj != nil {
return g.pkg(obj.Pkg())
}
return types.LocalPkg
}
// anyObj returns some object accessible from typ, if any.
func anyObj(typ types2.Type) types2.Object {
switch typ := typ.(type) {
case *types2.Signature:
if recv := typ.Recv(); recv != nil {
return recv
}
if params := typ.Params(); params.Len() > 0 {
return params.At(0)
}
if results := typ.Results(); results.Len() > 0 {
return results.At(0)
}
case *types2.Struct:
if typ.NumFields() > 0 {
return typ.Field(0)
}
case *types2.Interface:
if typ.NumExplicitMethods() > 0 {
return typ.ExplicitMethod(0)
}
case *types2.TypeParam:
return typ.Obj()
}
return nil
}
func (g *irgen) basic(typ *types2.Basic) *types.Type {
switch typ.Name() {
case "byte":
@@ -430,3 +464,11 @@ var dirs = [...]types.ChanDir{
types2.SendOnly: types.Csend,
types2.RecvOnly: types.Crecv,
}
// deref2 does a single deref of types2 type t, if it is a pointer type.
func deref2(t types2.Type) types2.Type {
if ptr := types2.AsPointer(t); ptr != nil {
t = ptr.Elem()
}
return t
}

View file

@@ -0,0 +1,340 @@
// UNREVIEWED
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package noder
import (
"bytes"
"fmt"
"internal/goversion"
"io"
"runtime"
"sort"
"cmd/compile/internal/base"
"cmd/compile/internal/inline"
"cmd/compile/internal/ir"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/compile/internal/types2"
"cmd/internal/src"
)
// localPkgReader holds the package reader used for reading the local
// package. It exists so the unified IR linker can refer back to it
// later.
var localPkgReader *pkgReader
// unified constructs the local package's IR from its syntax AST.
//
// The pipeline contains 2 steps:
//
// (1) Generate package export data "stub".
//
// (2) Generate package IR from package export data.
//
// The package data "stub" at step (1) contains everything from the local package,
// but nothing that has been imported. When we're actually writing out export data
// to the output files (see writeNewExport function), we run the "linker", which does
// a few things:
//
// + Updates compiler extensions data (e.g., inlining cost, escape analysis results).
//
// + Handles re-exporting any transitive dependencies.
//
// + Prunes out any unnecessary details (e.g., non-inlineable functions, because any
// downstream importers only care about inlinable functions).
//
// The source files are typechecked twice: once before writing export data
// using the types2 checker, and once after reading export data using
// gc/typecheck. This duplication of work will go away once we always use the
// types2 checker and can remove the gc/typecheck pass. The reasons it is
// still here:
//
// + It reduces engineering costs in maintaining a fork of typecheck
// (e.g., no need to backport fixes like CL 327651).
//
// + It makes it easier to pass toolstash -cmp.
//
// + Historically, we would always re-run the typechecker after import, even
// though we know the imported data is valid. It's not ideal, but it's not
// causing any problems either.
//
// + There are still transformations being done during gc/typecheck, like
// rewriting multi-valued function calls, or transforming ir.OINDEX ->
// ir.OINDEXMAP.
//
// Using the syntax+types2 tree, which already has a complete representation
// of generics, unified IR has the full typed AST available for introspection
// during step (1). In other words, we have all the information necessary to
// build the generic IR form (see writer.captureVars for an example).
func unified(noders []*noder) {
inline.NewInline = InlineCall
if !quirksMode() {
writeNewExportFunc = writeNewExport
} else if base.Flag.G != 0 {
base.Errorf("cannot use -G and -d=quirksmode together")
}
newReadImportFunc = func(data string, pkg1 *types.Pkg, check *types2.Checker, packages map[string]*types2.Package) (pkg2 *types2.Package, err error) {
pr := newPkgDecoder(pkg1.Path, data)
// Read package descriptors for both types2 and compiler backend.
readPackage(newPkgReader(pr), pkg1)
pkg2 = readPackage2(check, packages, pr)
return
}
data := writePkgStub(noders)
// We already passed base.Flag.Lang to types2 to handle validating
// the user's source code. Bump it up now to the current version and
// re-parse, so typecheck doesn't complain if we construct IR that
// utilizes newer Go features.
base.Flag.Lang = fmt.Sprintf("go1.%d", goversion.Version)
types.ParseLangFlag()
assert(types.LocalPkg.Path == "")
types.LocalPkg.Height = 0 // reset so pkgReader.pkgIdx doesn't complain
target := typecheck.Target
typecheck.TypecheckAllowed = true
localPkgReader = newPkgReader(newPkgDecoder(types.LocalPkg.Path, data))
readPackage(localPkgReader, types.LocalPkg)
r := localPkgReader.newReader(relocMeta, privateRootIdx, syncPrivate)
r.ext = r
r.pkgInit(types.LocalPkg, target)
// Type-check any top-level assignments. We ignore non-assignments
// here because other declarations are typechecked as they're
// constructed.
for i, ndecls := 0, len(target.Decls); i < ndecls; i++ {
switch n := target.Decls[i]; n.Op() {
case ir.OAS, ir.OAS2:
target.Decls[i] = typecheck.Stmt(n)
}
}
// Don't use range--bodyIdx can add closures to todoBodies.
for len(todoBodies) > 0 {
// The order we expand bodies doesn't matter, so pop from the end
// to reduce todoBodies reallocations if it grows further.
fn := todoBodies[len(todoBodies)-1]
todoBodies = todoBodies[:len(todoBodies)-1]
pri, ok := bodyReader[fn]
assert(ok)
pri.funcBody(fn)
// Instantiated generic function: add to Decls for typechecking
// and compilation.
if fn.OClosure == nil && len(pri.dict.targs) != 0 {
target.Decls = append(target.Decls, fn)
}
}
todoBodies = nil
if !quirksMode() {
// TODO(mdempsky): Investigate generating wrappers in quirks mode too.
r.wrapTypes(target)
}
// Check that nothing snuck past typechecking.
for _, n := range target.Decls {
if n.Typecheck() == 0 {
base.FatalfAt(n.Pos(), "missed typecheck: %v", n)
}
// For functions, check that at least their first statement (if
// any) was typechecked too.
if fn, ok := n.(*ir.Func); ok && len(fn.Body) != 0 {
if stmt := fn.Body[0]; stmt.Typecheck() == 0 {
base.FatalfAt(stmt.Pos(), "missed typecheck: %v", stmt)
}
}
}
base.ExitIfErrors() // just in case
}
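The todoBodies loop above is a plain worklist: popping from the end lets items appended mid-iteration (closures discovered while expanding a body) be processed without invalidating a range loop. A minimal standalone sketch of the same pattern, with illustrative names:
package main

import "fmt"

func main() {
	// Worklist: processing an item may append new items, so pop from
	// the end rather than ranging over the slice.
	todo := []string{"f", "g"}
	for len(todo) > 0 {
		fn := todo[len(todo)-1]
		todo = todo[:len(todo)-1]
		if fn == "g" {
			todo = append(todo, "g.func1") // closure found while expanding g
		}
		fmt.Println("expanded", fn)
	}
}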
// writePkgStub type checks the given parsed source files,
// writes an export data package stub representing them,
// and returns the result.
func writePkgStub(noders []*noder) string {
m, pkg, info := checkFiles(noders)
pw := newPkgWriter(m, pkg, info)
pw.collectDecls(noders)
publicRootWriter := pw.newWriter(relocMeta, syncPublic)
privateRootWriter := pw.newWriter(relocMeta, syncPrivate)
assert(publicRootWriter.idx == publicRootIdx)
assert(privateRootWriter.idx == privateRootIdx)
{
w := publicRootWriter
w.pkg(pkg)
w.bool(false) // has init; XXX
scope := pkg.Scope()
names := scope.Names()
w.len(len(names))
for _, name := range scope.Names() {
w.obj(scope.Lookup(name), nil)
}
w.sync(syncEOF)
w.flush()
}
{
w := privateRootWriter
w.ext = w
w.pkgInit(noders)
w.flush()
}
var sb bytes.Buffer // TODO(mdempsky): strings.Builder after #44505 is resolved
pw.dump(&sb)
// At this point, we're done with types2. Make sure the package is
// garbage collected.
freePackage(pkg)
return sb.String()
}
// freePackage ensures the given package is garbage collected.
func freePackage(pkg *types2.Package) {
// The GC test below relies on a precise GC that runs finalizers as
// soon as objects are unreachable. Our implementation provides
// this, but other/older implementations may not (e.g., Go 1.4 does
// not because of #22350). To avoid imposing unnecessary
// restrictions on the GOROOT_BOOTSTRAP toolchain, we skip the test
// during bootstrapping.
if base.CompilerBootstrap {
return
}
// Set a finalizer on pkg so we can detect if/when it's collected.
done := make(chan struct{})
runtime.SetFinalizer(pkg, func(*types2.Package) { close(done) })
// Important: objects involved in cycles are not finalized, so zero
// out pkg to break its cycles and allow the finalizer to run.
*pkg = types2.Package{}
// It typically takes just 1 or 2 cycles to release pkg, but it
// doesn't hurt to try a few more times.
for i := 0; i < 10; i++ {
select {
case <-done:
return
default:
runtime.GC()
}
}
base.Fatalf("package never finalized")
}
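The same finalizer-and-GC-loop technique works outside the compiler. A minimal standalone sketch, using an illustrative type rather than types2.Package:
package main

import (
	"fmt"
	"runtime"
)

type resource struct {
	self *resource // a cycle: objects in cycles are never finalized
}

func main() {
	r := &resource{}
	r.self = r

	done := make(chan struct{})
	runtime.SetFinalizer(r, func(*resource) { close(done) })

	// Break the cycle, as freePackage does with *pkg = types2.Package{},
	// so the finalizer is allowed to run.
	*r = resource{}
	r = nil

	for i := 0; i < 10; i++ {
		select {
		case <-done:
			fmt.Println("collected")
			return
		default:
			runtime.GC()
		}
	}
	fmt.Println("never finalized")
}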
func readPackage(pr *pkgReader, importpkg *types.Pkg) {
r := pr.newReader(relocMeta, publicRootIdx, syncPublic)
pkg := r.pkg()
assert(pkg == importpkg)
if r.bool() {
sym := pkg.Lookup(".inittask")
task := ir.NewNameAt(src.NoXPos, sym)
task.Class = ir.PEXTERN
sym.Def = task
}
for i, n := 0, r.len(); i < n; i++ {
r.sync(syncObject)
assert(!r.bool())
idx := r.reloc(relocObj)
assert(r.len() == 0)
path, name, code := r.p.peekObj(idx)
if code != objStub {
objReader[types.NewPkg(path, "").Lookup(name)] = pkgReaderIndex{pr, idx, nil}
}
}
}
func writeNewExport(out io.Writer) {
l := linker{
pw: newPkgEncoder(),
pkgs: make(map[string]int),
decls: make(map[*types.Sym]int),
}
publicRootWriter := l.pw.newEncoder(relocMeta, syncPublic)
assert(publicRootWriter.idx == publicRootIdx)
var selfPkgIdx int
{
pr := localPkgReader
r := pr.newDecoder(relocMeta, publicRootIdx, syncPublic)
r.sync(syncPkg)
selfPkgIdx = l.relocIdx(pr, relocPkg, r.reloc(relocPkg))
r.bool() // has init
for i, n := 0, r.len(); i < n; i++ {
r.sync(syncObject)
assert(!r.bool())
idx := r.reloc(relocObj)
assert(r.len() == 0)
xpath, xname, xtag := pr.peekObj(idx)
assert(xpath == pr.pkgPath)
assert(xtag != objStub)
if types.IsExported(xname) {
l.relocIdx(pr, relocObj, idx)
}
}
r.sync(syncEOF)
}
{
var idxs []int
for _, idx := range l.decls {
idxs = append(idxs, idx)
}
sort.Ints(idxs)
w := publicRootWriter
w.sync(syncPkg)
w.reloc(relocPkg, selfPkgIdx)
w.bool(typecheck.Lookup(".inittask").Def != nil)
w.len(len(idxs))
for _, idx := range idxs {
w.sync(syncObject)
w.bool(false)
w.reloc(relocObj, idx)
w.len(0)
}
w.sync(syncEOF)
w.flush()
}
l.pw.dump(out)
}

View file

@@ -0,0 +1,153 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package noder_test
import (
"encoding/json"
"flag"
exec "internal/execabs"
"os"
"reflect"
"runtime"
"strings"
"testing"
)
var (
flagPkgs = flag.String("pkgs", "std", "list of packages to compare (ignored in -short mode)")
flagAll = flag.Bool("all", false, "enable testing of all GOOS/GOARCH targets")
flagParallel = flag.Bool("parallel", false, "test GOOS/GOARCH targets in parallel")
)
// TestUnifiedCompare implements a test similar to running:
//
// $ go build -toolexec="toolstash -cmp" std
//
// The -pkgs flag controls the list of packages tested.
//
// By default, only the native GOOS/GOARCH target is enabled. The -all
// flag enables testing of non-native targets. The -parallel flag
// additionally enables testing of targets in parallel.
//
// Caution: Testing all targets is very resource intensive! On an IBM
// P920 (dual Intel Xeon Gold 6154 CPUs; 36 cores, 192GB RAM), testing
// all targets in parallel takes about 5 minutes. Using the 'go test'
// command's -run flag for subtest matching is recommended for less
// powerful machines.
func TestUnifiedCompare(t *testing.T) {
targets, err := exec.Command("go", "tool", "dist", "list").Output()
if err != nil {
t.Fatal(err)
}
for _, target := range strings.Fields(string(targets)) {
t.Run(target, func(t *testing.T) {
parts := strings.Split(target, "/")
goos, goarch := parts[0], parts[1]
if !(*flagAll || goos == runtime.GOOS && goarch == runtime.GOARCH) {
t.Skip("skipping non-native target (use -all to enable)")
}
if *flagParallel {
t.Parallel()
}
pkgs1 := loadPackages(t, goos, goarch, "-d=unified=0 -d=inlfuncswithclosures=0 -d=unifiedquirks=1 -G=0")
pkgs2 := loadPackages(t, goos, goarch, "-d=unified=1 -d=inlfuncswithclosures=0 -d=unifiedquirks=1 -G=0")
if len(pkgs1) != len(pkgs2) {
t.Fatalf("length mismatch: %v != %v", len(pkgs1), len(pkgs2))
}
for i := range pkgs1 {
pkg1 := pkgs1[i]
pkg2 := pkgs2[i]
path := pkg1.ImportPath
if path != pkg2.ImportPath {
t.Fatalf("mismatched paths: %q != %q", path, pkg2.ImportPath)
}
// Skip packages that don't have any source files (e.g., packages
// unsafe, embed/internal/embedtest, and cmd/internal/moddeps).
if pkg1.Export == "" && pkg2.Export == "" {
continue
}
if pkg1.BuildID == pkg2.BuildID {
t.Errorf("package %q: build IDs unexpectedly matched", path)
}
// Unlike toolstash -cmp, we're comparing the same compiler
// binary against itself, just with different flags. So we
// don't need to worry about skipping over mismatched version
// strings, but we do need to account for differing build IDs.
//
// Fortunately, build IDs are cryptographic 256-bit hashes,
// and cmd/go provides us with them up front. So we can just
// use them as delimiters to split the files, and then check
// that the substrings are all equal.
file1 := strings.Split(readFile(t, pkg1.Export), pkg1.BuildID)
file2 := strings.Split(readFile(t, pkg2.Export), pkg2.BuildID)
if !reflect.DeepEqual(file1, file2) {
t.Errorf("package %q: compile output differs", path)
}
}
})
}
}
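The build-ID splitting can be read as a tiny helper. A hedged sketch (illustrative name, relying on the reflect and strings imports above): two export files are considered equal if, after removing each file's own build ID, the remaining substrings match pairwise.
// compareIgnoringBuildIDs reports whether file1 and file2 are
// identical except for occurrences of their respective build IDs.
func compareIgnoringBuildIDs(file1, id1, file2, id2 string) bool {
	return reflect.DeepEqual(strings.Split(file1, id1), strings.Split(file2, id2))
}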
type pkg struct {
ImportPath string
Export string
BuildID string
Incomplete bool
}
func loadPackages(t *testing.T, goos, goarch, gcflags string) []pkg {
args := []string{"list", "-e", "-export", "-json", "-gcflags=all=" + gcflags, "--"}
if testing.Short() {
t.Log("short testing mode; only testing package runtime")
args = append(args, "runtime")
} else {
args = append(args, strings.Fields(*flagPkgs)...)
}
cmd := exec.Command("go", args...)
cmd.Env = append(os.Environ(), "GOOS="+goos, "GOARCH="+goarch)
cmd.Stderr = os.Stderr
t.Logf("running %v", cmd)
stdout, err := cmd.StdoutPipe()
if err != nil {
t.Fatal(err)
}
if err := cmd.Start(); err != nil {
t.Fatal(err)
}
var res []pkg
for dec := json.NewDecoder(stdout); dec.More(); {
var pkg pkg
if err := dec.Decode(&pkg); err != nil {
t.Fatal(err)
}
if pkg.Incomplete {
t.Fatalf("incomplete package: %q", pkg.ImportPath)
}
res = append(res, pkg)
}
if err := cmd.Wait(); err != nil {
t.Fatal(err)
}
return res
}
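Note the decoder loop above: go list -json prints a stream of concatenated JSON objects, not a JSON array, so the output is consumed with Decoder.More rather than a single Unmarshal. A standalone illustration:
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

func main() {
	// Two concatenated objects, shaped like `go list -json` output.
	stream := `{"ImportPath":"fmt"}` + "\n" + `{"ImportPath":"os"}`
	dec := json.NewDecoder(strings.NewReader(stream))
	for dec.More() {
		var p struct{ ImportPath string }
		if err := dec.Decode(&p); err != nil {
			panic(err)
		}
		fmt.Println(p.ImportPath)
	}
}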
func readFile(t *testing.T, name string) string {
buf, err := os.ReadFile(name)
if err != nil {
t.Fatal(err)
}
return string(buf)
}

View file

@@ -55,7 +55,15 @@ func (g *irgen) validate(n syntax.Node) {
case *syntax.CallExpr:
tv := g.info.Types[n.Fun]
if tv.IsBuiltin() {
switch builtin := n.Fun.(type) {
fun := n.Fun
for {
builtin, ok := fun.(*syntax.ParenExpr)
if !ok {
break
}
fun = builtin.X
}
switch builtin := fun.(type) {
case *syntax.Name:
g.validateBuiltin(builtin.Value, n)
case *syntax.SelectorExpr:
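The loop above peels redundant parentheses so that, e.g., (len)(x) is still recognized as a builtin call. The same idea expressed against the exported go/ast package (the compiler itself uses its internal syntax package; assumes import "go/ast"):
// unparen strips any number of enclosing parentheses, so (((len)))
// unwraps to the identifier len.
func unparen(e ast.Expr) ast.Expr {
	for {
		p, ok := e.(*ast.ParenExpr)
		if !ok {
			return e
		}
		e = p.X
	}
}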

File diff suppressed because it is too large Load diff

View file

@@ -304,7 +304,7 @@ func (d *initDeps) visit(n ir.Node) {
n := n.(*ir.ClosureExpr)
d.inspectList(n.Func.Body)
case ir.ODOTMETH, ir.OCALLPART, ir.OMETHEXPR:
case ir.ODOTMETH, ir.OMETHVALUE, ir.OMETHEXPR:
d.foundDep(ir.MethodExprName(n))
}
}

View file

@@ -20,7 +20,6 @@ func Init(arch *ssagen.ArchInfo) {
arch.ZeroRange = zerorange
arch.Ginsnop = ginsnop
arch.Ginsnopdefer = ginsnopdefer
arch.SSAMarkMoves = ssaMarkMoves
arch.SSAGenValue = ssaGenValue

View file

@@ -53,30 +53,3 @@ func ginsnop(pp *objw.Progs) *obj.Prog {
p.To.Reg = ppc64.REG_R0
return p
}
func ginsnopdefer(pp *objw.Progs) *obj.Prog {
// On PPC64 two nops are required in the defer case.
//
// (see gc/cgen.go, gc/plive.go -- copy of comment below)
//
// On ppc64, when compiling Go into position
// independent code on ppc64le we insert an
// instruction to reload the TOC pointer from the
// stack as well. See the long comment near
// jmpdefer in runtime/asm_ppc64.s for why.
// If the MOVD is not needed, insert a hardware NOP
// so that the same number of instructions are used
// on ppc64 in both shared and non-shared modes.
ginsnop(pp)
if base.Ctxt.Flag_shared {
p := pp.Prog(ppc64.AMOVD)
p.From.Type = obj.TYPE_MEM
p.From.Offset = 24
p.From.Reg = ppc64.REGSP
p.To.Type = obj.TYPE_REG
p.To.Reg = ppc64.REG_R2
return p
}
return ginsnop(pp)
}

View file

@@ -679,8 +679,7 @@ func EqString(s, t ir.Node) (eqlen *ir.BinaryExpr, eqmem *ir.CallExpr) {
fn := typecheck.LookupRuntime("memequal")
fn = typecheck.SubstArgTypes(fn, types.Types[types.TUINT8], types.Types[types.TUINT8])
call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, []ir.Node{sptr, tptr, ir.Copy(slen)})
typecheck.Call(call)
call := typecheck.Call(base.Pos, fn, []ir.Node{sptr, tptr, ir.Copy(slen)}, false).(*ir.CallExpr)
cmp := ir.NewBinaryExpr(base.Pos, ir.OEQ, slen, tlen)
cmp = typecheck.Expr(cmp).(*ir.BinaryExpr)
@@ -716,8 +715,7 @@ func EqInterface(s, t ir.Node) (eqtab *ir.BinaryExpr, eqdata *ir.CallExpr) {
sdata.SetTypecheck(1)
tdata.SetTypecheck(1)
call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, []ir.Node{stab, sdata, tdata})
typecheck.Call(call)
call := typecheck.Call(base.Pos, fn, []ir.Node{stab, sdata, tdata}, false).(*ir.CallExpr)
cmp := ir.NewBinaryExpr(base.Pos, ir.OEQ, stab, ttab)
cmp = typecheck.Expr(cmp).(*ir.BinaryExpr)

View file

@@ -28,23 +28,13 @@ import (
"cmd/internal/src"
)
type itabEntry struct {
t, itype *types.Type
lsym *obj.LSym // symbol of the itab itself
// symbols of each method in
// the itab, sorted by byte offset;
// filled in by CompileITabs
entries []*obj.LSym
}
type ptabEntry struct {
s *types.Sym
t *types.Type
}
func CountTabs() (numPTabs, numITabs int) {
return len(ptabs), len(itabs)
func CountPTabs() int {
return len(ptabs)
}
// runtime interface and reflection data structures
@@ -56,7 +46,6 @@ var (
gcsymmu sync.Mutex // protects gcsymset and gcsymslice
gcsymset = make(map[*types.Type]struct{})
itabs []itabEntry
ptabs []*ir.Name
)
@@ -313,6 +302,10 @@ func MapIterType(t *types.Type) *types.Type {
// methods returns the methods of the non-interface type t, sorted by name.
// Generates stub functions as needed.
func methods(t *types.Type) []*typeSig {
if t.HasShape() {
// Shape types have no methods.
return nil
}
// method type
mt := types.ReceiverBaseType(t)
@@ -321,13 +314,6 @@ func methods(t *types.Type) []*typeSig {
}
typecheck.CalcMethods(mt)
// type stored in interface word
it := t
if !types.IsDirectIface(it) {
it = types.NewPtr(t)
}
// make list of methods for t,
// generating code if necessary.
var ms []*typeSig
@@ -355,8 +341,8 @@ func methods(t *types.Type) []*typeSig {
sig := &typeSig{
name: f.Sym,
isym: methodWrapper(it, f),
tsym: methodWrapper(t, f),
isym: methodWrapper(t, f, true),
tsym: methodWrapper(t, f, false),
type_: typecheck.NewMethodType(f.Type, t),
mtype: typecheck.NewMethodType(f.Type, nil),
}
@@ -394,7 +380,7 @@ func imethods(t *types.Type) []*typeSig {
// IfaceType.Method is not in the reflect data.
// Generate the method body, so that compiled
// code can refer to it.
methodWrapper(t, f)
methodWrapper(t, f, false)
}
return methods
@@ -735,7 +721,7 @@ func dcommontype(lsym *obj.LSym, t *types.Type) int {
}
exported := false
p := t.LongString()
p := t.NameString()
// If we're writing out type T,
// we are very likely to write out type *T as well.
// Use the string "*T"[1:] for "T", so that the two
@@ -799,11 +785,11 @@ func dcommontype(lsym *obj.LSym, t *types.Type) int {
// TrackSym returns the symbol for tracking use of field/method f, assumed
// to be a member of struct/interface type t.
func TrackSym(t *types.Type, f *types.Field) *obj.LSym {
return base.PkgLinksym("go.track", t.ShortString()+"."+f.Sym.Name, obj.ABI0)
return base.PkgLinksym("go.track", t.LinkString()+"."+f.Sym.Name, obj.ABI0)
}
func TypeSymPrefix(prefix string, t *types.Type) *types.Sym {
p := prefix + "." + t.ShortString()
p := prefix + "." + t.LinkString()
s := types.TypeSymLookup(p)
// This function is for looking up type-related generated functions
@@ -848,16 +834,28 @@ func TypePtr(t *types.Type) *ir.AddrExpr {
return typecheck.Expr(typecheck.NodAddr(n)).(*ir.AddrExpr)
}
func ITabAddr(t, itype *types.Type) *ir.AddrExpr {
if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() || !itype.IsInterface() || itype.IsEmptyInterface() {
base.Fatalf("ITabAddr(%v, %v)", t, itype)
}
s, existed := ir.Pkgs.Itab.LookupOK(t.ShortString() + "," + itype.ShortString())
// ITabLsym returns the LSym representing the itab for concrete type typ
// implementing interface iface.
func ITabLsym(typ, iface *types.Type) *obj.LSym {
s, existed := ir.Pkgs.Itab.LookupOK(typ.LinkString() + "," + iface.LinkString())
lsym := s.Linksym()
if !existed {
itabs = append(itabs, itabEntry{t: t, itype: itype, lsym: s.Linksym()})
writeITab(lsym, typ, iface)
}
return lsym
}
// ITabAddr returns an expression representing a pointer to the itab
// for concrete type typ implementing interface iface.
func ITabAddr(typ, iface *types.Type) *ir.AddrExpr {
s, existed := ir.Pkgs.Itab.LookupOK(typ.LinkString() + "," + iface.LinkString())
lsym := s.Linksym()
if !existed {
writeITab(lsym, typ, iface)
}
lsym := s.Linksym()
n := ir.NewLinksymExpr(base.Pos, lsym, types.Types[types.TUINT8])
return typecheck.Expr(typecheck.NodAddr(n)).(*ir.AddrExpr)
}
@@ -945,25 +943,27 @@ func writeType(t *types.Type) *obj.LSym {
if t.IsPtr() && t.Sym() == nil && t.Elem().Sym() != nil {
tbase = t.Elem()
}
if tbase.Kind() == types.TFORW {
base.Fatalf("unresolved defined type: %v", tbase)
}
dupok := 0
if tbase.Sym() == nil {
if tbase.Sym() == nil || tbase.HasShape() { // TODO(mdempsky): Probably need DUPOK for instantiated types too.
dupok = obj.DUPOK
}
if base.Ctxt.Pkgpath != "runtime" || (tbase != types.Types[tbase.Kind()] && tbase != types.ByteType && tbase != types.RuneType && tbase != types.ErrorType) { // int, float, etc
// named types from other files are defined only by those files
if tbase.Sym() != nil && tbase.Sym().Pkg != types.LocalPkg {
if i := typecheck.BaseTypeIndex(t); i >= 0 {
lsym.Pkg = tbase.Sym().Pkg.Prefix
lsym.SymIdx = int32(i)
lsym.Set(obj.AttrIndexed, true)
}
return lsym
}
// TODO(mdempsky): Investigate whether this can happen.
if tbase.Kind() == types.TFORW {
return lsym
if !NeedEmit(tbase) {
if i := typecheck.BaseTypeIndex(t); i >= 0 {
lsym.Pkg = tbase.Sym().Pkg.Prefix
lsym.SymIdx = int32(i)
lsym.Set(obj.AttrIndexed, true)
}
// TODO(mdempsky): Investigate whether this still happens.
// If we know we don't need to emit code for a type,
// we should have a link-symbol index for it.
// See also TODO in NeedEmit.
return lsym
}
ot := 0
@@ -1226,88 +1226,11 @@ func InterfaceMethodOffset(ityp *types.Type, i int64) int64 {
return int64(commonSize()+4*types.PtrSize+uncommonSize(ityp)) + i*8
}
// for each itabEntry, gather the methods on
// the concrete type that implement the interface
func CompileITabs() {
for i := range itabs {
tab := &itabs[i]
methods := genfun(tab.t, tab.itype)
if len(methods) == 0 {
continue
}
tab.entries = methods
}
}
// for the given concrete type and interface
// type, return the (sorted) set of methods
// on the concrete type that implement the interface
func genfun(t, it *types.Type) []*obj.LSym {
if t == nil || it == nil {
return nil
}
sigs := imethods(it)
methods := methods(t)
out := make([]*obj.LSym, 0, len(sigs))
// TODO(mdempsky): Short circuit before calling methods(t)?
// See discussion on CL 105039.
if len(sigs) == 0 {
return nil
}
// both sigs and methods are sorted by name,
// so we can find the intersect in a single pass
for _, m := range methods {
if m.name == sigs[0].name {
out = append(out, m.isym)
sigs = sigs[1:]
if len(sigs) == 0 {
break
}
}
}
if len(sigs) != 0 {
base.Fatalf("incomplete itab")
}
return out
}
// ITabSym uses the information gathered in
// CompileITabs to de-virtualize interface methods.
// Since this is called by the SSA backend, it shouldn't
// generate additional Nodes, Syms, etc.
func ITabSym(it *obj.LSym, offset int64) *obj.LSym {
var syms []*obj.LSym
if it == nil {
return nil
}
for i := range itabs {
e := &itabs[i]
if e.lsym == it {
syms = e.entries
break
}
}
if syms == nil {
return nil
}
// keep this arithmetic in sync with *itab layout
methodnum := int((offset - 2*int64(types.PtrSize) - 8) / int64(types.PtrSize))
if methodnum >= len(syms) {
return nil
}
return syms[methodnum]
}
// NeedRuntimeType ensures that a runtime type descriptor is emitted for t.
func NeedRuntimeType(t *types.Type) {
if t.HasTParam() {
// Generic types don't have a runtime type descriptor (but will
// have a dictionary)
// Generic types don't really exist at run-time and have no runtime
// type descriptor. But we do write out shape types.
return
}
if _, ok := signatset[t]; !ok {
@@ -1349,29 +1272,66 @@ func WriteRuntimeTypes() {
}
}
func WriteTabs() {
// process itabs
for _, i := range itabs {
// dump empty itab symbol into i.sym
// type itab struct {
// inter *interfacetype
// _type *_type
// hash uint32
// _ [4]byte
// fun [1]uintptr // variable sized
// }
o := objw.SymPtr(i.lsym, 0, writeType(i.itype), 0)
o = objw.SymPtr(i.lsym, o, writeType(i.t), 0)
o = objw.Uint32(i.lsym, o, types.TypeHash(i.t)) // copy of type hash
o += 4 // skip unused field
for _, fn := range genfun(i.t, i.itype) {
o = objw.SymPtrWeak(i.lsym, o, fn, 0) // method pointer for each method
}
// Nothing writes static itabs, so they are read only.
objw.Global(i.lsym, int32(o), int16(obj.DUPOK|obj.RODATA))
i.lsym.Set(obj.AttrContentAddressable, true)
// writeITab writes the itab for concrete type typ implementing
// interface iface.
func writeITab(lsym *obj.LSym, typ, iface *types.Type) {
// TODO(mdempsky): Fix methodWrapper, geneq, and genhash (and maybe
// others) to stop clobbering these.
oldpos, oldfn := base.Pos, ir.CurFunc
defer func() { base.Pos, ir.CurFunc = oldpos, oldfn }()
if typ == nil || (typ.IsPtr() && typ.Elem() == nil) || typ.IsUntyped() || iface == nil || !iface.IsInterface() || iface.IsEmptyInterface() {
base.Fatalf("writeITab(%v, %v)", typ, iface)
}
sigs := iface.AllMethods().Slice()
entries := make([]*obj.LSym, 0, len(sigs))
// both sigs and methods are sorted by name,
// so we can find the intersection in a single pass
for _, m := range methods(typ) {
if m.name == sigs[0].Sym {
entries = append(entries, m.isym)
if m.isym == nil {
panic("NO ISYM")
}
sigs = sigs[1:]
if len(sigs) == 0 {
break
}
}
if sigs[0].Sym.Name == "==" {
sigs = sigs[1:]
if len(sigs) == 0 {
break
}
}
}
if len(sigs) != 0 {
base.Fatalf("incomplete itab")
}
// dump empty itab symbol into lsym
// type itab struct {
// inter *interfacetype
// _type *_type
// hash uint32
// _ [4]byte
// fun [1]uintptr // variable sized
// }
o := objw.SymPtr(lsym, 0, writeType(iface), 0)
o = objw.SymPtr(lsym, o, writeType(typ), 0)
o = objw.Uint32(lsym, o, types.TypeHash(typ)) // copy of type hash
o += 4 // skip unused field
for _, fn := range entries {
o = objw.SymPtrWeak(lsym, o, fn, 0) // method pointer for each method
}
// Nothing writes static itabs, so they are read only.
objw.Global(lsym, int32(o), int16(obj.DUPOK|obj.RODATA))
lsym.Set(obj.AttrContentAddressable, true)
}
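Both inputs to the matching loop above are sorted by name, so pairing interface signatures with concrete methods is a linear merge. A standalone sketch of the same intersection, with strings standing in for method symbols (the extra "==" case for synthesized equality methods is omitted):
// matchMethods pairs each signature in sigs with the method of the
// same name. Both slices must be sorted; ok is false when a signature
// goes unmatched, i.e. the itab would be incomplete.
func matchMethods(sigs, methods []string) (out []string, ok bool) {
	for _, m := range methods {
		if len(sigs) == 0 {
			break
		}
		if m == sigs[0] {
			out = append(out, m)
			sigs = sigs[1:]
		}
	}
	return out, len(sigs) == 0
}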
func WriteTabs() {
// process ptabs
if types.LocalPkg.Name == "main" && len(ptabs) > 0 {
ot := 0
@@ -1453,7 +1413,7 @@ func WriteBasicTypes() {
type typeAndStr struct {
t *types.Type
short string
short string // "short" here means NameString
regular string
}
@@ -1466,8 +1426,13 @@ func (a typesByString) Less(i, j int) bool {
}
// When the only difference between the types is whether
// they refer to byte or uint8, such as **byte vs **uint8,
// the types' ShortStrings can be identical.
// the types' NameStrings can be identical.
// To preserve deterministic sort ordering, sort these by String().
//
// TODO(mdempsky): This all seems suspect. Using LinkString would
// avoid naming collisions, and there shouldn't be a reason to care
// about "byte" vs "uint8": they share the same runtime type
// descriptor anyway.
if a[i].regular != a[j].regular {
return a[i].regular < a[j].regular
}
@@ -1741,6 +1706,49 @@ func CollectPTabs() {
}
}
// NeedEmit reports whether typ is a type that we need to emit code
// for (e.g., runtime type descriptors, method wrappers).
func NeedEmit(typ *types.Type) bool {
// TODO(mdempsky): Export data should keep track of which anonymous
// and instantiated types were emitted, so at least downstream
// packages can skip re-emitting them.
//
// Perhaps we can just generalize the linker-symbol indexing to
// track the index of arbitrary types, not just defined types, and
// use its presence to detect this. The same idea would work for
// instantiated generic functions too.
switch sym := typ.Sym(); {
case sym == nil:
// Anonymous type; possibly never seen before or ever again.
// Need to emit to be safe (however, see TODO above).
return true
case sym.Pkg == types.LocalPkg:
// Local defined type; our responsibility.
return true
case base.Ctxt.Pkgpath == "runtime" && (sym.Pkg == types.BuiltinPkg || sym.Pkg == ir.Pkgs.Unsafe):
// Package runtime is responsible for including code for builtin
// types (predeclared and package unsafe).
return true
case typ.IsFullyInstantiated():
// Instantiated type; possibly instantiated with unique type arguments.
// Need to emit to be safe (however, see TODO above).
return true
case typ.HasShape():
// Shape type; need to emit even though it lives in the .shape package.
// TODO: make sure the linker deduplicates them (see dupok in writeType above).
return true
default:
// Should have been emitted by an imported package.
return false
}
}
// Generate a wrapper function to convert from
// a receiver of type T to a receiver of type U.
// That is,
@@ -1761,7 +1769,45 @@ func CollectPTabs() {
//
// rcvr - U
// method - M func (t T)(), a TFIELD type struct
func methodWrapper(rcvr *types.Type, method *types.Field) *obj.LSym {
//
// Also wraps methods on instantiated generic types for use in itab entries.
// For an instantiated generic type G[int], we generate wrappers like:
// G[int] pointer shaped:
// func (x G[int]) f(arg) {
// .inst.G[int].f(dictionary, x, arg)
// }
// G[int] not pointer shaped:
// func (x *G[int]) f(arg) {
// .inst.G[int].f(dictionary, *x, arg)
// }
// These wrappers are always fully stenciled.
func methodWrapper(rcvr *types.Type, method *types.Field, forItab bool) *obj.LSym {
orig := rcvr
if forItab && !types.IsDirectIface(rcvr) {
rcvr = rcvr.PtrTo()
}
generic := false
// We don't need a dictionary if we are reaching a method (possibly via an
// embedded field) which is an interface method.
if !types.IsInterfaceMethod(method.Type) {
rcvr1 := rcvr
if rcvr1.IsPtr() {
rcvr1 = rcvr.Elem()
}
if len(rcvr1.RParams()) > 0 {
// If rcvr has rparams, remember method as generic, which
// means we need to add a dictionary to the wrapper.
generic = true
targs := rcvr1.RParams()
for _, t := range targs {
if t.HasShape() {
base.Fatalf("method on type instantiated with shapes targ:%+v rcvr:%+v", t, rcvr)
}
}
}
}
newnam := ir.MethodSym(rcvr, method.Sym)
lsym := newnam.Linksym()
if newnam.Siggen() {
@@ -1769,19 +1815,18 @@ func methodWrapper(rcvr *types.Type, method *types.Field) *obj.LSym {
}
newnam.SetSiggen(true)
if types.Identical(rcvr, method.Type.Recv().Type) {
// Except in quirks mode, unified IR creates its own wrappers.
if base.Debug.Unified != 0 && base.Debug.UnifiedQuirks == 0 {
return lsym
}
// Only generate (*T).M wrappers for T.M in T's own package.
if rcvr.IsPtr() && rcvr.Elem() == method.Type.Recv().Type &&
rcvr.Elem().Sym() != nil && rcvr.Elem().Sym().Pkg != types.LocalPkg {
// For generic methods, we need to generate the wrapper even if the receiver
// types are identical, because we want to add the dictionary.
if !generic && types.Identical(rcvr, method.Type.Recv().Type) {
return lsym
}
// Only generate I.M wrappers for I in I's own package
// but keep doing it for error.Error (was issue #29304).
if rcvr.IsInterface() && rcvr.Sym() != nil && rcvr.Sym().Pkg != types.LocalPkg && rcvr != types.ErrorType {
if !NeedEmit(rcvr) || rcvr.IsPtr() && !NeedEmit(rcvr.Elem()) {
return lsym
}
@@ -1802,9 +1847,10 @@ func methodWrapper(rcvr *types.Type, method *types.Field) *obj.LSym {
nthis := ir.AsNode(tfn.Type().Recv().Nname)
methodrcvr := method.Type.Recv().Type
indirect := rcvr.IsPtr() && rcvr.Elem() == methodrcvr
// generate nil pointer check for better error
if rcvr.IsPtr() && rcvr.Elem() == methodrcvr {
if indirect {
// generating wrapper from *T to T.
n := ir.NewIfStmt(base.Pos, nil, nil, nil)
n.Cond = ir.NewBinaryExpr(base.Pos, ir.OEQ, nthis, typecheck.NodNil())
@@ -1814,7 +1860,6 @@ func methodWrapper(rcvr *types.Type, method *types.Field) *obj.LSym {
}
dot := typecheck.AddImplicitDots(ir.NewSelectorExpr(base.Pos, ir.OXDOT, nthis, method.Sym))
// generate call
// It's not possible to use a tail call when dynamic linking on ppc64le. The
// bad scenario is when a local call is made to the wrapper: the wrapper will
@@ -1826,7 +1871,7 @@ func methodWrapper(rcvr *types.Type, method *types.Field) *obj.LSym {
// Disable tailcall for RegabiArgs for now. The IR does not connect the
// arguments with the OTAILCALL node, and the arguments are not marshaled
// correctly.
if !base.Flag.Cfg.Instrumenting && rcvr.IsPtr() && methodrcvr.IsPtr() && method.Embedded != 0 && !types.IsInterfaceMethod(method.Type) && !(base.Ctxt.Arch.Name == "ppc64le" && base.Ctxt.Flag_dynlink) && !buildcfg.Experiment.RegabiArgs {
if !base.Flag.Cfg.Instrumenting && rcvr.IsPtr() && methodrcvr.IsPtr() && method.Embedded != 0 && !types.IsInterfaceMethod(method.Type) && !(base.Ctxt.Arch.Name == "ppc64le" && base.Ctxt.Flag_dynlink) && !buildcfg.Experiment.RegabiArgs && !generic {
// generate tail call: adjust pointer receiver and jump to embedded method.
left := dot.X // skip final .M
if !left.Type().IsPtr() {
@@ -1837,8 +1882,72 @@ func methodWrapper(rcvr *types.Type, method *types.Field) *obj.LSym {
fn.Body.Append(ir.NewTailCallStmt(base.Pos, method.Nname.(*ir.Name)))
} else {
fn.SetWrapper(true) // ignore frame for panic+recover matching
call := ir.NewCallExpr(base.Pos, ir.OCALL, dot, nil)
call.Args = ir.ParamNames(tfn.Type())
var call *ir.CallExpr
if generic && dot.X != nthis {
// TODO: for now, we don't try to generate dictionary wrappers for
// any methods involving embedded fields, because we're not
// generating the needed dictionaries in instantiateMethods.
generic = false
}
if generic {
var args []ir.Node
var targs []*types.Type
if rcvr.IsPtr() {
targs = rcvr.Elem().RParams()
} else {
targs = rcvr.RParams()
}
if strings.HasPrefix(ir.MethodSym(orig, method.Sym).Name, ".inst.") {
fmt.Printf("%s\n", ir.MethodSym(orig, method.Sym).Name)
panic("multiple .inst.")
}
// The wrapper for an auto-generated pointer/non-pointer
// receiver method should share the same dictionary as the
// corresponding original (user-written) method.
baseOrig := orig
if baseOrig.IsPtr() && !method.Type.Recv().Type.IsPtr() {
baseOrig = baseOrig.Elem()
} else if !baseOrig.IsPtr() && method.Type.Recv().Type.IsPtr() {
baseOrig = types.NewPtr(baseOrig)
}
args = append(args, getDictionary(ir.MethodSym(baseOrig, method.Sym), targs))
if indirect {
args = append(args, ir.NewStarExpr(base.Pos, dot.X))
} else if methodrcvr.IsPtr() && methodrcvr.Elem() == dot.X.Type() {
// Case where method call is via a non-pointer
// embedded field with a pointer method.
args = append(args, typecheck.NodAddrAt(base.Pos, dot.X))
} else {
args = append(args, dot.X)
}
args = append(args, ir.ParamNames(tfn.Type())...)
// Target method uses shaped names.
targs2 := make([]*types.Type, len(targs))
for i, t := range targs {
targs2[i] = typecheck.Shapify(t)
}
targs = targs2
sym := typecheck.MakeInstName(ir.MethodSym(methodrcvr, method.Sym), targs, true)
if sym.Def == nil {
// Currently we make sure that we have all the instantiations
// we need by generating them all in ../noder/stencil.go:instantiateMethods
// TODO: maybe there's a better, more incremental way to generate
// only the instantiations we need?
base.Fatalf("instantiation %s not found", sym.Name)
}
target := ir.AsNode(sym.Def)
call = ir.NewCallExpr(base.Pos, ir.OCALL, target, args)
// Fill in the generic method node that was not filled in
// in instantiateMethod.
method.Nname = fn.Nname
} else {
call = ir.NewCallExpr(base.Pos, ir.OCALL, dot, nil)
call.Args = ir.ParamNames(tfn.Type())
}
call.IsDDD = tfn.Type().IsVariadic()
if method.Type.NumResults() > 0 {
ret := ir.NewReturnStmt(base.Pos, nil)
@@ -1858,13 +1967,10 @@ func methodWrapper(rcvr *types.Type, method *types.Field) *obj.LSym {
ir.CurFunc = fn
typecheck.Stmts(fn.Body)
// Inline calls within (*T).M wrappers. This is safe because we only
// generate those wrappers within the same compilation unit as (T).M.
// TODO(mdempsky): Investigate why we can't enable this more generally.
if rcvr.IsPtr() && rcvr.Elem() == method.Type.Recv().Type && rcvr.Elem().Sym() != nil {
if AfterGlobalEscapeAnalysis {
inline.InlineCalls(fn)
escape.Batch([]*ir.Func{fn}, false)
}
escape.Batch([]*ir.Func{fn}, false)
ir.CurFunc = nil
typecheck.Target.Decls = append(typecheck.Target.Decls, fn)
@@ -1872,11 +1978,21 @@ func methodWrapper(rcvr *types.Type, method *types.Field) *obj.LSym {
return lsym
}
// AfterGlobalEscapeAnalysis tracks whether package gc has already
// performed the main, global escape analysis pass. If so,
// methodWrapper takes responsibility for escape analyzing any
// generated wrappers.
var AfterGlobalEscapeAnalysis bool
var ZeroSize int64
// MarkTypeUsedInInterface marks that type t is converted to an interface.
// This information is used in the linker in dead method elimination.
func MarkTypeUsedInInterface(t *types.Type, from *obj.LSym) {
if t.HasShape() {
// Shape types shouldn't be put in interfaces, so we shouldn't ever get here.
base.Fatalf("shape types have no methods %+v", t)
}
tsym := TypeLinksym(t)
// Emit a marker relocation. The linker will know the type is converted
// to an interface if "from" is reachable.
@@ -1897,9 +2013,55 @@ func MarkUsedIfaceMethod(n *ir.CallExpr) {
tsym := TypeLinksym(ityp)
r := obj.Addrel(ir.CurFunc.LSym)
r.Sym = tsym
// dot.Xoffset is the method index * PtrSize (the offset of code pointer
// dot.Offset() is the method index * PtrSize (the offset of code pointer
// in itab).
midx := dot.Offset() / int64(types.PtrSize)
r.Add = InterfaceMethodOffset(ityp, midx)
r.Type = objabi.R_USEIFACEMETHOD
}
// MarkUsedIfaceMethodIndex marks that method number ix (in the AllMethods list)
// of interface type ityp is used, and should be attached to lsym.
func MarkUsedIfaceMethodIndex(lsym *obj.LSym, ityp *types.Type, ix int) {
tsym := TypeLinksym(ityp)
r := obj.Addrel(lsym)
r.Sym = tsym
r.Add = InterfaceMethodOffset(ityp, int64(ix))
r.Type = objabi.R_USEIFACEMETHOD
}
// getDictionary returns the dictionary for the given named generic function
// or method, with the given type arguments.
func getDictionary(gf *types.Sym, targs []*types.Type) ir.Node {
if len(targs) == 0 {
base.Fatalf("%s should have type arguments", gf.Name)
}
for _, t := range targs {
if t.HasShape() {
base.Fatalf("dictionary for %s should only use concrete types: %+v", gf.Name, t)
}
}
sym := typecheck.MakeDictName(gf, targs, true)
// The dictionary must have already been generated by this point.
if lsym := sym.Linksym(); len(lsym.P) == 0 {
base.Fatalf("Dictionary should have already been generated: %s.%s", sym.Pkg.Path, sym.Name)
}
// Make a node referencing the dictionary symbol.
n := typecheck.NewName(sym)
n.SetType(types.Types[types.TUINTPTR]) // should probably be [...]uintptr, but doesn't really matter
n.SetTypecheck(1)
n.Class = ir.PEXTERN
sym.Def = n
// Return the address of the dictionary.
np := typecheck.NodAddr(n)
// Note: treat dictionary pointers as uintptrs, so they aren't pointers
// with respect to GC. That saves on stack scanning work, write barriers, etc.
// We can get away with it because dictionaries are global variables.
np.SetType(types.Types[types.TUINTPTR])
np.SetTypecheck(1)
return np
}

View file

@@ -16,7 +16,6 @@ func Init(arch *ssagen.ArchInfo) {
arch.MAXWIDTH = 1 << 50
arch.Ginsnop = ginsnop
arch.Ginsnopdefer = ginsnop
arch.ZeroRange = zeroRange
arch.SSAMarkMoves = ssaMarkMoves

View file

@@ -16,7 +16,6 @@ func Init(arch *ssagen.ArchInfo) {
arch.ZeroRange = zerorange
arch.Ginsnop = ginsnop
arch.Ginsnopdefer = ginsnop
arch.SSAMarkMoves = ssaMarkMoves
arch.SSAGenValue = ssaGenValue

View file

@@ -149,12 +149,6 @@ type Frontend interface {
// for the parts of that compound type.
SplitSlot(parent *LocalSlot, suffix string, offset int64, t *types.Type) LocalSlot
// DerefItab dereferences an itab function
// entry, given the symbol of the itab and
// the byte offset of the function pointer.
// It may return nil.
DerefItab(sym *obj.LSym, offset int64) *obj.LSym
// Line returns a string describing the given position.
Line(src.XPos) string
@@ -177,7 +171,7 @@ type Frontend interface {
}
// NewConfig returns a new configuration object for the given architecture.
func NewConfig(arch string, types Types, ctxt *obj.Link, optimize bool) *Config {
func NewConfig(arch string, types Types, ctxt *obj.Link, optimize, softfloat bool) *Config {
c := &Config{arch: arch, Types: types}
c.useAvg = true
c.useHmul = true
@@ -196,7 +190,7 @@ func NewConfig(arch string, types Types, ctxt *obj.Link, optimize bool) *Config
c.floatParamRegs = paramFloatRegAMD64
c.FPReg = framepointerRegAMD64
c.LinkReg = linkRegAMD64
c.hasGReg = buildcfg.Experiment.RegabiG
c.hasGReg = true
case "386":
c.PtrSize = 4
c.RegSize = 4
@@ -228,6 +222,8 @@ func NewConfig(arch string, types Types, ctxt *obj.Link, optimize bool) *Config
c.registers = registersARM64[:]
c.gpRegMask = gpRegMaskARM64
c.fpRegMask = fpRegMaskARM64
c.intParamRegs = paramIntRegARM64
c.floatParamRegs = paramFloatRegARM64
c.FPReg = framepointerRegARM64
c.LinkReg = linkRegARM64
c.hasGReg = true
@@ -324,6 +320,10 @@ func NewConfig(arch string, types Types, ctxt *obj.Link, optimize bool) *Config
c.optimize = optimize
c.useSSE = true
c.UseFMA = true
c.SoftFloat = softfloat
if softfloat {
c.floatParamRegs = nil // no FP registers in softfloat mode
}
c.ABI0 = abi.NewABIConfig(0, 0, ctxt.FixedFrameSize())
c.ABI1 = abi.NewABIConfig(len(c.intParamRegs), len(c.floatParamRegs), ctxt.FixedFrameSize())

View file

@@ -215,7 +215,7 @@ func (x *expandState) isAlreadyExpandedAggregateType(t *types.Type) bool {
return false
}
return t.IsStruct() || t.IsArray() || t.IsComplex() || t.IsInterface() || t.IsString() || t.IsSlice() ||
t.Size() > x.regSize && t.IsInteger()
(t.Size() > x.regSize && (t.IsInteger() || (x.f.Config.SoftFloat && t.IsFloat())))
}
// offsetFrom creates an offset from a pointer, simplifying chained offsets and offsets from SP
@@ -380,6 +380,12 @@ func (x *expandState) rewriteSelect(leaf *Value, selector *Value, offset int64,
// The OpLoad was created to load the single field of the IData
// This case removes that StructSelect.
if leafType != selector.Type {
if x.f.Config.SoftFloat && selector.Type.IsFloat() {
if x.debug {
x.Printf("---OpLoad, break\n")
}
break // softfloat pass will take care of that
}
x.f.Fatalf("Unexpected Load as selector, leaf=%s, selector=%s\n", leaf.LongString(), selector.LongString())
}
leaf.copyOf(selector)
@@ -525,11 +531,11 @@ func (x *expandState) rewriteSelect(leaf *Value, selector *Value, offset int64,
case OpComplexReal:
ls := x.rewriteSelect(leaf, selector.Args[0], offset, regOffset)
locs = x.splitSlots(ls, ".real", 0, leafType)
locs = x.splitSlots(ls, ".real", 0, selector.Type)
case OpComplexImag:
ls := x.rewriteSelect(leaf, selector.Args[0], offset+leafType.Width, regOffset+RO_complex_imag) // result is FloatNN, width of result is offset of imaginary part.
locs = x.splitSlots(ls, ".imag", leafType.Width, leafType)
ls := x.rewriteSelect(leaf, selector.Args[0], offset+selector.Type.Width, regOffset+RO_complex_imag) // result is FloatNN, width of result is offset of imaginary part.
locs = x.splitSlots(ls, ".imag", selector.Type.Width, selector.Type)
case OpStringLen, OpSliceLen:
ls := x.rewriteSelect(leaf, selector.Args[0], offset+x.ptrSize, regOffset+RO_slice_len)

View file

@@ -39,7 +39,7 @@ func testConfigArch(tb testing.TB, arch string) *Conf {
tb.Fatal("testTypes is 64-bit only")
}
c := &Conf{
config: NewConfig(arch, testTypes, ctxt, true),
config: NewConfig(arch, testTypes, ctxt, true, false),
tb: tb,
}
return c

View file

@@ -460,7 +460,7 @@
(IsInBounds idx len) => (SETB (CMPQ idx len))
(IsSliceInBounds idx len) => (SETBE (CMPQ idx len))
(NilCheck ...) => (LoweredNilCheck ...)
(GetG mem) && !(buildcfg.Experiment.RegabiG && v.Block.Func.OwnAux.Fn.ABI() == obj.ABIInternal) => (LoweredGetG mem) // only lower in old ABI. in new ABI we have a G register.
(GetG mem) && v.Block.Func.OwnAux.Fn.ABI() != obj.ABIInternal => (LoweredGetG mem) // only lower in old ABI. in new ABI we have a G register.
(GetClosurePtr ...) => (LoweredGetClosurePtr ...)
(GetCallerPC ...) => (LoweredGetCallerPC ...)
(GetCallerSP ...) => (LoweredGetCallerSP ...)

View file

@@ -2868,3 +2868,12 @@
&& isInlinableMemmove(dst, src, sz, config)
&& clobber(s1, s2, s3, call)
=> (Move [sz] dst src mem)
// Match post-lowering calls, register version.
(SelectN [0] call:(CALLstatic {sym} dst src (MOVDconst [sz]) mem))
&& sz >= 0
&& isSameCall(sym, "runtime.memmove")
&& call.Uses == 1
&& isInlinableMemmove(dst, src, sz, config)
&& clobber(call)
=> (Move [sz] dst src mem)
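Roughly, the new rule targets Go like the following once calls pass arguments in registers and the size is a small compile-time constant (a hedged illustration; whether memmove is actually called first depends on earlier lowering):
// copy with a constant length lowers to a runtime.memmove call that
// the rule above can replace with a direct 16-byte Move.
func copy16(dst, src []byte) {
	copy(dst[:16], src[:16])
}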

View file

@@ -482,9 +482,9 @@ func init() {
{name: "CSETM", argLength: 1, reg: readflags, asm: "CSETM", aux: "CCop"}, // auxint(flags) ? -1 : 0
// function calls
{name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
{name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("R26"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
{name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem
{name: "CALLstatic", argLength: -1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). last arg=mem, auxint=argsize, returns mem
{name: "CALLclosure", argLength: -1, reg: regInfo{inputs: []regMask{gpsp, buildReg("R26"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, last arg=mem, auxint=argsize, returns mem
{name: "CALLinter", argLength: -1, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, last arg=mem, auxint=argsize, returns mem
// pseudo-ops
{name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gpg}}, nilCheck: true, faultOnNilArg0: true}, // panic if arg0 is nil. arg1=mem.
@@ -759,15 +759,17 @@ func init() {
}
archs = append(archs, arch{
name: "ARM64",
pkg: "cmd/internal/obj/arm64",
genfile: "../../arm64/ssa.go",
ops: ops,
blocks: blocks,
regnames: regNamesARM64,
gpregmask: gp,
fpregmask: fp,
framepointerreg: -1, // not used
linkreg: int8(num["R30"]),
name: "ARM64",
pkg: "cmd/internal/obj/arm64",
genfile: "../../arm64/ssa.go",
ops: ops,
blocks: blocks,
regnames: regNamesARM64,
ParamIntRegNames: "R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15",
ParamFloatRegNames: "F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15",
gpregmask: gp,
fpregmask: fp,
framepointerreg: -1, // not used
linkreg: int8(num["R30"]),
})
}

View file

@@ -20664,7 +20664,7 @@ var opcodeTable = [...]opInfo{
{
name: "CALLstatic",
auxType: auxCallOff,
argLen: 1,
argLen: -1,
clobberFlags: true,
call: true,
reg: regInfo{
@@ -20674,7 +20674,7 @@ var opcodeTable = [...]opInfo{
{
name: "CALLclosure",
auxType: auxCallOff,
argLen: 3,
argLen: -1,
clobberFlags: true,
call: true,
reg: regInfo{
@@ -20688,7 +20688,7 @@ var opcodeTable = [...]opInfo{
{
name: "CALLinter",
auxType: auxCallOff,
argLen: 2,
argLen: -1,
clobberFlags: true,
call: true,
reg: regInfo{
@@ -36400,8 +36400,8 @@ var registersARM64 = [...]Register{
{62, arm64.REG_F31, -1, "F31"},
{63, 0, -1, "SB"},
}
var paramIntRegARM64 = []int8(nil)
var paramFloatRegARM64 = []int8(nil)
var paramIntRegARM64 = []int8{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}
var paramFloatRegARM64 = []int8{31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46}
var gpRegMaskARM64 = regMask(670826495)
var fpRegMaskARM64 = regMask(9223372034707292160)
var specialRegMaskARM64 = regMask(0)

View file

@@ -745,27 +745,21 @@ func uaddOvf(a, b int64) bool {
return uint64(a)+uint64(b) < uint64(a)
}
// de-virtualize an InterCall
// 'sym' is the symbol for the itab
func devirt(v *Value, aux Aux, sym Sym, offset int64) *AuxCall {
f := v.Block.Func
n, ok := sym.(*obj.LSym)
if !ok {
// loadLSymOffset simulates reading a word at an offset into a
// read-only symbol's runtime memory. If it would read a pointer to
// another symbol, that symbol is returned. Otherwise, it returns nil.
func loadLSymOffset(lsym *obj.LSym, offset int64) *obj.LSym {
if lsym.Type != objabi.SRODATA {
return nil
}
lsym := f.fe.DerefItab(n, offset)
if f.pass.debug > 0 {
if lsym != nil {
f.Warnl(v.Pos, "de-virtualizing call")
} else {
f.Warnl(v.Pos, "couldn't de-virtualize call")
for _, r := range lsym.R {
if int64(r.Off) == offset && r.Type&^objabi.R_WEAK == objabi.R_ADDR && r.Add == 0 {
return r.Sym
}
}
if lsym == nil {
return nil
}
va := aux.(*AuxCall)
return StaticAuxCall(lsym, va.abiInfo)
return nil
}
// de-virtualize an InterLECall
@@ -776,18 +770,14 @@ func devirtLESym(v *Value, aux Aux, sym Sym, offset int64) *obj.LSym {
return nil
}
f := v.Block.Func
lsym := f.fe.DerefItab(n, offset)
if f.pass.debug > 0 {
lsym := loadLSymOffset(n, offset)
if f := v.Block.Func; f.pass.debug > 0 {
if lsym != nil {
f.Warnl(v.Pos, "de-virtualizing call")
} else {
f.Warnl(v.Pos, "couldn't de-virtualize call")
}
}
if lsym == nil {
return nil
}
return lsym
}
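loadLSymOffset's relocation scan can be pictured with simplified types; the real fields live on cmd/internal/obj's LSym and Reloc, so the names below are illustrative:
// reloc stands in for obj.Reloc: the word at off will hold the
// address of sym plus add once linked.
type reloc struct {
	off int64
	add int64
	sym string
}

// loadWordAt mimics reading a pointer-sized word from a read-only
// symbol: an address relocation at that offset names the symbol the
// word will point at (e.g. a method in an itab's fun array).
func loadWordAt(relocs []reloc, offset int64) (string, bool) {
	for _, r := range relocs {
		if r.off == offset && r.add == 0 {
			return r.sym, true
		}
	}
	return "", false
}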

View file

@@ -3,7 +3,6 @@
package ssa
import "internal/buildcfg"
import "math"
import "cmd/internal/obj"
import "cmd/compile/internal/types"
@@ -29339,11 +29338,11 @@ func rewriteValueAMD64_OpFloor(v *Value) bool {
func rewriteValueAMD64_OpGetG(v *Value) bool {
v_0 := v.Args[0]
// match: (GetG mem)
// cond: !(buildcfg.Experiment.RegabiG && v.Block.Func.OwnAux.Fn.ABI() == obj.ABIInternal)
// cond: v.Block.Func.OwnAux.Fn.ABI() != obj.ABIInternal
// result: (LoweredGetG mem)
for {
mem := v_0
if !(!(buildcfg.Experiment.RegabiG && v.Block.Func.OwnAux.Fn.ABI() == obj.ABIInternal)) {
if !(v.Block.Func.OwnAux.Fn.ABI() != obj.ABIInternal) {
break
}
v.reset(OpAMD64LoweredGetG)

View file

@@ -25997,7 +25997,7 @@ func rewriteValueARM64_OpSelectN(v *Value) bool {
break
}
call := v_0
if call.Op != OpARM64CALLstatic {
if call.Op != OpARM64CALLstatic || len(call.Args) != 1 {
break
}
sym := auxToCall(call.Aux)
@@ -26031,6 +26031,34 @@ func rewriteValueARM64_OpSelectN(v *Value) bool {
v.AddArg3(dst, src, mem)
return true
}
// match: (SelectN [0] call:(CALLstatic {sym} dst src (MOVDconst [sz]) mem))
// cond: sz >= 0 && isSameCall(sym, "runtime.memmove") && call.Uses == 1 && isInlinableMemmove(dst, src, sz, config) && clobber(call)
// result: (Move [sz] dst src mem)
for {
if auxIntToInt64(v.AuxInt) != 0 {
break
}
call := v_0
if call.Op != OpARM64CALLstatic || len(call.Args) != 4 {
break
}
sym := auxToCall(call.Aux)
mem := call.Args[3]
dst := call.Args[0]
src := call.Args[1]
call_2 := call.Args[2]
if call_2.Op != OpARM64MOVDconst {
break
}
sz := auxIntToInt64(call_2.AuxInt)
if !(sz >= 0 && isSameCall(sym, "runtime.memmove") && call.Uses == 1 && isInlinableMemmove(dst, src, sz, config) && clobber(call)) {
break
}
v.reset(OpMove)
v.AuxInt = int64ToAuxInt(sz)
v.AddArg3(dst, src, mem)
return true
}
return false
}
func rewriteValueARM64_OpSlicemask(v *Value) bool {

View file

@@ -220,7 +220,7 @@ func schedule(f *Func) {
// unless they are phi values (which must be first).
// OpArg also goes first -- if it is stack it register allocates
// to a LoadReg, if it is register it is from the beginning anyway.
if c.Op == OpPhi || c.Op == OpArg {
if score[c.ID] == ScorePhi || score[c.ID] == ScoreArg {
continue
}
score[c.ID] = ScoreControl

View file

@@ -63,6 +63,7 @@ func softfloat(f *Func) {
v.Aux = f.Config.Types.UInt32
case 8:
v.Aux = f.Config.Types.UInt64
newInt64 = true
default:
v.Fatalf("bad float type with size %d", size)
}

View file

@@ -152,6 +152,9 @@ func (s *SymABIs) GenABIWrappers() {
// Apply definitions.
defABI, hasDefABI := s.defs[symName]
if hasDefABI {
if len(fn.Body) != 0 {
base.ErrorfAt(fn.Pos(), "%v defined in both Go and assembly", fn)
}
fn.ABI = defABI
}
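The new check fires when one symbol has both a Go body and an assembly body. A hedged example of what is now rejected (file and function names illustrative):
// add.go: a Go body for add...
func add(x, y int64) int64 { return x + y }

// ...while add_amd64.s also defines the symbol, which now reports
// "add defined in both Go and assembly":
//
//	TEXT ·add(SB), NOSPLIT, $0-24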

View file

@@ -29,8 +29,7 @@ type ArchInfo struct {
// at function entry, and it is ok to clobber registers.
ZeroRange func(*objw.Progs, *obj.Prog, int64, int64, *uint32) *obj.Prog
Ginsnop func(*objw.Progs) *obj.Prog
Ginsnopdefer func(*objw.Progs) *obj.Prog // special ginsnop for deferreturn
Ginsnop func(*objw.Progs) *obj.Prog
// SSAMarkMoves marks any MOVXconst ops that need to avoid clobbering flags.
SSAMarkMoves func(*State, *ssa.Block)
@@ -42,10 +41,10 @@ type ArchInfo struct {
// for all values in the block before SSAGenBlock.
SSAGenBlock func(s *State, b, next *ssa.Block)
// LoadRegResults emits instructions that loads register-assigned results
// into registers. They are already in memory (PPARAMOUT nodes).
// Used in open-coded defer return path.
LoadRegResults func(s *State, f *ssa.Func)
// LoadRegResult emits instructions that loads register-assigned result
// at n+off (n is PPARAMOUT) to register reg. The result is already in
// memory. Used in open-coded defer return path.
LoadRegResult func(s *State, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog
// SpillArgReg emits instructions that spill reg to n+off.
SpillArgReg func(pp *objw.Progs, p *obj.Prog, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog

View file

@@ -114,7 +114,10 @@ func (s *ssafn) AllocFrame(f *ssa.Func) {
}
}
sort.Sort(byStackVar(fn.Dcl))
// Use sort.Stable instead of sort.Sort so stack layout (and thus
// compiler output) is less sensitive to frontend changes that
// introduce or remove unused variables.
sort.Stable(byStackVar(fn.Dcl))
// Reassign stack offsets of the locals that are used.
lastHasPtr := false
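sort.Stable matters because byStackVar compares many variables as equal; a stable sort keeps equal elements in their frontend (declaration) order. A standalone illustration with a deliberately coarse Less:
package main

import (
	"fmt"
	"sort"
)

type byLen []string

func (s byLen) Len() int           { return len(s) }
func (s byLen) Less(i, j int) bool { return len(s[i]) < len(s[j]) }
func (s byLen) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }

func main() {
	vars := []string{"bb", "aa", "cc"} // all equal under Less
	sort.Stable(byLen(vars))
	fmt.Println(vars) // [bb aa cc]: input order preserved for ties
}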

View file

@@ -87,8 +87,7 @@ func InitConfig() {
_ = types.NewPtr(types.Types[types.TINT64]) // *int64
_ = types.NewPtr(types.ErrorType) // *error
types.NewPtrCacheEnabled = false
ssaConfig = ssa.NewConfig(base.Ctxt.Arch.Name, *types_, base.Ctxt, base.Flag.N == 0)
ssaConfig.SoftFloat = Arch.SoftFloat
ssaConfig = ssa.NewConfig(base.Ctxt.Arch.Name, *types_, base.Ctxt, base.Flag.N == 0, Arch.SoftFloat)
ssaConfig.Race = base.Flag.Race
ssaCaches = make([]ssa.Cache, base.Flag.LowerC)
@@ -279,18 +278,6 @@ func regAbiForFuncType(ft *types.Func) bool {
return np > 0 && strings.Contains(ft.Params.FieldType(np-1).String(), magicLastTypeName)
}
// getParam returns the Field of ith param of node n (which is a
// function/method/interface call), where the receiver of a method call is
// considered as the 0th parameter. This does not include the receiver of an
// interface call.
func getParam(n *ir.CallExpr, i int) *types.Field {
t := n.X.Type()
if n.Op() == ir.OCALLMETH {
base.Fatalf("OCALLMETH missed by walkCall")
}
return t.Params().Field(i)
}
// dvarint writes a varint v to the funcdata in symbol x and returns the new offset
func dvarint(x *obj.LSym, off int, v int64) int {
if v < 0 || v > 1e9 {
@@ -324,66 +311,21 @@ func dvarint(x *obj.LSym, off int, v int64) int {
// for stack variables are specified as the number of bytes below varp (pointer to the
// top of the local variables) for their starting address. The format is:
//
// - Max total argument size among all the defers
// - Offset of the deferBits variable
// - Number of defers in the function
// - Information about each defer call, in reverse order of appearance in the function:
// - Total argument size of the call
// - Offset of the closure value to call
// - Number of arguments (including interface receiver or method receiver as first arg)
// - Information about each argument
// - Offset of the stored defer argument in this function's frame
// - Size of the argument
// - Offset of where argument should be placed in the args frame when making call
func (s *state) emitOpenDeferInfo() {
x := base.Ctxt.Lookup(s.curfn.LSym.Name + ".opendefer")
s.curfn.LSym.Func().OpenCodedDeferInfo = x
off := 0
// Compute maxargsize (max size of arguments for all defers)
// first, so we can output it first to the funcdata
var maxargsize int64
for i := len(s.openDefers) - 1; i >= 0; i-- {
r := s.openDefers[i]
argsize := r.n.X.Type().ArgWidth() // TODO register args: but maybe use of abi0 will make this easy
if argsize > maxargsize {
maxargsize = argsize
}
}
off = dvarint(x, off, maxargsize)
off = dvarint(x, off, -s.deferBitsTemp.FrameOffset())
off = dvarint(x, off, int64(len(s.openDefers)))
// Write in reverse-order, for ease of running in that order at runtime
for i := len(s.openDefers) - 1; i >= 0; i-- {
r := s.openDefers[i]
off = dvarint(x, off, r.n.X.Type().ArgWidth())
off = dvarint(x, off, -r.closureNode.FrameOffset())
numArgs := len(r.argNodes)
if r.rcvrNode != nil {
// If there's an interface receiver, treat/place it as the first
// arg. (If there is a method receiver, it's already included as
// first arg in r.argNodes.)
numArgs++
}
off = dvarint(x, off, int64(numArgs))
argAdjust := 0 // presence of receiver offsets the parameter count.
if r.rcvrNode != nil {
off = dvarint(x, off, -okOffset(r.rcvrNode.FrameOffset()))
off = dvarint(x, off, s.config.PtrSize)
off = dvarint(x, off, 0) // This is okay because defer records use ABI0 (for now)
argAdjust++
}
// TODO(register args) assume abi0 for this?
ab := s.f.ABI0
pri := ab.ABIAnalyzeFuncType(r.n.X.Type().FuncType())
for j, arg := range r.argNodes {
f := getParam(r.n, j)
off = dvarint(x, off, -okOffset(arg.FrameOffset()))
off = dvarint(x, off, f.Type.Size())
off = dvarint(x, off, okOffset(pri.InParam(j+argAdjust).FrameOffset(pri)))
}
}
}
@@ -580,7 +522,7 @@ func buildssa(fn *ir.Func, worker int) *ssa.Func {
}
// Populate closure variables.
if !fn.ClosureCalled() {
if fn.Needctxt() {
clo := s.entryNewValue0(ssa.OpGetClosurePtr, s.f.Config.Types.BytePtr)
offset := int64(types.PtrSize) // PtrSize to skip past function entry PC field
for _, n := range fn.ClosureVars {
@@ -650,7 +592,6 @@ func buildssa(fn *ir.Func, worker int) *ssa.Func {
// it mimics the behavior of the former ABI (everything stored) and because it's not 100%
// clear if naming conventions are respected in autogenerated code.
// TODO figure out exactly what's unused, don't spill it. Make liveness fine-grained, also.
// TODO non-amd64 architectures have link registers etc that may require adjustment here.
for _, p := range params.InParams() {
typs, offs := p.RegisterTypesAndOffsets()
for i, t := range typs {
@@ -865,16 +806,6 @@ type openDeferInfo struct {
// function, method, or interface call, to store a closure that panic
// processing can use for this defer.
closureNode *ir.Name
// If defer call is interface call, the address of the argtmp where the
// receiver is stored
rcvr *ssa.Value
// The node representing the argtmp where the receiver is stored
rcvrNode *ir.Name
// The addresses of the argtmps where the evaluated arguments of the defer
// function call are stored.
argVals []*ssa.Value
// The nodes representing the argtmps where the args of the defer are stored
argNodes []*ir.Name
}
type state struct {
@@ -1491,7 +1422,12 @@ func (s *state) stmt(n ir.Node) {
case ir.OAS2DOTTYPE:
n := n.(*ir.AssignListStmt)
res, resok := s.dottype(n.Rhs[0].(*ir.TypeAssertExpr), true)
var res, resok *ssa.Value
if n.Rhs[0].Op() == ir.ODOTTYPE2 {
res, resok = s.dottype(n.Rhs[0].(*ir.TypeAssertExpr), true)
} else {
res, resok = s.dynamicDottype(n.Rhs[0].(*ir.DynamicTypeAssertExpr), true)
}
deref := false
if !TypeOK(n.Rhs[0].Type()) {
if res.Op != ssa.OpLoad {
@@ -2748,6 +2684,11 @@ func (s *state) expr(n ir.Node) *ssa.Value {
res, _ := s.dottype(n, false)
return res
case ir.ODYNAMICDOTTYPE:
n := n.(*ir.DynamicTypeAssertExpr)
res, _ := s.dynamicDottype(n, false)
return res
// binary ops
case ir.OLT, ir.OEQ, ir.ONE, ir.OLE, ir.OGE, ir.OGT:
n := n.(*ir.BinaryExpr)
@@ -3183,7 +3124,7 @@ func (s *state) expr(n ir.Node) *ssa.Value {
}
fallthrough
case ir.OCALLINTER, ir.OCALLMETH:
case ir.OCALLINTER:
n := n.(*ir.CallExpr)
return s.callResult(n, callNormal)
@@ -3191,6 +3132,14 @@ func (s *state) expr(n ir.Node) *ssa.Value {
n := n.(*ir.CallExpr)
return s.newValue1(ssa.OpGetG, n.Type(), s.mem())
case ir.OGETCALLERPC:
n := n.(*ir.CallExpr)
return s.newValue0(ssa.OpGetCallerPC, n.Type())
case ir.OGETCALLERSP:
n := n.(*ir.CallExpr)
return s.newValue0(ssa.OpGetCallerSP, n.Type())
case ir.OAPPEND:
return s.append(n.(*ir.CallExpr), false)
@@ -3703,6 +3652,16 @@ func softfloatInit() {
// TODO: do not emit sfcall if operation can be optimized to constant in later
// opt phase
func (s *state) sfcall(op ssa.Op, args ...*ssa.Value) (*ssa.Value, bool) {
f2i := func(t *types.Type) *types.Type {
switch t.Kind() {
case types.TFLOAT32:
return types.Types[types.TUINT32]
case types.TFLOAT64:
return types.Types[types.TUINT64]
}
return t
}
if callDef, ok := softFloatOps[op]; ok {
switch op {
case ssa.OpLess32F,
@@ -3715,7 +3674,19 @@ func (s *state) sfcall(op ssa.Op, args ...*ssa.Value) (*ssa.Value, bool) {
args[1] = s.newValue1(s.ssaOp(ir.ONEG, types.Types[callDef.rtype]), args[1].Type, args[1])
}
result := s.rtcall(callDef.rtfn, true, []*types.Type{types.Types[callDef.rtype]}, args...)[0]
// runtime functions take uints for floats and returns uints.
// Convert to uints so we use the right calling convention.
for i, a := range args {
if a.Type.IsFloat() {
args[i] = s.newValue1(ssa.OpCopy, f2i(a.Type), a)
}
}
rt := types.Types[callDef.rtype]
result := s.rtcall(callDef.rtfn, true, []*types.Type{f2i(rt)}, args...)[0]
if rt.IsFloat() {
result = s.newValue1(ssa.OpCopy, rt, result)
}
if op == ssa.OpNeq32F || op == ssa.OpNeq64F {
result = s.newValue1(ssa.OpNot, result.Type, result)
}
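The OpCopy between float and integer types above reinterprets bits rather than converting values: softfloat runtime helpers take and return IEEE-754 bit patterns. In ordinary Go the same boundary looks like this (self-contained sketch; the real helpers, such as the runtime's unexported fadd64, operate on the bit patterns directly):
package main

import (
	"fmt"
	"math"
)

// bitsAdd mimics a softfloat helper's signature: uint64 in, uint64
// out. Here it round-trips through hardware floats to stay runnable.
func bitsAdd(a, b uint64) uint64 {
	return math.Float64bits(math.Float64frombits(a) + math.Float64frombits(b))
}

func main() {
	a, b := math.Float64bits(1.5), math.Float64bits(2.25)
	fmt.Println(math.Float64frombits(bitsAdd(a, b))) // 3.75
}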
@@ -4687,17 +4658,14 @@ func (s *state) intrinsicArgs(n *ir.CallExpr) []*ssa.Value {
return args
}
// openDeferRecord adds code to evaluate and store the args for an open-code defer
// openDeferRecord adds code to evaluate and store the function for an open-code defer
// call, and records info about the defer, so we can generate proper code on the
// exit paths. n is the sub-node of the defer node that is the actual function
// call. We will also record funcdata information on where the args are stored
// call. We will also record funcdata information on where the function is stored
// (as well as the deferBits variable), and this will enable us to run the proper
// defer calls during panics.
func (s *state) openDeferRecord(n *ir.CallExpr) {
var args []*ssa.Value
var argNodes []*ir.Name
if buildcfg.Experiment.RegabiDefer && (len(n.Args) != 0 || n.Op() == ir.OCALLINTER || n.X.Type().NumResults() != 0) {
if len(n.Args) != 0 || n.Op() != ir.OCALLFUNC || n.X.Type().NumResults() != 0 {
s.Fatalf("defer call with arguments or results: %v", n)
}
@@ -4705,48 +4673,20 @@ func (s *state) openDeferRecord(n *ir.CallExpr) {
n: n,
}
fn := n.X
if n.Op() == ir.OCALLFUNC {
// We must always store the function value in a stack slot for the
// runtime panic code to use. But in the defer exit code, we will
// call the function directly if it is a static function.
closureVal := s.expr(fn)
closure := s.openDeferSave(nil, fn.Type(), closureVal)
opendefer.closureNode = closure.Aux.(*ir.Name)
if !(fn.Op() == ir.ONAME && fn.(*ir.Name).Class == ir.PFUNC) {
opendefer.closure = closure
}
} else if n.Op() == ir.OCALLMETH {
base.Fatalf("OCALLMETH missed by walkCall")
} else {
if fn.Op() != ir.ODOTINTER {
base.Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op())
}
fn := fn.(*ir.SelectorExpr)
closure, rcvr := s.getClosureAndRcvr(fn)
opendefer.closure = s.openDeferSave(nil, closure.Type, closure)
// Important to get the receiver type correct, so it is recognized
// as a pointer for GC purposes.
opendefer.rcvr = s.openDeferSave(nil, fn.Type().Recv().Type, rcvr)
opendefer.closureNode = opendefer.closure.Aux.(*ir.Name)
opendefer.rcvrNode = opendefer.rcvr.Aux.(*ir.Name)
// We must always store the function value in a stack slot for the
// runtime panic code to use. But in the defer exit code, we will
// call the function directly if it is a static function.
closureVal := s.expr(fn)
closure := s.openDeferSave(fn.Type(), closureVal)
opendefer.closureNode = closure.Aux.(*ir.Name)
if !(fn.Op() == ir.ONAME && fn.(*ir.Name).Class == ir.PFUNC) {
opendefer.closure = closure
}
for _, argn := range n.Args {
var v *ssa.Value
if TypeOK(argn.Type()) {
v = s.openDeferSave(nil, argn.Type(), s.expr(argn))
} else {
v = s.openDeferSave(argn, argn.Type(), nil)
}
args = append(args, v)
argNodes = append(argNodes, v.Aux.(*ir.Name))
}
opendefer.argVals = args
opendefer.argNodes = argNodes
index := len(s.openDefers)
s.openDefers = append(s.openDefers, opendefer)
// Update deferBits only after evaluation and storage to stack of
// args/receiver/interface is successful.
// the function is successful.
bitvalue := s.constInt8(types.Types[types.TUINT8], 1<<uint(index))
newDeferBits := s.newValue2(ssa.OpOr8, types.Types[types.TUINT8], s.variable(deferBitsVar, types.Types[types.TUINT8]), bitvalue)
s.vars[deferBitsVar] = newDeferBits
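(The bit manipulation above is the heart of open-coded defers: bit index of deferBits records that defer number index was reached, and the exit path later tests and clears those bits in reverse order. A self-contained sketch of the protocol — illustrative only; record and deferred are invented bookkeeping names.)

package main

import "fmt"

func main() {
	var deferBits uint8   // one bit per open-coded defer, as in the compiler
	var deferred []func() // the saved closures

	record := func(index int, fn func()) {
		deferBits |= 1 << uint(index) // the OpOr8 update emitted above
		deferred = append(deferred, fn)
	}

	record(0, func() { fmt.Println("defer 0") })
	record(1, func() { fmt.Println("defer 1") })

	// Exit path: test and clear bits in reverse, like openDeferExit.
	for i := len(deferred) - 1; i >= 0; i-- {
		if deferBits&(1<<uint(i)) != 0 {
			deferBits &^= 1 << uint(i)
			deferred[i]()
		}
	}
}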
@@ -4755,57 +4695,47 @@ func (s *state) openDeferRecord(n *ir.CallExpr) {
// openDeferSave generates SSA nodes to store a value (with type t) for an
// open-coded defer at an explicit autotmp location on the stack, so it can be
// reloaded and used for the appropriate call on exit. If type t is SSAable, then
// val must be non-nil (and n should be nil) and val is the value to be stored. If
// type t is non-SSAable, then n must be non-nil (and val should be nil) and n is
// evaluated (via s.addr() below) to get the value that is to be stored. The
// function returns an SSA value representing a pointer to the autotmp location.
func (s *state) openDeferSave(n ir.Node, t *types.Type, val *ssa.Value) *ssa.Value {
canSSA := TypeOK(t)
var pos src.XPos
if canSSA {
pos = val.Pos
} else {
pos = n.Pos()
// reloaded and used for the appropriate call on exit. Type t must be a function type
// (therefore SSAable). val is the value to be stored. The function returns an SSA
// value representing a pointer to the autotmp location.
func (s *state) openDeferSave(t *types.Type, val *ssa.Value) *ssa.Value {
if !TypeOK(t) {
s.Fatalf("openDeferSave of non-SSA-able type %v val=%v", t, val)
}
argTemp := typecheck.TempAt(pos.WithNotStmt(), s.curfn, t)
argTemp.SetOpenDeferSlot(true)
var addrArgTemp *ssa.Value
// Use OpVarLive to make sure stack slots for the args, etc. are not
// removed by dead-store elimination
if !t.HasPointers() {
s.Fatalf("openDeferSave of pointerless type %v val=%v", t, val)
}
pos := val.Pos
temp := typecheck.TempAt(pos.WithNotStmt(), s.curfn, t)
temp.SetOpenDeferSlot(true)
var addrTemp *ssa.Value
// Use OpVarLive to make sure stack slot for the closure is not removed by
// dead-store elimination
if s.curBlock.ID != s.f.Entry.ID {
// Force the argtmp storing this defer function/receiver/arg to be
// declared in the entry block, so that it will be live for the
// defer exit code (which will actually access it only if the
// associated defer call has been activated).
s.defvars[s.f.Entry.ID][memVar] = s.f.Entry.NewValue1A(src.NoXPos, ssa.OpVarDef, types.TypeMem, argTemp, s.defvars[s.f.Entry.ID][memVar])
s.defvars[s.f.Entry.ID][memVar] = s.f.Entry.NewValue1A(src.NoXPos, ssa.OpVarLive, types.TypeMem, argTemp, s.defvars[s.f.Entry.ID][memVar])
addrArgTemp = s.f.Entry.NewValue2A(src.NoXPos, ssa.OpLocalAddr, types.NewPtr(argTemp.Type()), argTemp, s.sp, s.defvars[s.f.Entry.ID][memVar])
// Force the tmp storing this defer function to be declared in the entry
// block, so that it will be live for the defer exit code (which will
// actually access it only if the associated defer call has been activated).
s.defvars[s.f.Entry.ID][memVar] = s.f.Entry.NewValue1A(src.NoXPos, ssa.OpVarDef, types.TypeMem, temp, s.defvars[s.f.Entry.ID][memVar])
s.defvars[s.f.Entry.ID][memVar] = s.f.Entry.NewValue1A(src.NoXPos, ssa.OpVarLive, types.TypeMem, temp, s.defvars[s.f.Entry.ID][memVar])
addrTemp = s.f.Entry.NewValue2A(src.NoXPos, ssa.OpLocalAddr, types.NewPtr(temp.Type()), temp, s.sp, s.defvars[s.f.Entry.ID][memVar])
} else {
// Special case if we're still in the entry block. We can't use
// the above code, since s.defvars[s.f.Entry.ID] isn't defined
// until we end the entry block with s.endBlock().
s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, argTemp, s.mem(), false)
s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, argTemp, s.mem(), false)
addrArgTemp = s.newValue2Apos(ssa.OpLocalAddr, types.NewPtr(argTemp.Type()), argTemp, s.sp, s.mem(), false)
}
if t.HasPointers() {
// Since we may use this argTemp during exit depending on the
// deferBits, we must define it unconditionally on entry.
// Therefore, we must make sure it is zeroed out in the entry
// block if it contains pointers, else GC may wrongly follow an
// uninitialized pointer value.
argTemp.SetNeedzero(true)
}
if !canSSA {
a := s.addr(n)
s.move(t, addrArgTemp, a)
return addrArgTemp
s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, temp, s.mem(), false)
s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, temp, s.mem(), false)
addrTemp = s.newValue2Apos(ssa.OpLocalAddr, types.NewPtr(temp.Type()), temp, s.sp, s.mem(), false)
}
// Since we may use this temp during exit depending on the
// deferBits, we must define it unconditionally on entry.
// Therefore, we must make sure it is zeroed out in the entry
// block if it contains pointers, else GC may wrongly follow an
// uninitialized pointer value.
temp.SetNeedzero(true)
// We are storing to the stack, hence we can avoid the full checks in
// storeType() (no write barrier) and do a simple store().
s.store(t, addrArgTemp, val)
return addrArgTemp
s.store(t, addrTemp, val)
return addrTemp
}
// openDeferExit generates SSA for processing all the open coded defers at exit.
@@ -4849,45 +4779,26 @@ func (s *state) openDeferExit() {
s.vars[deferBitsVar] = maskedval
// Generate code to call the deferred function, using the
// closure/receiver/args that were stored in argtmps at the point
// of the defer statement.
// closure that was stored in argtmps at the point of the defer
// statement.
fn := r.n.X
stksize := fn.Type().ArgWidth()
var ACArgs []*types.Type
var ACResults []*types.Type
var callArgs []*ssa.Value
if r.rcvr != nil {
// rcvr in case of OCALLINTER
v := s.load(r.rcvr.Type.Elem(), r.rcvr)
ACArgs = append(ACArgs, types.Types[types.TUINTPTR])
callArgs = append(callArgs, v)
}
for j, argAddrVal := range r.argVals {
f := getParam(r.n, j)
ACArgs = append(ACArgs, f.Type)
var a *ssa.Value
if !TypeOK(f.Type) {
a = s.newValue2(ssa.OpDereference, f.Type, argAddrVal, s.mem())
} else {
a = s.load(f.Type, argAddrVal)
}
callArgs = append(callArgs, a)
}
var call *ssa.Value
if r.closure != nil {
v := s.load(r.closure.Type.Elem(), r.closure)
s.maybeNilCheckClosure(v, callDefer)
codeptr := s.rawLoad(types.Types[types.TUINTPTR], v)
aux := ssa.ClosureAuxCall(s.f.ABIDefault.ABIAnalyzeTypes(nil, ACArgs, ACResults))
aux := ssa.ClosureAuxCall(s.f.ABIDefault.ABIAnalyzeTypes(nil, nil, nil))
call = s.newValue2A(ssa.OpClosureLECall, aux.LateExpansionResultType(), aux, codeptr, v)
} else {
aux := ssa.StaticAuxCall(fn.(*ir.Name).Linksym(), s.f.ABIDefault.ABIAnalyzeTypes(nil, ACArgs, ACResults))
aux := ssa.StaticAuxCall(fn.(*ir.Name).Linksym(), s.f.ABIDefault.ABIAnalyzeTypes(nil, nil, nil))
call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
}
callArgs = append(callArgs, s.mem())
call.AddArgs(callArgs...)
call.AuxInt = stksize
s.vars[memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(ACResults)), call)
s.vars[memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, 0, call)
// Make sure that the stack slots with pointers are kept live
// through the call (which is a pre-emption point). Also, we will
// use the first call of the last defer exit to compute liveness
@@ -4895,16 +4806,6 @@ func (s *state) openDeferExit() {
if r.closureNode != nil {
s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, r.closureNode, s.mem(), false)
}
if r.rcvrNode != nil {
if r.rcvrNode.Type().HasPointers() {
s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, r.rcvrNode, s.mem(), false)
}
}
for _, argNode := range r.argNodes {
if argNode.Type().HasPointers() {
s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, argNode, s.mem(), false)
}
}
s.endBlock()
s.startBlock(bEnd)
@@ -4952,7 +4853,7 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val
}
}
if buildcfg.Experiment.RegabiDefer && k != callNormal && (len(n.Args) != 0 || n.Op() == ir.OCALLINTER || n.X.Type().NumResults() != 0) {
if k != callNormal && (len(n.Args) != 0 || n.Op() == ir.OCALLINTER || n.X.Type().NumResults() != 0) {
s.Fatalf("go/defer call with arguments: %v", n)
}
@@ -4986,8 +4887,6 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val
// not the point of defer statement.
s.maybeNilCheckClosure(closure, k)
}
case ir.OCALLMETH:
base.Fatalf("OCALLMETH missed by walkCall")
case ir.OCALLINTER:
if fn.Op() != ir.ODOTINTER {
s.Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op())
@@ -5023,55 +4922,31 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val
var call *ssa.Value
if k == callDeferStack {
// Make a defer struct d on the stack.
t := deferstruct(stksize)
if stksize != 0 {
s.Fatalf("deferprocStack with non-zero stack size %d: %v", stksize, n)
}
t := deferstruct()
d := typecheck.TempAt(n.Pos(), s.curfn, t)
s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, d, s.mem())
addr := s.addr(d)
// Must match reflect.go:deferstruct and src/runtime/runtime2.go:_defer.
// 0: siz
s.store(types.Types[types.TUINT32],
s.newValue1I(ssa.OpOffPtr, types.Types[types.TUINT32].PtrTo(), t.FieldOff(0), addr),
s.constInt32(types.Types[types.TUINT32], int32(stksize)))
// 1: started, set in deferprocStack
// 2: heap, set in deferprocStack
// 3: openDefer
// 4: sp, set in deferprocStack
// 5: pc, set in deferprocStack
// 6: fn
// Must match deferstruct() below and src/runtime/runtime2.go:_defer.
// 0: started, set in deferprocStack
// 1: heap, set in deferprocStack
// 2: openDefer
// 3: sp, set in deferprocStack
// 4: pc, set in deferprocStack
// 5: fn
s.store(closure.Type,
s.newValue1I(ssa.OpOffPtr, closure.Type.PtrTo(), t.FieldOff(6), addr),
s.newValue1I(ssa.OpOffPtr, closure.Type.PtrTo(), t.FieldOff(5), addr),
closure)
// 7: panic, set in deferprocStack
// 8: link, set in deferprocStack
// 9: framepc
// 10: varp
// 11: fd
// Then, store all the arguments of the defer call.
ft := fn.Type()
off := t.FieldOff(12) // TODO register args: be sure this isn't a hardcoded param stack offset.
args := n.Args
i0 := 0
// Set receiver (for interface calls). Always a pointer.
if rcvr != nil {
p := s.newValue1I(ssa.OpOffPtr, ft.Recv().Type.PtrTo(), off, addr)
s.store(types.Types[types.TUINTPTR], p, rcvr)
i0 = 1
}
// Set receiver (for method calls).
if n.Op() == ir.OCALLMETH {
base.Fatalf("OCALLMETH missed by walkCall")
}
// Set other args.
// This code is only used when RegabiDefer is not enabled, and arguments are always
// passed on stack.
for i, f := range ft.Params().Fields().Slice() {
s.storeArgWithBase(args[0], f.Type, addr, off+params.InParam(i+i0).FrameOffset(params))
args = args[1:]
}
// 6: panic, set in deferprocStack
// 7: link, set in deferprocStack
// 8: fd
// 9: varp
// 10: framepc
// Call runtime.deferprocStack with pointer to _defer record.
ACArgs = append(ACArgs, types.Types[types.TUINTPTR])
@@ -5079,26 +4954,18 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val
callArgs = append(callArgs, addr, s.mem())
call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
call.AddArgs(callArgs...)
if stksize < int64(types.PtrSize) {
// We need room for both the call to deferprocStack and the call to
// the deferred function.
stksize = int64(types.PtrSize)
}
call.AuxInt = stksize
call.AuxInt = int64(types.PtrSize) // deferprocStack takes a *_defer arg
} else {
// Store arguments to stack, including defer/go arguments and receiver for method calls.
// These are written in SP-offset order.
argStart := base.Ctxt.FixedFrameSize()
// Defer/go args.
if k != callNormal {
// Write argsize and closure (args to newproc/deferproc).
argsize := s.constInt32(types.Types[types.TUINT32], int32(stksize))
ACArgs = append(ACArgs, types.Types[types.TUINT32]) // not argExtra
callArgs = append(callArgs, argsize)
ACArgs = append(ACArgs, types.Types[types.TUINTPTR])
// Write closure (arg to newproc/deferproc).
ACArgs = append(ACArgs, types.Types[types.TUINTPTR]) // not argExtra
callArgs = append(callArgs, closure)
stksize += 2 * int64(types.PtrSize)
argStart += 2 * int64(types.PtrSize)
stksize += int64(types.PtrSize)
argStart += int64(types.PtrSize)
}
// Set receiver (for interface calls).
@@ -5109,9 +4976,6 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val
// Write args.
t := n.X.Type()
args := n.Args
if n.Op() == ir.OCALLMETH {
base.Fatalf("OCALLMETH missed by walkCall")
}
for _, p := range params.InParams() { // includes receiver for interface calls
ACArgs = append(ACArgs, p.Type)
@@ -5314,9 +5178,13 @@ func (s *state) addr(n ir.Node) *ssa.Value {
case ir.OCALLFUNC, ir.OCALLINTER:
n := n.(*ir.CallExpr)
return s.callAddr(n, callNormal)
case ir.ODOTTYPE:
n := n.(*ir.TypeAssertExpr)
v, _ := s.dottype(n, false)
case ir.ODOTTYPE, ir.ODYNAMICDOTTYPE:
var v *ssa.Value
if n.Op() == ir.ODOTTYPE {
v, _ = s.dottype(n.(*ir.TypeAssertExpr), false)
} else {
v, _ = s.dynamicDottype(n.(*ir.DynamicTypeAssertExpr), false)
}
if v.Op != ssa.OpLoad {
s.Fatalf("dottype of non-load")
}
@@ -6210,14 +6078,38 @@ func (s *state) floatToUint(cvttab *f2uCvtTab, n ir.Node, x *ssa.Value, ft, tt *
func (s *state) dottype(n *ir.TypeAssertExpr, commaok bool) (res, resok *ssa.Value) {
iface := s.expr(n.X) // input interface
target := s.reflectType(n.Type()) // target type
byteptr := s.f.Config.Types.BytePtr
var targetItab *ssa.Value
if n.Itab != nil {
targetItab = s.expr(n.Itab)
}
return s.dottype1(n.Pos(), n.X.Type(), n.Type(), iface, target, targetItab, commaok)
}
if n.Type().IsInterface() {
if n.Type().IsEmptyInterface() {
func (s *state) dynamicDottype(n *ir.DynamicTypeAssertExpr, commaok bool) (res, resok *ssa.Value) {
iface := s.expr(n.X)
target := s.expr(n.T)
var itab *ssa.Value
if !n.X.Type().IsEmptyInterface() && !n.Type().IsInterface() {
byteptr := s.f.Config.Types.BytePtr
itab = target
target = s.load(byteptr, s.newValue1I(ssa.OpOffPtr, byteptr, int64(types.PtrSize), itab)) // itab.typ
}
return s.dottype1(n.Pos(), n.X.Type(), n.Type(), iface, target, itab, commaok)
}
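(dottype and dynamicDottype above, and dottype1 below, generate the SSA for source-level type assertions. For orientation, the two source forms being compiled — the commaok parameter distinguishes them, and a failed assertion without ,ok panics via the panicdottype* calls further down:)

package main

import "fmt"

func describe(x interface{}) {
	// commaok == true: a failed assertion yields (zero value, false).
	if s, ok := x.(string); ok {
		fmt.Println("string:", s)
		return
	}
	// commaok == false: a failed assertion panics (panicdottype* at runtime).
	n := x.(int)
	fmt.Println("int:", n)
}

func main() {
	describe("hello")
	describe(42)
}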
// dottype1 implements an x.(T) operation. iface is the argument (x), dst is the type we're asserting to (T)
// and src is the type we're asserting from.
// target is the *runtime._type of dst.
// If src is a nonempty interface and dst is not an interface, targetItab is an itab representing (dst, src). Otherwise it is nil.
// commaok is true if the caller wants a boolean success value. Otherwise, the generated code panics if the conversion fails.
func (s *state) dottype1(pos src.XPos, src, dst *types.Type, iface, target, targetItab *ssa.Value, commaok bool) (res, resok *ssa.Value) {
byteptr := s.f.Config.Types.BytePtr
if dst.IsInterface() {
if dst.IsEmptyInterface() {
// Converting to an empty interface.
// Input could be an empty or nonempty interface.
if base.Debug.TypeAssert > 0 {
base.WarnfAt(n.Pos(), "type assertion inlined")
base.WarnfAt(pos, "type assertion inlined")
}
// Get itab/type field from input.
@@ -6225,7 +6117,7 @@ func (s *state) dottype(n *ir.TypeAssertExpr, commaok bool) (res, resok *ssa.Val
// Conversion succeeds iff that field is not nil.
cond := s.newValue2(ssa.OpNeqPtr, types.Types[types.TBOOL], itab, s.constNil(byteptr))
if n.X.Type().IsEmptyInterface() && commaok {
if src.IsEmptyInterface() && commaok {
// Converting empty interface to empty interface with ,ok is just a nil check.
return iface, cond
}
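(A concrete illustration of the special case above: asserting an empty interface to an empty interface with ,ok succeeds exactly when the value is non-nil, so no type comparison is needed.)

package main

import "fmt"

func main() {
	var e interface{} // nil interface value
	_, ok := e.(interface{})
	fmt.Println(ok, e != nil) // false false — the ,ok result is just the nil check

	e = 1
	_, ok = e.(interface{})
	fmt.Println(ok, e != nil) // true true
}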
@@ -6247,7 +6139,7 @@ func (s *state) dottype(n *ir.TypeAssertExpr, commaok bool) (res, resok *ssa.Val
// On success, return (perhaps modified) input interface.
s.startBlock(bOk)
if n.X.Type().IsEmptyInterface() {
if src.IsEmptyInterface() {
res = iface // Use input interface unchanged.
return
}
@@ -6255,7 +6147,7 @@ func (s *state) dottype(n *ir.TypeAssertExpr, commaok bool) (res, resok *ssa.Val
off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(types.PtrSize), itab)
typ := s.load(byteptr, off)
idata := s.newValue1(ssa.OpIData, byteptr, iface)
res = s.newValue2(ssa.OpIMake, n.Type(), typ, idata)
res = s.newValue2(ssa.OpIMake, dst, typ, idata)
return
}
@@ -6277,62 +6169,62 @@ func (s *state) dottype(n *ir.TypeAssertExpr, commaok bool) (res, resok *ssa.Val
bFail.AddEdgeTo(bEnd)
s.startBlock(bEnd)
idata := s.newValue1(ssa.OpIData, byteptr, iface)
res = s.newValue2(ssa.OpIMake, n.Type(), s.variable(typVar, byteptr), idata)
res = s.newValue2(ssa.OpIMake, dst, s.variable(typVar, byteptr), idata)
resok = cond
delete(s.vars, typVar)
return
}
// converting to a nonempty interface needs a runtime call.
if base.Debug.TypeAssert > 0 {
base.WarnfAt(n.Pos(), "type assertion not inlined")
base.WarnfAt(pos, "type assertion not inlined")
}
if !commaok {
fn := ir.Syms.AssertI2I
if n.X.Type().IsEmptyInterface() {
if src.IsEmptyInterface() {
fn = ir.Syms.AssertE2I
}
data := s.newValue1(ssa.OpIData, types.Types[types.TUNSAFEPTR], iface)
tab := s.newValue1(ssa.OpITab, byteptr, iface)
tab = s.rtcall(fn, true, []*types.Type{byteptr}, target, tab)[0]
return s.newValue2(ssa.OpIMake, n.Type(), tab, data), nil
return s.newValue2(ssa.OpIMake, dst, tab, data), nil
}
fn := ir.Syms.AssertI2I2
if n.X.Type().IsEmptyInterface() {
if src.IsEmptyInterface() {
fn = ir.Syms.AssertE2I2
}
res = s.rtcall(fn, true, []*types.Type{n.Type()}, target, iface)[0]
resok = s.newValue2(ssa.OpNeqInter, types.Types[types.TBOOL], res, s.constInterface(n.Type()))
res = s.rtcall(fn, true, []*types.Type{dst}, target, iface)[0]
resok = s.newValue2(ssa.OpNeqInter, types.Types[types.TBOOL], res, s.constInterface(dst))
return
}
if base.Debug.TypeAssert > 0 {
base.WarnfAt(n.Pos(), "type assertion inlined")
base.WarnfAt(pos, "type assertion inlined")
}
// Converting to a concrete type.
direct := types.IsDirectIface(n.Type())
direct := types.IsDirectIface(dst)
itab := s.newValue1(ssa.OpITab, byteptr, iface) // type word of interface
if base.Debug.TypeAssert > 0 {
base.WarnfAt(n.Pos(), "type assertion inlined")
base.WarnfAt(pos, "type assertion inlined")
}
var targetITab *ssa.Value
if n.X.Type().IsEmptyInterface() {
var wantedFirstWord *ssa.Value
if src.IsEmptyInterface() {
// Looking for pointer to target type.
targetITab = target
wantedFirstWord = target
} else {
// Looking for pointer to itab for target type and source interface.
targetITab = s.expr(n.Itab)
wantedFirstWord = targetItab
}
var tmp ir.Node // temporary for use with large types
var addr *ssa.Value // address of tmp
if commaok && !TypeOK(n.Type()) {
if commaok && !TypeOK(dst) {
// unSSAable type, use temporary.
// TODO: get rid of some of these temporaries.
tmp, addr = s.temp(n.Pos(), n.Type())
tmp, addr = s.temp(pos, dst)
}
cond := s.newValue2(ssa.OpEqPtr, types.Types[types.TBOOL], itab, targetITab)
cond := s.newValue2(ssa.OpEqPtr, types.Types[types.TBOOL], itab, wantedFirstWord)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(cond)
@@ -6346,8 +6238,8 @@ func (s *state) dottype(n *ir.TypeAssertExpr, commaok bool) (res, resok *ssa.Val
if !commaok {
// on failure, panic by calling panicdottype
s.startBlock(bFail)
taddr := s.reflectType(n.X.Type())
if n.X.Type().IsEmptyInterface() {
taddr := s.reflectType(src)
if src.IsEmptyInterface() {
s.rtcall(ir.Syms.PanicdottypeE, false, nil, itab, target, taddr)
} else {
s.rtcall(ir.Syms.PanicdottypeI, false, nil, itab, target, taddr)
@@ -6356,10 +6248,10 @@ func (s *state) dottype(n *ir.TypeAssertExpr, commaok bool) (res, resok *ssa.Val
// on success, return data from interface
s.startBlock(bOk)
if direct {
return s.newValue1(ssa.OpIData, n.Type(), iface), nil
return s.newValue1(ssa.OpIData, dst, iface), nil
}
p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type()), iface)
return s.load(n.Type(), p), nil
p := s.newValue1(ssa.OpIData, types.NewPtr(dst), iface)
return s.load(dst, p), nil
}
// commaok is the more complicated case because we have
@@ -6373,14 +6265,14 @@ func (s *state) dottype(n *ir.TypeAssertExpr, commaok bool) (res, resok *ssa.Val
s.startBlock(bOk)
if tmp == nil {
if direct {
s.vars[valVar] = s.newValue1(ssa.OpIData, n.Type(), iface)
s.vars[valVar] = s.newValue1(ssa.OpIData, dst, iface)
} else {
p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type()), iface)
s.vars[valVar] = s.load(n.Type(), p)
p := s.newValue1(ssa.OpIData, types.NewPtr(dst), iface)
s.vars[valVar] = s.load(dst, p)
}
} else {
p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type()), iface)
s.move(n.Type(), addr, p)
p := s.newValue1(ssa.OpIData, types.NewPtr(dst), iface)
s.move(dst, addr, p)
}
s.vars[okVar] = s.constBool(true)
s.endBlock()
@@ -6389,9 +6281,9 @@ func (s *state) dottype(n *ir.TypeAssertExpr, commaok bool) (res, resok *ssa.Val
// type assertion failed
s.startBlock(bFail)
if tmp == nil {
s.vars[valVar] = s.zeroVal(n.Type())
s.vars[valVar] = s.zeroVal(dst)
} else {
s.zero(n.Type(), addr)
s.zero(dst, addr)
}
s.vars[okVar] = s.constBool(false)
s.endBlock()
@@ -6400,10 +6292,10 @@ func (s *state) dottype(n *ir.TypeAssertExpr, commaok bool) (res, resok *ssa.Val
// merge point
s.startBlock(bEnd)
if tmp == nil {
res = s.variable(valVar, n.Type())
res = s.variable(valVar, dst)
delete(s.vars, valVar)
} else {
res = s.load(n.Type(), addr)
res = s.load(dst, addr)
s.vars[memVar] = s.newValue1A(ssa.OpVarKill, types.TypeMem, tmp.(*ir.Name), s.mem())
}
resok = s.variable(okVar, types.Types[types.TBOOL])
@@ -6921,8 +6813,12 @@ func genssa(f *ssa.Func, pp *objw.Progs) {
// recovers a panic, it will return to caller with right results.
// The results are already in memory, because they are not SSA'd
// when the function has defers (see canSSAName).
if f.OwnAux.ABIInfo().OutRegistersUsed() != 0 {
Arch.LoadRegResults(&s, f)
for _, o := range f.OwnAux.ABIInfo().OutParams() {
n := o.Name.(*ir.Name)
rts, offs := o.RegisterTypesAndOffsets()
for i := range o.Registers {
Arch.LoadRegResult(&s, f, rts[i], ssa.ObjRegForAbiReg(o.Registers[i], f.Config), n, offs[i])
}
}
pp.Prog(obj.ARET)
@@ -7460,18 +7356,6 @@ func (s *State) PrepareCall(v *ssa.Value) {
call, ok := v.Aux.(*ssa.AuxCall)
if ok && call.Fn == ir.Syms.Deferreturn {
// Deferred calls will appear to be returning to
// the CALL deferreturn(SB) that we are about to emit.
// However, the stack trace code will show the line
// of the instruction byte before the return PC.
// To avoid that being an unrelated instruction,
// insert an actual hardware NOP that will have the right line number.
// This is different from obj.ANOP, which is a virtual no-op
// that doesn't make it into the instruction stream.
Arch.Ginsnopdefer(s.pp)
}
if ok {
// Record call graph information for nowritebarrierrec
// analysis.
@@ -7542,10 +7426,6 @@ func (e *ssafn) Auto(pos src.XPos, t *types.Type) *ir.Name {
return typecheck.TempAt(pos, e.curfn, t) // Note: adds new auto to e.curfn.Func.Dcl list
}
func (e *ssafn) DerefItab(it *obj.LSym, offset int64) *obj.LSym {
return reflectdata.ITabSym(it, offset)
}
// SplitSlot returns a slot representing the data of parent starting at offset.
func (e *ssafn) SplitSlot(parent *ssa.LocalSlot, suffix string, offset int64, t *types.Type) ssa.LocalSlot {
node := parent.N
@@ -7676,9 +7556,8 @@ func max8(a, b int8) int8 {
return b
}
// deferstruct makes a runtime._defer structure, with additional space for
// stksize bytes of args.
func deferstruct(stksize int64) *types.Type {
// deferstruct makes a runtime._defer structure.
func deferstruct() *types.Type {
makefield := func(name string, typ *types.Type) *types.Field {
// Unlike the global makefield function, this one needs to set Pkg
// because these types might be compared (in SSA CSE sorting).
@@ -7686,13 +7565,9 @@ func deferstruct(stksize int64) *types.Type {
sym := &types.Sym{Name: name, Pkg: types.LocalPkg}
return types.NewField(src.NoXPos, sym, typ)
}
argtype := types.NewArray(types.Types[types.TUINT8], stksize)
argtype.Width = stksize
argtype.Align = 1
// These fields must match the ones in runtime/runtime2.go:_defer and
// cmd/compile/internal/gc/ssa.go:(*state).call.
// (*state).call above.
fields := []*types.Field{
makefield("siz", types.Types[types.TUINT32]),
makefield("started", types.Types[types.TBOOL]),
makefield("heap", types.Types[types.TBOOL]),
makefield("openDefer", types.Types[types.TBOOL]),
@@ -7704,10 +7579,9 @@ func deferstruct(stksize int64) *types.Type {
makefield("fn", types.Types[types.TUINTPTR]),
makefield("_panic", types.Types[types.TUINTPTR]),
makefield("link", types.Types[types.TUINTPTR]),
makefield("framepc", types.Types[types.TUINTPTR]),
makefield("varp", types.Types[types.TUINTPTR]),
makefield("fd", types.Types[types.TUINTPTR]),
makefield("args", argtype),
makefield("varp", types.Types[types.TUINTPTR]),
makefield("framepc", types.Types[types.TUINTPTR]),
}
// build struct holding the above fields
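(For orientation, a layout-only sketch of the record these fields describe, with the sp and pc slots filled in from the numbered comments in (*state).call above; src/runtime/runtime2.go:_defer is the authoritative definition, and pointer-typed runtime fields appear here as uintptr because this compiler-side struct models layout, not semantics.)

package sketch

// deferRecordSketch mirrors the field order built above, with the sp/pc slots
// from the deferprocStack comments; runtime2.go is authoritative.
type deferRecordSketch struct {
	started   bool
	heap      bool
	openDefer bool
	sp        uintptr // set in deferprocStack
	pc        uintptr // set in deferprocStack
	fn        uintptr // the deferred function value
	_panic    uintptr // *_panic in the runtime
	link      uintptr // *_defer in the runtime
	fd        uintptr // funcdata for the frame's open-coded defers
	varp      uintptr
	framepc   uintptr
}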
