Merge "[dev.typeparams] all: merge dev.regabi (7e0a81d) into dev.typeparams" into dev.typeparams

Gerrit Code Review 2021-01-23 01:19:52 +00:00
commit 1946a77e69
352 changed files with 9476 additions and 7174 deletions

View file

@ -1,15 +1,6 @@
pkg archive/zip, method (*ReadCloser) Open(string) (fs.File, error)
pkg archive/zip, method (*Reader) Open(string) (fs.File, error)
pkg crypto/x509, method (SystemRootsError) Unwrap() error
pkg crypto/x509, type CertificateRequest struct, BasicConstraintsValid bool
pkg crypto/x509, type CertificateRequest struct, ExtKeyUsage []ExtKeyUsage
pkg crypto/x509, type CertificateRequest struct, IsCA bool
pkg crypto/x509, type CertificateRequest struct, KeyUsage KeyUsage
pkg crypto/x509, type CertificateRequest struct, MaxPathLen int
pkg crypto/x509, type CertificateRequest struct, MaxPathLenZero bool
pkg crypto/x509, type CertificateRequest struct, PolicyIdentifiers []asn1.ObjectIdentifier
pkg crypto/x509, type CertificateRequest struct, SubjectKeyId []uint8
pkg crypto/x509, type CertificateRequest struct, UnknownExtKeyUsage []asn1.ObjectIdentifier
pkg debug/elf, const DT_ADDRRNGHI = 1879047935
pkg debug/elf, const DT_ADDRRNGHI DynTag
pkg debug/elf, const DT_ADDRRNGLO = 1879047680
@ -235,9 +226,12 @@ pkg embed, type FS struct
pkg flag, func Func(string, string, func(string) error)
pkg flag, method (*FlagSet) Func(string, string, func(string) error)
pkg go/build, type Package struct, EmbedPatterns []string
pkg go/build, type Package struct, EmbedPatternPos map[string][]token.Position
pkg go/build, type Package struct, IgnoredOtherFiles []string
pkg go/build, type Package struct, TestEmbedPatterns []string
pkg go/build, type Package struct, TestEmbedPatternPos map[string][]token.Position
pkg go/build, type Package struct, XTestEmbedPatterns []string
pkg go/build, type Package struct, XTestEmbedPatternPos map[string][]token.Position
pkg html/template, func ParseFS(fs.FS, ...string) (*Template, error)
pkg html/template, method (*Template) ParseFS(fs.FS, ...string) (*Template, error)
pkg io, func NopCloser(Reader) ReadCloser
@ -404,7 +398,6 @@ pkg runtime/metrics, type Description struct, Cumulative bool
pkg runtime/metrics, type Description struct, Description string
pkg runtime/metrics, type Description struct, Kind ValueKind
pkg runtime/metrics, type Description struct, Name string
pkg runtime/metrics, type Description struct, StopTheWorld bool
pkg runtime/metrics, type Float64Histogram struct
pkg runtime/metrics, type Float64Histogram struct, Buckets []float64
pkg runtime/metrics, type Float64Histogram struct, Counts []uint64
@ -437,10 +430,8 @@ pkg syscall (linux-arm-cgo), func AllThreadsSyscall(uintptr, uintptr, uintptr, u
pkg syscall (linux-arm-cgo), func AllThreadsSyscall6(uintptr, uintptr, uintptr, uintptr, uintptr, uintptr, uintptr) (uintptr, uintptr, Errno)
pkg syscall (linux-arm-cgo), func Setegid(int) error
pkg syscall (linux-arm-cgo), func Seteuid(int) error
pkg syscall (windows-386), func RtlGenRandom(*uint8, uint32) error
pkg syscall (windows-386), method (*DLLError) Unwrap() error
pkg syscall (windows-386), type SysProcAttr struct, NoInheritHandles bool
pkg syscall (windows-amd64), func RtlGenRandom(*uint8, uint32) error
pkg syscall (windows-amd64), method (*DLLError) Unwrap() error
pkg syscall (windows-amd64), type SysProcAttr struct, NoInheritHandles bool
pkg testing/fstest, func TestFS(fs.FS, ...string) error

View file

@ -1023,13 +1023,13 @@ New files that you contribute should use the standard copyright header:
</p>
<pre>
// Copyright 2020 The Go Authors. All rights reserved.
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
</pre>
<p>
(Use the current year if you're reading this in 2021 or beyond.)
(Use the current year if you're reading this in 2022 or beyond.)
Files in the repository are copyrighted the year they are added.
Do not update the copyright year on files that you change.
</p>

View file

@ -19,13 +19,11 @@ editing, navigation, testing, and debugging experience.
</p>
<ul>
<li><a href="https://github.com/fatih/vim-go">vim</a>: vim-go plugin provides Go programming language support</li>
<li><a href="https://marketplace.visualstudio.com/items?itemName=golang.go">Visual Studio Code</a>:
Go extension provides support for the Go programming language</li>
<li><a href="https://www.jetbrains.com/go">GoLand</a>: GoLand is distributed either as a standalone IDE
or as a plugin for IntelliJ IDEA Ultimate</li>
<li><a href="https://atom.io/packages/go-plus">Atom</a>: Go-Plus is an Atom package that provides enhanced Go support</li>
</ul>
<li><a href="https://github.com/fatih/vim-go">vim</a>: vim-go plugin provides Go programming language support</li>
<p>
Note that these are only a few top solutions; a more comprehensive

View file

@ -55,7 +55,9 @@ Do not send CLs removing the interior tags from such phrases.
Go 1.16 adds an <code>ios/amd64</code> port, which targets the iOS
simulator running on AMD64-based macOS. Previously this was
unofficially supported through <code>darwin/amd64</code> with
the <code>ios</code> build tag set.
the <code>ios</code> build tag set. See also
<a href="/misc/ios/README"><code>misc/ios/README</code></a> for
details about how to build programs for iOS and the iOS simulator.
</p>
<p><!-- golang.org/issue/23011 -->
@ -162,6 +164,12 @@ Do not send CLs removing the interior tags from such phrases.
non-reproducible builds.
</p>
<p><!-- golang.org/issue/43052 -->
The <code>go</code> command now disallows non-ASCII import paths in module
mode. Non-ASCII module paths have already been disallowed, so this change
affects module subdirectory paths that contain non-ASCII characters.
</p>
<h4 id="embed">Embedding Files</h4>
<p>
@ -504,6 +512,16 @@ func TestFoo(t *testing.T) {
in mind.
</p>
<dl id="archive/zip"><dt><a href="/pkg/archive/zip/">archive/zip</a></dt>
<dd>
<p><!-- CL 243937 -->
The new <a href="/pkg/archive/zip/#Reader.Open"><code>Reader.Open</code></a>
method implements the <a href="/pkg/io/fs/#FS"><code>fs.FS</code></a>
interface.
</p>
</dd>
</dl>
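<p>
A minimal sketch of the new method in use (the archive name
<code>example.zip</code> is hypothetical):
</p>
<pre>
package main

import (
	"archive/zip"
	"fmt"
	"io/fs"
	"log"
)

func main() {
	// *zip.ReadCloser (and *zip.Reader) now implement fs.FS.
	r, err := zip.OpenReader("example.zip")
	if err != nil {
		log.Fatal(err)
	}
	defer r.Close()

	// Any io/fs helper applies, e.g. walking the archive's file tree.
	err = fs.WalkDir(r, ".", func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		fmt.Println(path)
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}
</pre>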
<dl id="crypto/dsa"><dt><a href="/pkg/crypto/dsa/">crypto/dsa</a></dt>
<dd>
<p><!-- CL 257939 -->
@ -588,14 +606,6 @@ func TestFoo(t *testing.T) {
a malformed certificate.
</p>
<p><!-- CL 233163 -->
A number of additional fields have been added to the
<a href="/pkg/crypto/x509/#CertificateRequest"><code>CertificateRequest</code></a> type.
These fields are now parsed in <a href="/pkg/crypto/x509/#ParseCertificateRequest">
<code>ParseCertificateRequest</code></a> and marshalled in
<a href="/pkg/crypto/x509/#CreateCertificateRequest"><code>CreateCertificateRequest</code></a>.
</p>
<p><!-- CL 257939 -->
DSA signature verification is no longer supported. Note that DSA signature
generation was never supported.
@ -616,6 +626,16 @@ func TestFoo(t *testing.T) {
</dd>
</dl><!-- crypto/x509 -->
<dl id="debug/elf"><dt><a href="/pkg/debug/elf/">debug/elf</a></dt>
<dd>
<p><!-- CL 255138 -->
More <a href="/pkg/debug/elf/#DT_NULL"><code>DT</code></a>
and <a href="/pkg/debug/elf/#PT_NULL"><code>PT</code></a>
constants have been added.
</p>
</dd>
</dl><!-- debug/elf -->
<dl id="encoding/asn1"><dt><a href="/pkg/encoding/asn1">encoding/asn1</a></dt>
<dd>
<p><!-- CL 255881 -->
@ -665,6 +685,18 @@ func TestFoo(t *testing.T) {
</dd>
</dl><!-- flag -->
<dl id="html/template"><dt><a href="/pkg/html/template/">html/template</a></dt>
<dd>
<p><!-- CL 243938 -->
The new <a href="/pkg/html/template/#ParseFS"><code>template.ParseFS</code></a>
function and <a href="/pkg/html/template/#Template.ParseFS"><code>template.Template.ParseFS</code></a>
method are like <a href="/pkg/html/template/#ParseGlob"><code>template.ParseGlob</code></a>
and <a href="/pkg/html/template/#Template.ParseGlob"><code>template.Template.ParseGlob</code></a>,
but read the templates from an <a href="/pkg/io/fs/#FS"><code>fs.FS</code></a>.
</p>
</dd>
</dl><!-- html/template -->
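<p>
A minimal sketch of <code>ParseFS</code> reading from an
<code>embed.FS</code> (the <code>templates</code> directory and the
template names are assumed):
</p>
<pre>
package main

import (
	"embed"
	"html/template"
	"log"
	"os"
)

//go:embed templates/*.tmpl
var content embed.FS // assumed directory of templates

func main() {
	// ParseFS accepts any fs.FS, such as an embed.FS.
	t, err := template.ParseFS(content, "templates/*.tmpl")
	if err != nil {
		log.Fatal(err)
	}
	if err := t.ExecuteTemplate(os.Stdout, "index.tmpl", nil); err != nil {
		log.Fatal(err)
	}
}
</pre>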
<dl id="io"><dt><a href="/pkg/io/">io</a></dt>
<dd>
<p><!-- CL 261577 -->
@ -774,6 +806,25 @@ func TestFoo(t *testing.T) {
environment variable for <code>https://</code> URLs when
<code>HTTPS_PROXY</code> is unset.
</p>
<p><!-- 259917 -->
The <a href="/pkg/net/http/#Transport"><code>Transport</code></a>
type has a new field
<a href="/pkg/net/http/#Transport.GetProxyConnectHeader"><code>GetProxyConnectHeader</code></a>
which may be set to a function that returns headers to send to a
proxy during a <code>CONNECT</code> request.
In effect <code>GetProxyConnectHeader</code> is a dynamic
version of the existing field
<a href="/pkg/net/http/#Transport.ProxyConnectHeader"><code>ProxyConnectHeader</code></a>;
if <code>GetProxyConnectHeader</code> is not <code>nil</code>,
then <code>ProxyConnectHeader</code> is ignored.
</p>
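<p>
A sketch of the dynamic variant (the bearer token is a placeholder):
</p>
<pre>
package main

import (
	"context"
	"net/http"
	"net/url"
)

// newTransport returns a Transport that computes CONNECT headers per request.
func newTransport() *http.Transport {
	return &http.Transport{
		Proxy: http.ProxyFromEnvironment,
		// Called for each CONNECT request; proxyURL and target identify the tunnel.
		GetProxyConnectHeader: func(ctx context.Context, proxyURL *url.URL, target string) (http.Header, error) {
			h := make(http.Header)
			h.Set("Proxy-Authorization", "Bearer example-token") // placeholder credential
			return h, nil
		},
	}
}

func main() {
	client := &http.Client{Transport: newTransport()}
	_ = client // use client as usual; the headers are added only for CONNECT
}
</pre>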
<p><!-- CL 243939 -->
The new <a href="/pkg/net/http/#FS"><code>http.FS</code></a>
function converts an <a href="/pkg/io/fs/#FS"><code>fs.FS</code></a>
to an <a href="/pkg/net/http/#FileSystem"><code>http.FileSystem</code></a>
for use with <a href="/pkg/net/http/#FileServer"><code>http.FileServer</code></a>.
</p>
</dd>
</dl><!-- net/http -->
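<p>
A minimal sketch serving embedded files (the <code>static</code>
directory is assumed):
</p>
<pre>
package main

import (
	"embed"
	"log"
	"net/http"
)

//go:embed static
var static embed.FS // assumed directory of assets

func main() {
	// http.FS adapts an fs.FS into an http.FileSystem for FileServer.
	http.Handle("/", http.FileServer(http.FS(static)))
	log.Fatal(http.ListenAndServe(":8080", nil))
}
</pre>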
@ -834,6 +885,21 @@ func TestFoo(t *testing.T) {
<dl id="path/filepath"><dt><a href="/pkg/path/filepath/">path/filepath</a></dt>
<dd>
<p><!-- CL 267887 -->
The new function
<a href="/pkg/path/filepath/WalkDir"><code>WalkDir</code></a>
is similar to
<a href="/pkg/path/filepath/Walk"><code>Walk</code></a>,
but is typically more efficient.
The function passed to <code>WalkDir</code> receives a
<a href="/pkg/io/fs/#DirEntry"><code>fs.DirEntry</code></a>
instead of a
<a href="/pkg/io/fs/#FileInfo"><code>fs.FileInfo</code></a>.
(To clarify for those who recall the <code>Walk</code> function
as taking an <a href="/pkg/os/#FileInfo"><code>os.FileInfo</code></a>,
<code>os.FileInfo</code> is now an alias for <code>fs.FileInfo</code>.)
</p>
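<p>
A minimal sketch; note the callback receives an <code>fs.DirEntry</code>:
</p>
<pre>
package main

import (
	"fmt"
	"io/fs"
	"log"
	"path/filepath"
)

func main() {
	// WalkDir avoids the per-file os.Lstat that Walk performs.
	err := filepath.WalkDir(".", func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		fmt.Println(path, "dir:", d.IsDir())
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}
</pre>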
<p><!-- CL 264397, golang.org/issues/28614 -->
The <a href="/pkg/path/filepath#Match"><code>Match</code></a> and
<a href="/pkg/path/filepath#Glob"><code>Glob</code></a> functions now
@ -845,17 +911,6 @@ func TestFoo(t *testing.T) {
</dd>
</dl><!-- path/filepath -->
<dl id="reflect"><dt><a href="/pkg/reflect/">reflect</a></dt>
<dd>
<p><!-- CL 248341, golang.org/issues/40281 -->
<a href="/pkg/reflect/#StructTag"><code>StructTag</code></a>
now allows multiple space-separated keys in key:value pairs,
as in <code>`json xml:"field1"`</code> (equivalent to
<code>`json:"field1" xml:"field1"`</code>).
</p>
</dd>
</dl><!-- reflect -->
<dl id="runtime/debug"><dt><a href="/pkg/runtime/debug/">runtime/debug</a></dt>
<dd>
<p><!-- CL 249677 -->
@ -893,11 +948,11 @@ func TestFoo(t *testing.T) {
</p>
<p><!-- CL 261917 -->
<a href="/pkg/syscall/?GOOS=windows#SysProcAttr"><code>SysProcAttr</code></a> on Windows has a new NoInheritHandles field that disables inheriting handles when creating a new process.
<a href="/pkg/syscall/?GOOS=windows#SysProcAttr"><code>SysProcAttr</code></a> on Windows has a new <code>NoInheritHandles</code> field that disables inheriting handles when creating a new process.
</p>
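<p>
A sketch of setting the new field (Windows-only; the command is
illustrative):
</p>
<pre>
//go:build windows

package main

import (
	"log"
	"os/exec"
	"syscall"
)

func main() {
	cmd := exec.Command("cmd", "/c", "echo hello")
	// The child process receives none of this process's handles.
	cmd.SysProcAttr = &syscall.SysProcAttr{NoInheritHandles: true}
	if err := cmd.Run(); err != nil {
		log.Fatal(err)
	}
}
</pre>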
<p><!-- CL 269761, golang.org/issue/42584 -->
<a href="/pkg/syscall/?GOOS=windows#DLLError"><code>DLLError</code></a> on Windows now has an Unwrap function for unwrapping its underlying error.
<a href="/pkg/syscall/?GOOS=windows#DLLError"><code>DLLError</code></a> on Windows now has an <code>Unwrap</code> method for unwrapping its underlying error.
</p>
<p><!-- CL 210639 -->
@ -907,6 +962,16 @@ func TestFoo(t *testing.T) {
and related calls are now implemented.
Previously, they returned a <code>syscall.EOPNOTSUPP</code> error.
</p>
<p><!-- CL 210639 -->
On Linux, the new functions
<a href="/pkg/syscall/#AllThreadsSyscall"><code>AllThreadsSyscall</code></a>
and <a href="/pkg/syscall/#AllThreadsSyscall6"><code>AllThreadsSyscall6</code></a>
may be used to make a system call on all Go threads in the process.
These functions may only be used by programs that do not use cgo;
if a program uses cgo, they will always return
<a href="/pkg/syscall/#ENOTSUP"><code>syscall.ENOTSUP</code></a>.
</p>
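<p>
A sketch using <code>prctl(2)</code> to flip a per-thread flag on every
Go thread (the option constant is written out here for clarity):
</p>
<pre>
//go:build linux

package main

import (
	"fmt"
	"syscall"
)

const prSetKeepcaps = 8 // prctl(2) PR_SET_KEEPCAPS, per-thread state

func main() {
	_, _, errno := syscall.AllThreadsSyscall(syscall.SYS_PRCTL, prSetKeepcaps, 1, 0)
	if errno != 0 {
		// Always syscall.ENOTSUP in programs that use cgo.
		fmt.Println("AllThreadsSyscall:", errno)
		return
	}
	fmt.Println("PR_SET_KEEPCAPS set on all Go threads")
}
</pre>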
</dd>
</dl><!-- syscall -->
@ -916,6 +981,14 @@ func TestFoo(t *testing.T) {
Newline characters are now allowed inside action delimiters,
permitting actions to span multiple lines.
</p>
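<p>
A minimal sketch of an action spanning multiple lines:
</p>
<pre>
package main

import (
	"log"
	"os"
	"text/template"
)

func main() {
	// Before Go 1.16 a newline between {{ and }} was a parse error.
	const src = `{{ if
	.Ready
}}ready{{ else }}not ready{{ end }}
`
	t := template.Must(template.New("demo").Parse(src))
	if err := t.Execute(os.Stdout, struct{ Ready bool }{Ready: true}); err != nil {
		log.Fatal(err)
	}
}
</pre>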
<p><!-- CL 243938 -->
The new <a href="/pkg/text/template/#ParseFS"><code>template.ParseFS</code></a>
function and <a href="/pkg/text/template/#Template.ParseFS"><code>template.Template.ParseFS</code></a>
method are like <a href="/pkg/text/template/#ParseGlob"><code>template.ParseGlob</code></a>
and <a href="/pkg/text/template/#Template.ParseGlob"><code>template.Template.ParseGlob</code></a>,
but read the templates from an <a href="/pkg/io/fs/#FS"><code>fs.FS</code></a>.
</p>
</dd>
</dl><!-- text/template -->

View file

@ -8,8 +8,8 @@
# Consult https://www.iana.org/time-zones for the latest versions.
# Versions to use.
CODE=2020e
DATA=2020e
CODE=2020f
DATA=2020f
set -e
rm -rf work

Binary file not shown.

View file

@ -30,7 +30,7 @@ func TestCrossPackageTests(t *testing.T) {
switch runtime.GOOS {
case "android":
t.Skip("Can't exec cmd/go subprocess on Android.")
case "darwin", "ios":
case "ios":
switch runtime.GOARCH {
case "arm64":
t.Skip("Can't exec cmd/go subprocess on iOS.")

View file

@ -118,11 +118,6 @@ func testMain(m *testing.M) int {
cc = append(cc, s[start:])
}
if GOOS == "darwin" || GOOS == "ios" {
// For Darwin/ARM.
// TODO: do we still need this?
cc = append(cc, []string{"-framework", "CoreFoundation", "-framework", "Foundation"}...)
}
if GOOS == "aix" {
// -Wl,-bnoobjreorder is mandatory to keep the same layout
// in .text section.

View file

@ -7,6 +7,13 @@ set to the clang wrapper that invokes clang for iOS. For example, this command r
GOOS=ios GOARCH=amd64 CGO_ENABLED=1 CC_FOR_TARGET=$(pwd)/../misc/ios/clangwrap.sh ./all.bash
If CC_FOR_TARGET is not set when the toolchain is built (make.bash or all.bash), CC
can be set on the command line. For example,
GOOS=ios GOARCH=amd64 CGO_ENABLED=1 CC=$(go env GOROOT)/misc/ios/clangwrap.sh go build
Setting CC is not necessary if the toolchain is built with CC_FOR_TARGET set.
To use the go tool to run individual programs and tests, put $GOROOT/bin into PATH to ensure
the go_ios_$GOARCH_exec wrapper is found. For example, to run the archive/tar tests:

View file

@ -28,7 +28,7 @@ func isASCII(s string) bool {
}
// toASCII converts the input to an ASCII C-style string.
// This a best effort conversion, so invalid characters are dropped.
// This is a best effort conversion, so invalid characters are dropped.
func toASCII(s string) string {
if isASCII(s) {
return s

View file

@ -16,10 +16,10 @@ import (
"go/parser"
"go/token"
"go/types"
exec "internal/execabs"
"io"
"log"
"os"
"os/exec"
"path/filepath"
"regexp"
"runtime"

View file

@ -10,9 +10,9 @@ package main
import (
"fmt"
exec "internal/execabs"
"log"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"

View file

@ -305,7 +305,7 @@ func (p *Parser) pseudo(word string, operands [][]lex.Token) bool {
// references and writes symabis information to w.
//
// The symabis format is documented at
// cmd/compile/internal/gc.readSymABIs.
// cmd/compile/internal/ssagen.ReadSymABIs.
func (p *Parser) symDefRef(w io.Writer, word string, operands [][]lex.Token) {
switch word {
case "TEXT":

View file

@ -1549,7 +1549,14 @@ func (p *Package) gccBaseCmd() []string {
func (p *Package) gccMachine() []string {
switch goarch {
case "amd64":
if goos == "darwin" {
return []string{"-arch", "x86_64", "-m64"}
}
return []string{"-m64"}
case "arm64":
if goos == "darwin" {
return []string{"-arch", "arm64"}
}
case "386":
return []string{"-m32"}
case "arm":

View file

@ -14,10 +14,10 @@ import (
"go/ast"
"go/printer"
"go/token"
exec "internal/execabs"
"internal/xcoff"
"io"
"os"
"os/exec"
"path/filepath"
"regexp"
"sort"
@ -953,9 +953,9 @@ func (p *Package) writeExports(fgo2, fm, fgcc, fgcch io.Writer) {
// Build the wrapper function compiled by gcc.
gccExport := ""
if goos == "windows" {
gccExport = "__declspec(dllexport)"
gccExport = "__declspec(dllexport) "
}
s := fmt.Sprintf("%s %s %s(", gccExport, gccResult, exp.ExpName)
s := fmt.Sprintf("%s%s %s(", gccExport, gccResult, exp.ExpName)
if fn.Recv != nil {
s += p.cgoType(fn.Recv.List[0].Type).C.String()
s += " recv"

View file

@ -8,9 +8,9 @@ import (
"bytes"
"fmt"
"go/token"
exec "internal/execabs"
"io/ioutil"
"os"
"os/exec"
)
// run runs the command argv, feeding in stdin on standard input.
@ -63,7 +63,7 @@ func run(stdin []byte, argv []string) (stdout, stderr []byte, ok bool) {
p.Env = append(os.Environ(), "TERM=dumb")
err := p.Run()
if _, ok := err.(*exec.ExitError); err != nil && !ok {
fatalf("%s", err)
fatalf("exec %s: %s", argv[0], err)
}
ok = p.ProcessState.Success()
stdout, stderr = bout.Bytes(), berr.Bytes()
@ -88,7 +88,7 @@ func fatalf(msg string, args ...interface{}) {
// If we've already printed other errors, they might have
// caused the fatal condition. Assume they're enough.
if nerrors == 0 {
fmt.Fprintf(os.Stderr, msg+"\n", args...)
fmt.Fprintf(os.Stderr, "cgo: "+msg+"\n", args...)
}
os.Exit(2)
}

View file

@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
package abi
import (
"cmd/compile/internal/types"
@ -28,7 +28,35 @@ type ABIParamResultInfo struct {
intSpillSlots int
floatSpillSlots int
offsetToSpillArea int64
config ABIConfig // to enable String() method
config *ABIConfig // to enable String() method
}
func (a *ABIParamResultInfo) InParams() []ABIParamAssignment {
return a.inparams
}
func (a *ABIParamResultInfo) OutParams() []ABIParamAssignment {
return a.outparams
}
func (a *ABIParamResultInfo) InParam(i int) ABIParamAssignment {
return a.inparams[i]
}
func (a *ABIParamResultInfo) OutParam(i int) ABIParamAssignment {
return a.outparams[i]
}
func (a *ABIParamResultInfo) IntSpillCount() int {
return a.intSpillSlots
}
func (a *ABIParamResultInfo) FloatSpillCount() int {
return a.floatSpillSlots
}
func (a *ABIParamResultInfo) SpillAreaOffset() int64 {
return a.offsetToSpillArea
}
// RegIndex stores the index into the set of machine registers used by
@ -66,11 +94,17 @@ type ABIConfig struct {
regAmounts RegAmounts
}
// NewABIConfig returns a new ABI configuration for an architecture with
// iRegsCount integer/pointer registers and fRegsCount floating point registers.
func NewABIConfig(iRegsCount, fRegsCount int) *ABIConfig {
return &ABIConfig{RegAmounts{iRegsCount, fRegsCount}}
}
// ABIAnalyze takes a function type 't' and an ABI rules description
// 'config' and analyzes the function to determine how its parameters
// and results will be passed (in registers or on the stack), returning
// an ABIParamResultInfo object that holds the results of the analysis.
func ABIAnalyze(t *types.Type, config ABIConfig) ABIParamResultInfo {
func ABIAnalyze(t *types.Type, config *ABIConfig) ABIParamResultInfo {
setup()
s := assignState{
rTotal: config.regAmounts,
@ -124,7 +158,7 @@ func (c *RegAmounts) regString(r RegIndex) string {
// toString method renders an ABIParamAssignment in human-readable
// form, suitable for debugging or unit testing.
func (ri *ABIParamAssignment) toString(config ABIConfig) string {
func (ri *ABIParamAssignment) toString(config *ABIConfig) string {
regs := "R{"
for _, r := range ri.Registers {
regs += " " + config.regAmounts.regString(r)

View file

@ -1056,7 +1056,11 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
ssa.OpARM64LessThanF,
ssa.OpARM64LessEqualF,
ssa.OpARM64GreaterThanF,
ssa.OpARM64GreaterEqualF:
ssa.OpARM64GreaterEqualF,
ssa.OpARM64NotLessThanF,
ssa.OpARM64NotLessEqualF,
ssa.OpARM64NotGreaterThanF,
ssa.OpARM64NotGreaterEqualF:
// generate boolean values using CSET
p := s.Prog(arm64.ACSET)
p.From.Type = obj.TYPE_REG // assembler encodes conditional bits in Reg
@ -1100,10 +1104,16 @@ var condBits = map[ssa.Op]int16{
ssa.OpARM64GreaterThanU: arm64.COND_HI,
ssa.OpARM64GreaterEqual: arm64.COND_GE,
ssa.OpARM64GreaterEqualU: arm64.COND_HS,
ssa.OpARM64LessThanF: arm64.COND_MI,
ssa.OpARM64LessEqualF: arm64.COND_LS,
ssa.OpARM64GreaterThanF: arm64.COND_GT,
ssa.OpARM64GreaterEqualF: arm64.COND_GE,
ssa.OpARM64LessThanF: arm64.COND_MI, // Less than
ssa.OpARM64LessEqualF: arm64.COND_LS, // Less than or equal to
ssa.OpARM64GreaterThanF: arm64.COND_GT, // Greater than
ssa.OpARM64GreaterEqualF: arm64.COND_GE, // Greater than or equal to
// The following condition codes include the unordered case, to handle comparisons involving NaN.
ssa.OpARM64NotLessThanF: arm64.COND_PL, // Greater than, equal to, or unordered
ssa.OpARM64NotLessEqualF: arm64.COND_HI, // Greater than or unordered
ssa.OpARM64NotGreaterThanF: arm64.COND_LE, // Less than, equal to or unordered
ssa.OpARM64NotGreaterEqualF: arm64.COND_LT, // Less than or unordered
}
var blockJump = map[ssa.BlockKind]struct {

View file

@ -6,12 +6,8 @@ package base
import (
"os"
"cmd/internal/obj"
)
var Ctxt *obj.Link
var atExitFuncs []func()
func AtExit(f func()) {

View file

@ -175,7 +175,7 @@ func ParseFlags() {
if (*Flag.Shared || *Flag.Dynlink || *Flag.LinkShared) && !Ctxt.Arch.InFamily(sys.AMD64, sys.ARM, sys.ARM64, sys.I386, sys.PPC64, sys.RISCV64, sys.S390X) {
log.Fatalf("%s/%s does not support -shared", objabi.GOOS, objabi.GOARCH)
}
parseSpectre(Flag.Spectre) // left as string for recordFlags
parseSpectre(Flag.Spectre) // left as string for RecordFlags
Ctxt.Flag_shared = Ctxt.Flag_dynlink || Ctxt.Flag_shared
Ctxt.Flag_optimize = Flag.N == 0

View file

@ -0,0 +1,36 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package base
import (
"cmd/internal/obj"
)
var Ctxt *obj.Link
// TODO(mdempsky): These should probably be obj.Link methods.
// PkgLinksym returns the linker symbol for name within the given
// package prefix. For user packages, prefix should be the package
// path encoded with objabi.PathToPrefix.
func PkgLinksym(prefix, name string, abi obj.ABI) *obj.LSym {
if name == "_" {
// TODO(mdempsky): Cleanup callers and Fatalf instead.
return linksym(prefix, "_", abi)
}
return linksym(prefix, prefix+"."+name, abi)
}
// Linkname returns the linker symbol for the given name as it might
// appear within a //go:linkname directive.
func Linkname(name string, abi obj.ABI) *obj.LSym {
return linksym("_", name, abi)
}
// linksym is an internal helper function for implementing the above
// exported APIs.
func linksym(pkg, name string, abi obj.ABI) *obj.LSym {
return Ctxt.LookupABIInit(name, abi, func(r *obj.LSym) { r.Pkg = pkg })
}

View file

@ -121,7 +121,7 @@ func ErrorfAt(pos src.XPos, format string, args ...interface{}) {
lasterror.syntax = pos
} else {
// only one of multiple equal non-syntax errors per line
// (flusherrors shows only one of them, so we filter them
// (FlushErrors shows only one of them, so we filter them
// here as best as we can (they may not appear in order)
// so that we don't count them here and exit early, and
// then have nothing to show for.)

View file

@ -37,7 +37,7 @@ func NewBulk(nbit int32, count int32) Bulk {
nword := (nbit + wordBits - 1) / wordBits
size := int64(nword) * int64(count)
if int64(int32(size*4)) != size*4 {
base.Fatalf("bvbulkalloc too big: nbit=%d count=%d nword=%d size=%d", nbit, count, nword, size)
base.Fatalf("NewBulk too big: nbit=%d count=%d nword=%d size=%d", nbit, count, nword, size)
}
return Bulk{
words: make([]uint32, size),

View file

@ -75,7 +75,7 @@ func stmts(nn *ir.Nodes) {
// might be the target of a goto. See issue 28616.
if body := body; len(body) != 0 {
switch body[(len(body) - 1)].Op() {
case ir.ORETURN, ir.ORETJMP, ir.OPANIC:
case ir.ORETURN, ir.OTAILCALL, ir.OPANIC:
if i > lastLabel {
cut = true
}

View file

@ -28,7 +28,7 @@ func Info(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.Scope,
if fn.Nname != nil {
expect := fn.Linksym()
if fnsym.ABI() == obj.ABI0 {
expect = fn.Sym().LinksymABI0()
expect = fn.LinksymABI(obj.ABI0)
}
if fnsym != expect {
base.Fatalf("unexpected fnsym: %v != %v", fnsym, expect)
@ -136,7 +136,7 @@ func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []*ir
// Collect a raw list of DWARF vars.
var vars []*dwarf.Var
var decls []*ir.Name
var selected map[*ir.Name]bool
var selected ir.NameSet
if base.Ctxt.Flag_locationlists && base.Ctxt.Flag_optimize && fn.DebugInfo != nil && complexOK {
decls, vars, selected = createComplexVars(fnsym, fn)
} else {
@ -161,7 +161,7 @@ func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []*ir
// For non-SSA-able arguments, however, the correct information
// is known -- they have a single home on the stack.
for _, n := range dcl {
if _, found := selected[n]; found {
if selected.Has(n) {
continue
}
c := n.Sym().Name[0]
@ -186,19 +186,11 @@ func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []*ir
isReturnValue := (n.Class == ir.PPARAMOUT)
if n.Class == ir.PPARAM || n.Class == ir.PPARAMOUT {
abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
} else if n.Class == ir.PAUTOHEAP {
// If dcl in question has been promoted to heap, do a bit
// of extra work to recover original class (auto or param);
// see issue 30908. This insures that we get the proper
// signature in the abstract function DIE, but leaves a
// misleading location for the param (we want pointer-to-heap
// and not stack).
}
if n.Esc() == ir.EscHeap {
// The variable in question has been promoted to the heap.
// Its address is in n.Heapaddr.
// TODO(thanm): generate a better location expression
stackcopy := n.Stackcopy
if stackcopy != nil && (stackcopy.Class == ir.PPARAM || stackcopy.Class == ir.PPARAMOUT) {
abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
isReturnValue = (stackcopy.Class == ir.PPARAMOUT)
}
}
inlIndex := 0
if base.Flag.GenDwarfInl > 1 {
@ -252,10 +244,10 @@ func preInliningDcls(fnsym *obj.LSym) []*ir.Name {
// createSimpleVars creates a DWARF entry for every variable declared in the
// function, claiming that they are permanently on the stack.
func createSimpleVars(fnsym *obj.LSym, apDecls []*ir.Name) ([]*ir.Name, []*dwarf.Var, map[*ir.Name]bool) {
func createSimpleVars(fnsym *obj.LSym, apDecls []*ir.Name) ([]*ir.Name, []*dwarf.Var, ir.NameSet) {
var vars []*dwarf.Var
var decls []*ir.Name
selected := make(map[*ir.Name]bool)
var selected ir.NameSet
for _, n := range apDecls {
if ir.IsAutoTmp(n) {
continue
@ -263,7 +255,7 @@ func createSimpleVars(fnsym *obj.LSym, apDecls []*ir.Name) ([]*ir.Name, []*dwarf
decls = append(decls, n)
vars = append(vars, createSimpleVar(fnsym, n))
selected[n] = true
selected.Add(n)
}
return decls, vars, selected
}
@ -320,19 +312,19 @@ func createSimpleVar(fnsym *obj.LSym, n *ir.Name) *dwarf.Var {
// createComplexVars creates recomposed DWARF vars with location lists,
// suitable for describing optimized code.
func createComplexVars(fnsym *obj.LSym, fn *ir.Func) ([]*ir.Name, []*dwarf.Var, map[*ir.Name]bool) {
func createComplexVars(fnsym *obj.LSym, fn *ir.Func) ([]*ir.Name, []*dwarf.Var, ir.NameSet) {
debugInfo := fn.DebugInfo.(*ssa.FuncDebug)
// Produce a DWARF variable entry for each user variable.
var decls []*ir.Name
var vars []*dwarf.Var
ssaVars := make(map[*ir.Name]bool)
var ssaVars ir.NameSet
for varID, dvar := range debugInfo.Vars {
n := dvar
ssaVars[n] = true
ssaVars.Add(n)
for _, slot := range debugInfo.VarSlots[varID] {
ssaVars[debugInfo.Slots[slot].N] = true
ssaVars.Add(debugInfo.Slots[slot].N)
}
if dvar := createComplexVar(fnsym, fn, ssa.VarID(varID)); dvar != nil {

View file

@ -218,6 +218,10 @@ func Batch(fns []*ir.Func, recursive bool) {
// Construct data-flow graph from syntax trees.
for _, fn := range fns {
if base.Flag.W > 1 {
s := fmt.Sprintf("\nbefore escape %v", fn)
ir.Dump(s, fn)
}
b.initFunc(fn)
}
for _, fn := range fns {
@ -534,8 +538,8 @@ func (e *escape) stmt(n ir.Node) {
e.stmts(n.Call.Init())
e.call(nil, n.Call, n)
case ir.ORETJMP:
// TODO(mdempsky): What do? esc.go just ignores it.
case ir.OTAILCALL:
// TODO(mdempsky): Treat like a normal call? esc.go used to just ignore it.
}
}
@ -585,7 +589,7 @@ func (e *escape) exprSkipInit(k hole, n ir.Node) {
default:
base.Fatalf("unexpected expr: %v", n)
case ir.OLITERAL, ir.ONIL, ir.OGETG, ir.OTYPE, ir.OMETHEXPR:
case ir.OLITERAL, ir.ONIL, ir.OGETG, ir.OTYPE, ir.OMETHEXPR, ir.OLINKSYMOFFSET:
// nop
case ir.ONAME:
@ -598,10 +602,6 @@ func (e *escape) exprSkipInit(k hole, n ir.Node) {
}
e.flow(k, e.oldLoc(n))
case ir.ONAMEOFFSET:
n := n.(*ir.NameOffsetExpr)
e.expr(k, n.Name_)
case ir.OPLUS, ir.ONEG, ir.OBITNOT, ir.ONOT:
n := n.(*ir.UnaryExpr)
e.discard(n.X)
@ -856,7 +856,7 @@ func (e *escape) discards(l ir.Nodes) {
}
}
// addr evaluates an addressable expression n and returns an EscHole
// addr evaluates an addressable expression n and returns a hole
// that represents storing into the represented location.
func (e *escape) addr(n ir.Node) hole {
if n == nil || ir.IsBlank(n) {
@ -875,9 +875,8 @@ func (e *escape) addr(n ir.Node) hole {
break
}
k = e.oldLoc(n).asHole()
case ir.ONAMEOFFSET:
n := n.(*ir.NameOffsetExpr)
k = e.addr(n.Name_)
case ir.OLINKSYMOFFSET:
break
case ir.ODOT:
n := n.(*ir.SelectorExpr)
k = e.addr(n.X)
@ -1658,7 +1657,14 @@ func (b *batch) finish(fns []*ir.Func) {
// Update n.Esc based on escape analysis results.
if loc.escapes {
if n.Op() != ir.ONAME {
if n.Op() == ir.ONAME {
if base.Flag.CompilingRuntime {
base.ErrorfAt(n.Pos(), "%v escapes to heap, not allowed in runtime", n)
}
if base.Flag.LowerM != 0 {
base.WarnfAt(n.Pos(), "moved to heap: %v", n)
}
} else {
if base.Flag.LowerM != 0 {
base.WarnfAt(n.Pos(), "%v escapes to heap", n)
}
@ -1668,7 +1674,6 @@ func (b *batch) finish(fns []*ir.Func) {
}
}
n.SetEsc(ir.EscHeap)
addrescapes(n)
} else {
if base.Flag.LowerM != 0 && n.Op() != ir.ONAME {
base.WarnfAt(n.Pos(), "%v does not escape", n)
@ -1779,7 +1784,7 @@ func (l leaks) Encode() string {
return s
}
// parseLeaks parses a binary string representing an EscLeaks.
// parseLeaks parses a binary string representing a leaks value.
func parseLeaks(s string) leaks {
var l leaks
if !strings.HasPrefix(s, "esc:") {
@ -2014,165 +2019,6 @@ func HeapAllocReason(n ir.Node) string {
return ""
}
// addrescapes tags node n as having had its address taken
// by "increasing" the "value" of n.Esc to EscHeap.
// Storage is allocated as necessary to allow the address
// to be taken.
func addrescapes(n ir.Node) {
switch n.Op() {
default:
// Unexpected Op, probably due to a previous type error. Ignore.
case ir.ODEREF, ir.ODOTPTR:
// Nothing to do.
case ir.ONAME:
n := n.(*ir.Name)
if n == ir.RegFP {
break
}
// if this is a tmpname (PAUTO), it was tagged by tmpname as not escaping.
// on PPARAM it means something different.
if n.Class == ir.PAUTO && n.Esc() == ir.EscNever {
break
}
// If a closure reference escapes, mark the outer variable as escaping.
if n.IsClosureVar() {
addrescapes(n.Defn)
break
}
if n.Class != ir.PPARAM && n.Class != ir.PPARAMOUT && n.Class != ir.PAUTO {
break
}
// This is a plain parameter or local variable that needs to move to the heap,
// but possibly for the function outside the one we're compiling.
// That is, if we have:
//
// func f(x int) {
// func() {
// global = &x
// }
// }
//
// then we're analyzing the inner closure but we need to move x to the
// heap in f, not in the inner closure. Flip over to f before calling moveToHeap.
oldfn := ir.CurFunc
ir.CurFunc = n.Curfn
ln := base.Pos
base.Pos = ir.CurFunc.Pos()
moveToHeap(n)
ir.CurFunc = oldfn
base.Pos = ln
// ODOTPTR has already been introduced,
// so these are the non-pointer ODOT and OINDEX.
// In &x[0], if x is a slice, then x does not
// escape--the pointer inside x does, but that
// is always a heap pointer anyway.
case ir.ODOT:
n := n.(*ir.SelectorExpr)
addrescapes(n.X)
case ir.OINDEX:
n := n.(*ir.IndexExpr)
if !n.X.Type().IsSlice() {
addrescapes(n.X)
}
case ir.OPAREN:
n := n.(*ir.ParenExpr)
addrescapes(n.X)
case ir.OCONVNOP:
n := n.(*ir.ConvExpr)
addrescapes(n.X)
}
}
// moveToHeap records the parameter or local variable n as moved to the heap.
func moveToHeap(n *ir.Name) {
if base.Flag.LowerR != 0 {
ir.Dump("MOVE", n)
}
if base.Flag.CompilingRuntime {
base.Errorf("%v escapes to heap, not allowed in runtime", n)
}
if n.Class == ir.PAUTOHEAP {
ir.Dump("n", n)
base.Fatalf("double move to heap")
}
// Allocate a local stack variable to hold the pointer to the heap copy.
// temp will add it to the function declaration list automatically.
heapaddr := typecheck.Temp(types.NewPtr(n.Type()))
heapaddr.SetSym(typecheck.Lookup("&" + n.Sym().Name))
heapaddr.SetPos(n.Pos())
// Unset AutoTemp to persist the &foo variable name through SSA to
// liveness analysis.
// TODO(mdempsky/drchase): Cleaner solution?
heapaddr.SetAutoTemp(false)
// Parameters have a local stack copy used at function start/end
// in addition to the copy in the heap that may live longer than
// the function.
if n.Class == ir.PPARAM || n.Class == ir.PPARAMOUT {
if n.FrameOffset() == types.BADWIDTH {
base.Fatalf("addrescapes before param assignment")
}
// We rewrite n below to be a heap variable (indirection of heapaddr).
// Preserve a copy so we can still write code referring to the original,
// and substitute that copy into the function declaration list
// so that analyses of the local (on-stack) variables use it.
stackcopy := typecheck.NewName(n.Sym())
stackcopy.SetType(n.Type())
stackcopy.SetFrameOffset(n.FrameOffset())
stackcopy.Class = n.Class
stackcopy.Heapaddr = heapaddr
if n.Class == ir.PPARAMOUT {
// Make sure the pointer to the heap copy is kept live throughout the function.
// The function could panic at any point, and then a defer could recover.
// Thus, we need the pointer to the heap copy always available so the
// post-deferreturn code can copy the return value back to the stack.
// See issue 16095.
heapaddr.SetIsOutputParamHeapAddr(true)
}
n.Stackcopy = stackcopy
// Substitute the stackcopy into the function variable list so that
// liveness and other analyses use the underlying stack slot
// and not the now-pseudo-variable n.
found := false
for i, d := range ir.CurFunc.Dcl {
if d == n {
ir.CurFunc.Dcl[i] = stackcopy
found = true
break
}
// Parameters are before locals, so can stop early.
// This limits the search even in functions with many local variables.
if d.Class == ir.PAUTO {
break
}
}
if !found {
base.Fatalf("cannot find %v in local variable list", n)
}
ir.CurFunc.Dcl = append(ir.CurFunc.Dcl, n)
}
// Modify n in place so that uses of n now mean indirection of the heapaddr.
n.Class = ir.PAUTOHEAP
n.SetFrameOffset(0)
n.Heapaddr = heapaddr
n.SetEsc(ir.EscHeap)
if base.Flag.LowerM != 0 {
base.WarnfAt(n.Pos(), "moved to heap: %v", n)
}
}
// This special tag is applied to uintptr variables
// that we believe may hold unsafe.Pointers for
// calls into assembly functions.

View file

@ -13,7 +13,6 @@ import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/liveness"
"cmd/compile/internal/reflectdata"
"cmd/compile/internal/ssagen"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
@ -73,7 +72,7 @@ func enqueueFunc(fn *ir.Func) {
func prepareFunc(fn *ir.Func) {
// Set up the function's LSym early to avoid data races with the assemblers.
// Do this before walk, as walk needs the LSym to set attributes/relocations
// (e.g. in markTypeUsedInInterface).
// (e.g. in MarkTypeUsedInInterface).
ssagen.InitLSym(fn, true)
// Calculate parameter offsets.
@ -84,24 +83,6 @@ func prepareFunc(fn *ir.Func) {
walk.Walk(fn)
ir.CurFunc = nil // enforce no further uses of CurFunc
typecheck.DeclContext = ir.PEXTERN
// Make sure type syms are declared for all types that might
// be types of stack objects. We need to do this here
// because symbols must be allocated before the parallel
// phase of the compiler.
for _, n := range fn.Dcl {
switch n.Class {
case ir.PPARAM, ir.PPARAMOUT, ir.PAUTO:
if liveness.ShouldTrack(n) && n.Addrtaken() {
reflectdata.WriteType(n.Type())
// Also make sure we allocate a linker symbol
// for the stack object data, for the same reason.
if fn.LSym.Func().StackObjects == nil {
fn.LSym.Func().StackObjects = base.Ctxt.Lookup(fn.LSym.Name + ".stkobj")
}
}
}
}
}
// compileFunctions compiles all functions in compilequeue.

View file

@ -96,16 +96,6 @@ func Main(archInit func(*ssagen.ArchInfo)) {
ir.Pkgs.Itab = types.NewPkg("go.itab", "go.itab")
ir.Pkgs.Itab.Prefix = "go.itab" // not go%2eitab
ir.Pkgs.Itablink = types.NewPkg("go.itablink", "go.itablink")
ir.Pkgs.Itablink.Prefix = "go.itablink" // not go%2eitablink
ir.Pkgs.Track = types.NewPkg("go.track", "go.track")
ir.Pkgs.Track.Prefix = "go.track" // not go%2etrack
// pseudo-package used for map zero values
ir.Pkgs.Map = types.NewPkg("go.map", "go.map")
ir.Pkgs.Map.Prefix = "go.map"
// pseudo-package used for methods with anonymous receivers
ir.Pkgs.Go = types.NewPkg("go", "")
@ -121,7 +111,7 @@ func Main(archInit func(*ssagen.ArchInfo)) {
log.Fatalf("compiler not built with support for -t")
}
// Enable inlining (after recordFlags, to avoid recording the rewritten -l). For now:
// Enable inlining (after RecordFlags, to avoid recording the rewritten -l). For now:
// default: inlining on. (Flag.LowerL == 1)
// -l: inlining off (Flag.LowerL == 0)
// -l=2, -l=3: inlining on again, with extra debugging (Flag.LowerL > 1)
@ -160,12 +150,6 @@ func Main(archInit func(*ssagen.ArchInfo)) {
ssagen.Arch.LinkArch.Init(base.Ctxt)
startProfile()
if base.Flag.Race {
ir.Pkgs.Race = types.NewPkg("runtime/race", "")
}
if base.Flag.MSan {
ir.Pkgs.Msan = types.NewPkg("runtime/msan", "")
}
if base.Flag.Race || base.Flag.MSan {
base.Flag.Cfg.Instrumenting = true
}
@ -193,7 +177,7 @@ func Main(archInit func(*ssagen.ArchInfo)) {
typecheck.Target = new(ir.Package)
typecheck.NeedITab = func(t, iface *types.Type) { reflectdata.ITabAddr(t, iface) }
typecheck.NeedRuntimeType = reflectdata.NeedRuntimeType // TODO(rsc): typenamesym for lock?
typecheck.NeedRuntimeType = reflectdata.NeedRuntimeType // TODO(rsc): TypeSym for lock?
base.AutogeneratedPos = makePos(src.NewFileBase("<autogenerated>", "<autogenerated>"), 1, 0)
@ -261,7 +245,7 @@ func Main(archInit func(*ssagen.ArchInfo)) {
escape.Funcs(typecheck.Target.Decls)
// Collect information for go:nowritebarrierrec
// checking. This must happen before transformclosure.
// checking. This must happen before transforming closures during Walk.
// We'll do the final check after write barriers are
// inserted.
if base.Flag.CompilingRuntime {
@ -269,7 +253,7 @@ func Main(archInit func(*ssagen.ArchInfo)) {
}
// Prepare for SSA compilation.
// This must be before peekitabs, because peekitabs
// This must be before CompileITabs, because CompileITabs
// can trigger function compilation.
typecheck.InitRuntime()
ssagen.InitConfig()

View file

@ -111,7 +111,6 @@ func dumpdata() {
numDecls := len(typecheck.Target.Decls)
dumpglobls(typecheck.Target.Externs)
staticdata.WriteFuncSyms()
reflectdata.CollectPTabs()
numExports := len(typecheck.Target.Exports)
addsignats(typecheck.Target.Externs)
@ -122,7 +121,7 @@ func dumpdata() {
reflectdata.WriteBasicTypes()
dumpembeds()
// Calls to dumpsignats can generate functions,
// Calls to WriteRuntimeTypes can generate functions,
// like method wrappers and hash and equality routines.
// Compile any generated functions, process any new resulting types, repeat.
// This can't loop forever, because there is no way to generate an infinite
@ -147,10 +146,11 @@ func dumpdata() {
dumpglobls(typecheck.Target.Externs[numExterns:])
if reflectdata.ZeroSize > 0 {
zero := ir.Pkgs.Map.Lookup("zero").Linksym()
zero := base.PkgLinksym("go.map", "zero", obj.ABI0)
objw.Global(zero, int32(reflectdata.ZeroSize), obj.DUPOK|obj.RODATA)
}
staticdata.WriteFuncSyms()
addGCLocals()
if numExports != len(typecheck.Target.Exports) {

View file

@ -4,7 +4,7 @@
//
// The inlining facility makes 2 passes: first caninl determines which
// functions are suitable for inlining, and for those that are it
// saves a copy of the body. Then inlcalls walks each function body to
// saves a copy of the body. Then InlineCalls walks each function body to
// expand calls to inlinable functions.
//
// The Debug.l flag controls the aggressiveness. Note that main() swaps level 0 and 1,
@ -27,7 +27,6 @@
package inline
import (
"errors"
"fmt"
"go/constant"
"strings"
@ -74,12 +73,12 @@ func InlinePackage() {
})
}
// Caninl determines whether fn is inlineable.
// CanInline determines whether fn is inlineable.
// If so, CanInline saves fn->nbody in fn->inl and substitutes it with a copy.
// fn and ->nbody will already have been typechecked.
func CanInline(fn *ir.Func) {
if fn.Nname == nil {
base.Fatalf("caninl no nname %+v", fn)
base.Fatalf("CanInline no nname %+v", fn)
}
var reason string // reason, if any, that the function was not inlined
@ -144,7 +143,7 @@ func CanInline(fn *ir.Func) {
}
if fn.Typecheck() == 0 {
base.Fatalf("caninl on non-typechecked function %v", fn)
base.Fatalf("CanInline on non-typechecked function %v", fn)
}
n := fn.Nname
@ -170,7 +169,6 @@ func CanInline(fn *ir.Func) {
visitor := hairyVisitor{
budget: inlineMaxBudget,
extraCallCost: cc,
usedLocals: make(map[*ir.Name]bool),
}
if visitor.tooHairy(fn) {
reason = visitor.reason
@ -180,7 +178,7 @@ func CanInline(fn *ir.Func) {
n.Func.Inl = &ir.Inline{
Cost: inlineMaxBudget - visitor.budget,
Dcl: pruneUnusedAutos(n.Defn.(*ir.Func).Dcl, &visitor),
Body: ir.DeepCopyList(src.NoXPos, fn.Body),
Body: inlcopylist(fn.Body),
}
if base.Flag.LowerM > 1 {
@ -200,11 +198,11 @@ func Inline_Flood(n *ir.Name, exportsym func(*ir.Name)) {
return
}
if n.Op() != ir.ONAME || n.Class != ir.PFUNC {
base.Fatalf("inlFlood: unexpected %v, %v, %v", n, n.Op(), n.Class)
base.Fatalf("Inline_Flood: unexpected %v, %v, %v", n, n.Op(), n.Class)
}
fn := n.Func
if fn == nil {
base.Fatalf("inlFlood: missing Func on %v", n)
base.Fatalf("Inline_Flood: missing Func on %v", n)
}
if fn.Inl == nil {
return
@ -217,10 +215,8 @@ func Inline_Flood(n *ir.Name, exportsym func(*ir.Name)) {
typecheck.ImportedBody(fn)
// Recursively identify all referenced functions for
// reexport. We want to include even non-called functions,
// because after inlining they might be callable.
ir.VisitList(ir.Nodes(fn.Inl.Body), func(n ir.Node) {
var doFlood func(n ir.Node)
doFlood = func(n ir.Node) {
switch n.Op() {
case ir.OMETHEXPR, ir.ODOTMETH:
Inline_Flood(ir.MethodExprName(n), exportsym)
@ -239,15 +235,16 @@ func Inline_Flood(n *ir.Name, exportsym func(*ir.Name)) {
// Okay, because we don't yet inline indirect
// calls to method values.
case ir.OCLOSURE:
// If the closure is inlinable, we'll need to
// flood it too. But today we don't support
// inlining functions that contain closures.
//
// When we do, we'll probably want:
// inlFlood(n.Func.Closure.Func.Nname)
base.Fatalf("unexpected closure in inlinable function")
// VisitList doesn't visit closure bodies, so force a
// recursive call to VisitList on the body of the closure.
ir.VisitList(n.(*ir.ClosureExpr).Func.Body, doFlood)
}
})
}
// Recursively identify all referenced functions for
// reexport. We want to include even non-called functions,
// because after inlining they might be callable.
ir.VisitList(ir.Nodes(fn.Inl.Body), doFlood)
}
// hairyVisitor visits a function body to determine its inlining
@ -256,18 +253,13 @@ type hairyVisitor struct {
budget int32
reason string
extraCallCost int32
usedLocals map[*ir.Name]bool
do func(ir.Node) error
usedLocals ir.NameSet
do func(ir.Node) bool
}
var errBudget = errors.New("too expensive")
func (v *hairyVisitor) tooHairy(fn *ir.Func) bool {
v.do = v.doNode // cache closure
err := errChildren(fn, v.do)
if err != nil {
v.reason = err.Error()
if ir.DoChildren(fn, v.do) {
return true
}
if v.budget < 0 {
@ -277,11 +269,10 @@ func (v *hairyVisitor) tooHairy(fn *ir.Func) bool {
return false
}
func (v *hairyVisitor) doNode(n ir.Node) error {
func (v *hairyVisitor) doNode(n ir.Node) bool {
if n == nil {
return nil
return false
}
switch n.Op() {
// Call is okay if inlinable and we have the budget for the body.
case ir.OCALLFUNC:
@ -295,7 +286,8 @@ func (v *hairyVisitor) doNode(n ir.Node) error {
if name.Class == ir.PFUNC && types.IsRuntimePkg(name.Sym().Pkg) {
fn := name.Sym().Name
if fn == "getcallerpc" || fn == "getcallersp" {
return errors.New("call to " + fn)
v.reason = "call to " + fn
return true
}
if fn == "throw" {
v.budget -= inlineExtraThrowCost
@ -346,38 +338,61 @@ func (v *hairyVisitor) doNode(n ir.Node) error {
v.budget -= v.extraCallCost
case ir.OPANIC:
n := n.(*ir.UnaryExpr)
if n.X.Op() == ir.OCONVIFACE && n.X.(*ir.ConvExpr).Implicit() {
// Hack to keep reflect.flag.mustBe inlinable for TestIntendedInlining.
// Before CL 284412, these conversions were introduced later in the
// compiler, so they didn't count against inlining budget.
v.budget++
}
v.budget -= inlineExtraPanicCost
case ir.ORECOVER:
// recover matches the argument frame pointer to find
// the right panic value, so it needs an argument frame.
return errors.New("call to recover")
v.reason = "call to recover"
return true
case ir.OCLOSURE,
ir.ORANGE,
case ir.OCLOSURE:
// TODO(danscales,mdempsky): Get working with -G.
// Probably after #43818 is fixed.
if base.Flag.G > 0 {
v.reason = "inlining closures not yet working with -G"
return true
}
// TODO(danscales) - fix some bugs when budget is lowered below 30
// Maybe make budget proportional to number of closure variables, e.g.:
//v.budget -= int32(len(n.(*ir.ClosureExpr).Func.ClosureVars) * 3)
v.budget -= 30
case ir.ORANGE,
ir.OSELECT,
ir.OGO,
ir.ODEFER,
ir.ODCLTYPE, // can't print yet
ir.ORETJMP:
return errors.New("unhandled op " + n.Op().String())
ir.OTAILCALL:
v.reason = "unhandled op " + n.Op().String()
return true
case ir.OAPPEND:
v.budget -= inlineExtraAppendCost
case ir.ODCLCONST, ir.OFALL:
// These nodes don't produce code; omit from inlining budget.
return nil
return false
case ir.OFOR, ir.OFORUNTIL:
n := n.(*ir.ForStmt)
if n.Label != nil {
return errors.New("labeled control")
v.reason = "labeled control"
return true
}
case ir.OSWITCH:
n := n.(*ir.SwitchStmt)
if n.Label != nil {
return errors.New("labeled control")
v.reason = "labeled control"
return true
}
// case ir.ORANGE, ir.OSELECT in "unhandled" above
@ -393,22 +408,15 @@ func (v *hairyVisitor) doNode(n ir.Node) error {
if ir.IsConst(n.Cond, constant.Bool) {
// This if and the condition cost nothing.
// TODO(rsc): It seems strange that we visit the dead branch.
if err := errList(n.Init(), v.do); err != nil {
return err
}
if err := errList(n.Body, v.do); err != nil {
return err
}
if err := errList(n.Else, v.do); err != nil {
return err
}
return nil
return doList(n.Init(), v.do) ||
doList(n.Body, v.do) ||
doList(n.Else, v.do)
}
case ir.ONAME:
n := n.(*ir.Name)
if n.Class == ir.PAUTO {
v.usedLocals[n] = true
v.usedLocals.Add(n)
}
case ir.OBLOCK:
@ -428,10 +436,11 @@ func (v *hairyVisitor) doNode(n ir.Node) error {
// When debugging, don't stop early, to get full cost of inlining this function
if v.budget < 0 && base.Flag.LowerM < 2 && !logopt.Enabled() {
return errBudget
v.reason = "too expensive"
return true
}
return errChildren(n, v.do)
return ir.DoChildren(n, v.do)
}
func isBigFunc(fn *ir.Func) bool {
@ -442,6 +451,52 @@ func isBigFunc(fn *ir.Func) bool {
})
}
// inlcopylist (together with inlcopy) recursively copies a list of nodes, except
// that it keeps the same ONAME, OTYPE, and OLITERAL nodes. It is used for copying
// the body and dcls of an inlineable function.
func inlcopylist(ll []ir.Node) []ir.Node {
s := make([]ir.Node, len(ll))
for i, n := range ll {
s[i] = inlcopy(n)
}
return s
}
// inlcopy is like DeepCopy(), but does extra work to copy closures.
func inlcopy(n ir.Node) ir.Node {
var edit func(ir.Node) ir.Node
edit = func(x ir.Node) ir.Node {
switch x.Op() {
case ir.ONAME, ir.OTYPE, ir.OLITERAL, ir.ONIL:
return x
}
m := ir.Copy(x)
ir.EditChildren(m, edit)
if x.Op() == ir.OCLOSURE {
x := x.(*ir.ClosureExpr)
// Need to save/duplicate x.Func.Nname,
// x.Func.Nname.Ntype, x.Func.Dcl, x.Func.ClosureVars, and
// x.Func.Body for iexport and local inlining.
oldfn := x.Func
newfn := ir.NewFunc(oldfn.Pos())
if oldfn.ClosureCalled() {
newfn.SetClosureCalled(true)
}
m.(*ir.ClosureExpr).Func = newfn
newfn.Nname = ir.NewNameAt(oldfn.Nname.Pos(), oldfn.Nname.Sym())
// XXX OK to share fn.Type() ??
newfn.Nname.SetType(oldfn.Nname.Type())
newfn.Nname.Ntype = inlcopy(oldfn.Nname.Ntype).(ir.Ntype)
newfn.Body = inlcopylist(oldfn.Body)
// Make shallow copy of the Dcl and ClosureVar slices
newfn.Dcl = append([]*ir.Name(nil), oldfn.Dcl...)
newfn.ClosureVars = append([]*ir.Name(nil), oldfn.ClosureVars...)
}
return m
}
return edit(n)
}
// Inlcalls/nodelist/node walks fn's statements and expressions and substitutes any
// calls made to inlineable functions. This is the external entry point.
func InlineCalls(fn *ir.Func) {
@ -762,13 +817,6 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b
if ln.Class == ir.PPARAMOUT { // return values handled below.
continue
}
if ir.IsParamStackCopy(ln) { // ignore the on-stack copy of a parameter that moved to the heap
// TODO(mdempsky): Remove once I'm confident
// this never actually happens. We currently
// perform inlining before escape analysis, so
// nothing should have moved to the heap yet.
base.Fatalf("impossible: %v", ln)
}
inlf := typecheck.Expr(inlvar(ln)).(*ir.Name)
inlvars[ln] = inlf
if base.Flag.GenDwarfInl > 0 {
@ -925,6 +973,7 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b
inlvars: inlvars,
bases: make(map[*src.PosBase]*src.PosBase),
newInlIndex: newIndex,
fn: fn,
}
subst.edit = subst.node
@ -1031,6 +1080,12 @@ type inlsubst struct {
newInlIndex int
edit func(ir.Node) ir.Node // cached copy of subst.node method value closure
// If non-nil, we are inside a closure inside the inlined function, and
// newclofn is the Func of the new inlined closure.
newclofn *ir.Func
fn *ir.Func // For debug -- the func that is being inlined
}
// list inlines a list of nodes.
@ -1042,6 +1097,157 @@ func (subst *inlsubst) list(ll ir.Nodes) []ir.Node {
return s
}
// fields returns a list of the fields of a struct type representing receiver,
// params, or results, after duplicating the field nodes and substituting the
// Nname nodes inside the field nodes.
func (subst *inlsubst) fields(oldt *types.Type) []*types.Field {
oldfields := oldt.FieldSlice()
newfields := make([]*types.Field, len(oldfields))
for i := range oldfields {
newfields[i] = oldfields[i].Copy()
if oldfields[i].Nname != nil {
newfields[i].Nname = subst.node(oldfields[i].Nname.(*ir.Name))
}
}
return newfields
}
// clovar creates a new ONAME node for a local variable or param of a closure
// inside a function being inlined.
func (subst *inlsubst) clovar(n *ir.Name) *ir.Name {
// TODO(danscales): want to get rid of this shallow copy, with code like the
// following, but it is hard to copy all the necessary flags in a maintainable way.
// m := ir.NewNameAt(n.Pos(), n.Sym())
// m.Class = n.Class
// m.SetType(n.Type())
// m.SetTypecheck(1)
//if n.IsClosureVar() {
// m.SetIsClosureVar(true)
//}
m := &ir.Name{}
*m = *n
m.Curfn = subst.newclofn
if n.Defn != nil && n.Defn.Op() == ir.ONAME {
if !n.IsClosureVar() {
base.FatalfAt(n.Pos(), "want closure variable, got: %+v", n)
}
if n.Sym().Pkg != types.LocalPkg {
// If the closure came from inlining a function from
// another package, must change package of captured
// variable to localpkg, so that the fields of the closure
// struct are local package and can be accessed even if
// name is not exported. If you disable this code, you can
// reproduce the problem by running 'go test
// go/internal/srcimporter'. TODO(mdempsky) - maybe change
// how we create closure structs?
m.SetSym(types.LocalPkg.Lookup(n.Sym().Name))
}
// Make sure any inlvar which is the Defn
// of an ONAME closure var is rewritten
// during inlining. Don't substitute
// if Defn node is outside inlined function.
if subst.inlvars[n.Defn.(*ir.Name)] != nil {
m.Defn = subst.node(n.Defn)
}
}
if n.Outer != nil {
// Either the outer variable is defined in function being inlined,
// and we will replace it with the substituted variable, or it is
// defined outside the function being inlined, and we should just
// skip the outer variable (the closure variable of the function
// being inlined).
s := subst.node(n.Outer).(*ir.Name)
if s == n.Outer {
s = n.Outer.Outer
}
m.Outer = s
}
return m
}
// closure does the necessary substitutions for a ClosureExpr n and returns the new
// closure node.
func (subst *inlsubst) closure(n *ir.ClosureExpr) ir.Node {
m := ir.Copy(n)
m.SetPos(subst.updatedPos(m.Pos()))
ir.EditChildren(m, subst.edit)
//fmt.Printf("Inlining func %v with closure into %v\n", subst.fn, ir.FuncName(ir.CurFunc))
// The following is similar to funcLit
oldfn := n.Func
newfn := ir.NewFunc(oldfn.Pos())
// These three lines are not strictly necessary, but just to be clear
// that the new function needs to redo typechecking and the inlinability check.
newfn.SetTypecheck(0)
newfn.SetInlinabilityChecked(false)
newfn.Inl = nil
newfn.SetIsHiddenClosure(true)
newfn.Nname = ir.NewNameAt(n.Pos(), ir.BlankNode.Sym())
newfn.Nname.Func = newfn
newfn.Nname.Ntype = subst.node(oldfn.Nname.Ntype).(ir.Ntype)
newfn.Nname.Defn = newfn
m.(*ir.ClosureExpr).Func = newfn
newfn.OClosure = m.(*ir.ClosureExpr)
if subst.newclofn != nil {
//fmt.Printf("Inlining a closure with a nested closure\n")
}
prevxfunc := subst.newclofn
// Mark that we are now substituting within a closure (within the
// inlined function), and create new nodes for all the local
// vars/params inside this closure.
subst.newclofn = newfn
newfn.Dcl = nil
newfn.ClosureVars = nil
for _, oldv := range oldfn.Dcl {
newv := subst.clovar(oldv)
subst.inlvars[oldv] = newv
newfn.Dcl = append(newfn.Dcl, newv)
}
for _, oldv := range oldfn.ClosureVars {
newv := subst.clovar(oldv)
subst.inlvars[oldv] = newv
newfn.ClosureVars = append(newfn.ClosureVars, newv)
}
// Need to replace ONAME nodes in
// newfn.Type().FuncType().Receiver/Params/Results.FieldSlice().Nname
oldt := oldfn.Type()
newrecvs := subst.fields(oldt.Recvs())
var newrecv *types.Field
if len(newrecvs) > 0 {
newrecv = newrecvs[0]
}
newt := types.NewSignature(oldt.Pkg(), newrecv,
subst.fields(oldt.Params()), subst.fields(oldt.Results()))
newfn.Nname.SetType(newt)
newfn.Body = subst.list(oldfn.Body)
// Remove the nodes for the current closure from subst.inlvars
for _, oldv := range oldfn.Dcl {
delete(subst.inlvars, oldv)
}
for _, oldv := range oldfn.ClosureVars {
delete(subst.inlvars, oldv)
}
// Go back to previous closure func
subst.newclofn = prevxfunc
// Actually create the named function for the closure, now that
// the closure is inlined in a specific function.
m.SetTypecheck(0)
if oldfn.ClosureCalled() {
typecheck.Callee(m)
} else {
typecheck.Expr(m)
}
return m
}
// node recursively copies a node from the saved pristine body of the
// inlined function, substituting references to input/output
// parameters with ones to the tmpnames, and substituting returns with
@ -1056,13 +1262,17 @@ func (subst *inlsubst) node(n ir.Node) ir.Node {
n := n.(*ir.Name)
// Handle captured variables when inlining closures.
if n.IsClosureVar() {
if n.IsClosureVar() && subst.newclofn == nil {
o := n.Outer
// Deal with case where sequence of closures are inlined.
// TODO(danscales) - write test case to see if we need to
// go up multiple levels.
if o.Curfn != ir.CurFunc {
o = o.Outer
}
// make sure the outer param matches the inlining location
// NB: if we enabled inlining of functions containing OCLOSURE or refined
// the reassigned check via some sort of copy propagation this would most
// likely need to be changed to a loop to walk up to the correct Param
if o == nil || o.Curfn != ir.CurFunc {
base.Fatalf("%v: unresolvable capture %v\n", ir.Line(n), n)
}
@ -1098,6 +1308,10 @@ func (subst *inlsubst) node(n ir.Node) ir.Node {
}
case ir.ORETURN:
if subst.newclofn != nil {
// Don't do special substitutions if inside a closure
break
}
// Since we don't handle bodies with closures,
// this return is guaranteed to belong to the current inlined function.
n := n.(*ir.ReturnStmt)
@ -1136,6 +1350,10 @@ func (subst *inlsubst) node(n ir.Node) ir.Node {
return m
case ir.OLABEL:
if subst.newclofn != nil {
// Don't do special substitutions if inside a closure
break
}
n := n.(*ir.LabelStmt)
m := ir.Copy(n).(*ir.LabelStmt)
m.SetPos(subst.updatedPos(m.Pos()))
@ -1143,10 +1361,10 @@ func (subst *inlsubst) node(n ir.Node) ir.Node {
p := fmt.Sprintf("%s·%d", n.Label.Name, inlgen)
m.Label = typecheck.Lookup(p)
return m
}
if n.Op() == ir.OCLOSURE {
base.Fatalf("cannot inline function containing closure: %+v", n)
case ir.OCLOSURE:
return subst.closure(n.(*ir.ClosureExpr))
}
m := ir.Copy(n)
@ -1171,7 +1389,7 @@ func pruneUnusedAutos(ll []*ir.Name, vis *hairyVisitor) []*ir.Name {
s := make([]*ir.Name, 0, len(ll))
for _, n := range ll {
if n.Class == ir.PAUTO {
if _, found := vis.usedLocals[n]; !found {
if !vis.usedLocals.Has(n) {
continue
}
}
@ -1191,21 +1409,13 @@ func numNonClosures(list []*ir.Func) int {
return count
}
// TODO(mdempsky): Update inl.go to use ir.DoChildren directly.
func errChildren(n ir.Node, do func(ir.Node) error) (err error) {
ir.DoChildren(n, func(x ir.Node) bool {
err = do(x)
return err != nil
})
return
}
func errList(list []ir.Node, do func(ir.Node) error) error {
func doList(list []ir.Node, do func(ir.Node) bool) bool {
for _, x := range list {
if x != nil {
if err := do(x); err != nil {
return err
if do(x) {
return true
}
}
}
return nil
return false
}
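doList follows the same bool protocol as ir.DoChildren: do returns true to stop the walk, and doList reports whether it stopped early. A minimal usage sketch (hasCall is hypothetical, within the same package):

// hasCall reports whether any node in list is a direct function call.
func hasCall(list []ir.Node) bool {
	return doList(list, func(x ir.Node) bool {
		return x.Op() == ir.OCALLFUNC
	})
}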

View file

@ -77,7 +77,7 @@ func ConstOverflow(v constant.Value, t *types.Type) bool {
ft := types.FloatForComplex(t)
return ConstOverflow(constant.Real(v), ft) || ConstOverflow(constant.Imag(v), ft)
}
base.Fatalf("doesoverflow: %v, %v", v, t)
base.Fatalf("ConstOverflow: %v, %v", v, t)
panic("unreachable")
}

View file

@ -8,6 +8,7 @@ import (
"bytes"
"cmd/compile/internal/base"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/src"
"fmt"
"go/constant"
@ -32,8 +33,7 @@ type miniExpr struct {
}
const (
miniExprHasCall = 1 << iota
miniExprNonNil
miniExprNonNil = 1 << iota
miniExprTransient
miniExprBounded
miniExprImplicit // for use by implementations; not supported by every Expr
@ -44,8 +44,6 @@ func (*miniExpr) isExpr() {}
func (n *miniExpr) Type() *types.Type { return n.typ }
func (n *miniExpr) SetType(x *types.Type) { n.typ = x }
func (n *miniExpr) HasCall() bool { return n.flags&miniExprHasCall != 0 }
func (n *miniExpr) SetHasCall(b bool) { n.flags.set(miniExprHasCall, b) }
func (n *miniExpr) NonNil() bool { return n.flags&miniExprNonNil != 0 }
func (n *miniExpr) MarkNonNil() { n.flags |= miniExprNonNil }
func (n *miniExpr) Transient() bool { return n.flags&miniExprTransient != 0 }
@ -145,7 +143,7 @@ func (n *BinaryExpr) SetOp(op Op) {
}
// A CallUse records how the result of the call is used:
type CallUse int
type CallUse byte
const (
_ CallUse = iota
@ -161,7 +159,6 @@ type CallExpr struct {
origNode
X Node
Args Nodes
Rargs Nodes // TODO(rsc): Delete.
KeepAlive []*Name // vars to be kept alive until call returns
IsDDD bool
Use CallUse
@ -464,21 +461,35 @@ func NewResultExpr(pos src.XPos, typ *types.Type, offset int64) *ResultExpr {
return n
}
// A NameOffsetExpr refers to an offset within a variable.
// A LinksymOffsetExpr refers to an offset within a global variable.
// It is like a SelectorExpr but without the field name.
type NameOffsetExpr struct {
type LinksymOffsetExpr struct {
miniExpr
Name_ *Name
Linksym *obj.LSym
Offset_ int64
}
func NewNameOffsetExpr(pos src.XPos, name *Name, offset int64, typ *types.Type) *NameOffsetExpr {
n := &NameOffsetExpr{Name_: name, Offset_: offset}
func NewLinksymOffsetExpr(pos src.XPos, lsym *obj.LSym, offset int64, typ *types.Type) *LinksymOffsetExpr {
n := &LinksymOffsetExpr{Linksym: lsym, Offset_: offset}
n.typ = typ
n.op = ONAMEOFFSET
n.op = OLINKSYMOFFSET
return n
}
// NewLinksymExpr is NewLinksymOffsetExpr, but with offset fixed at 0.
func NewLinksymExpr(pos src.XPos, lsym *obj.LSym, typ *types.Type) *LinksymOffsetExpr {
return NewLinksymOffsetExpr(pos, lsym, 0, typ)
}
// NewNameOffsetExpr is NewLinksymOffsetExpr, but taking a *Name
// representing a global variable instead of an *obj.LSym directly.
func NewNameOffsetExpr(pos src.XPos, name *Name, offset int64, typ *types.Type) *LinksymOffsetExpr {
if name == nil || IsBlank(name) || !(name.Op() == ONAME && name.Class == PEXTERN) {
base.FatalfAt(pos, "cannot take offset of nil, blank name or non-global variable: %v", name)
}
return NewLinksymOffsetExpr(pos, name.Linksym(), offset, typ)
}
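A sketch of how the constructors layer, assuming a global g; secondField and the offset are hypothetical:

// secondField builds an expression that reads an int64 at offset 8 inside
// the global g, addressing it through g's linker symbol rather than an ONAME.
func secondField(pos src.XPos, g *ir.Name) ir.Node {
	// NewNameOffsetExpr validates that g is a named global (PEXTERN) and
	// then delegates to NewLinksymOffsetExpr(pos, g.Linksym(), 8, typ).
	return ir.NewNameOffsetExpr(pos, g, 8, types.Types[types.TINT64])
}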
// A SelectorExpr is a selector expression X.Sel.
type SelectorExpr struct {
miniExpr
@ -612,11 +623,9 @@ type TypeAssertExpr struct {
X Node
Ntype Ntype
// Runtime type information provided by walkDotType.
// Caution: These aren't always populated; see walkDotType.
SrcType *AddrExpr `mknode:"-"` // *runtime._type for X's type
DstType *AddrExpr `mknode:"-"` // *runtime._type for Type
Itab *AddrExpr `mknode:"-"` // *runtime.itab for Type implementing X's type
// Runtime type information provided by walkDotType for
// assertions from non-empty interface to concrete type.
Itab *AddrExpr `mknode:"-"` // *runtime.itab for Type implementing X's type
}
func NewTypeAssertExpr(pos src.XPos, x Node, typ Ntype) *TypeAssertExpr {
@ -656,7 +665,7 @@ func (n *UnaryExpr) SetOp(op Op) {
case OBITNOT, ONEG, ONOT, OPLUS, ORECV,
OALIGNOF, OCAP, OCLOSE, OIMAG, OLEN, ONEW,
OOFFSETOF, OPANIC, OREAL, OSIZEOF,
OCHECKNIL, OCFUNC, OIDATA, OITAB, ONEWOBJ, OSPTR, OVARDEF, OVARKILL, OVARLIVE:
OCHECKNIL, OCFUNC, OIDATA, OITAB, OSPTR, OVARDEF, OVARKILL, OVARLIVE:
n.op = op
}
}
@ -728,7 +737,7 @@ func IsAddressable(n Node) bool {
}
return true
case ONAMEOFFSET:
case OLINKSYMOFFSET:
return true
}

View file

@ -378,9 +378,9 @@ func stmtFmt(n Node, s fmt.State) {
n := n.(*ReturnStmt)
fmt.Fprintf(s, "return %.v", n.Results)
case ORETJMP:
n := n.(*BranchStmt)
fmt.Fprintf(s, "retjmp %v", n.Label)
case OTAILCALL:
n := n.(*TailCallStmt)
fmt.Fprintf(s, "tailcall %v", n.Target)
case OINLMARK:
n := n.(*InlineMarkStmt)
@ -589,20 +589,20 @@ func exprFmt(n Node, s fmt.State, prec int) {
}
if n.Type() == types.UntypedRune {
switch x, ok := constant.Int64Val(n.Val()); {
switch x, ok := constant.Uint64Val(n.Val()); {
case !ok:
fallthrough
default:
fmt.Fprintf(s, "('\\x00' + %v)", n.Val())
case ' ' <= x && x < utf8.RuneSelf && x != '\\' && x != '\'':
fmt.Fprintf(s, "'%c'", int(x))
case x < utf8.RuneSelf:
fmt.Fprintf(s, "%q", x)
case 0 <= x && x < 1<<16:
fmt.Fprintf(s, "'\\u%04x'", uint(int(x)))
case x < 1<<16:
fmt.Fprintf(s, "'\\u%04x'", x)
case 0 <= x && x <= utf8.MaxRune:
fmt.Fprintf(s, "'\\U%08x'", uint64(x))
case x <= utf8.MaxRune:
fmt.Fprintf(s, "'\\U%08x'", x)
}
} else {
fmt.Fprint(s, types.FmtConst(n.Val(), s.Flag('#')))
@ -632,9 +632,9 @@ func exprFmt(n Node, s fmt.State, prec int) {
case OPACK, ONONAME:
fmt.Fprint(s, n.Sym())
case ONAMEOFFSET:
n := n.(*NameOffsetExpr)
fmt.Fprintf(s, "(%v)(%v@%d)", n.Type(), n.Name_, n.Offset_)
case OLINKSYMOFFSET:
n := n.(*LinksymOffsetExpr)
fmt.Fprintf(s, "(%v)(%s@%d)", n.Type(), n.Linksym.Name, n.Offset_)
case OTYPE:
if n.Type() == nil && n.Sym() != nil {
@ -1020,6 +1020,15 @@ func dumpNodeHeader(w io.Writer, n Node) {
fmt.Fprintf(w, " defn(%p)", n.Name().Defn)
}
if base.Debug.DumpPtrs != 0 && n.Name() != nil && n.Name().Curfn != nil {
// Useful to see where Curfn is set and what node it points to
fmt.Fprintf(w, " curfn(%p)", n.Name().Curfn)
}
if base.Debug.DumpPtrs != 0 && n.Name() != nil && n.Name().Outer != nil {
// Useful to see where Outer is set and what node it points to
fmt.Fprintf(w, " outer(%p)", n.Name().Outer)
}
if EscFmt != nil {
if esc := EscFmt(n); esc != "" {
fmt.Fprintf(w, " %s", esc)
@ -1119,6 +1128,11 @@ func dumpNode(w io.Writer, n Node, depth int) {
return
}
if n == nil {
fmt.Fprint(w, "NilIrNode")
return
}
if len(n.Init()) != 0 {
fmt.Fprintf(w, "%+v-init", n.Op())
dumpNodes(w, n.Init(), depth+1)
@ -1182,6 +1196,18 @@ func dumpNode(w io.Writer, n Node, depth int) {
dumpNode(w, dcl, depth+1)
}
}
if len(fn.ClosureVars) > 0 {
indent(w, depth)
fmt.Fprintf(w, "%+v-ClosureVars", n.Op())
for _, cv := range fn.ClosureVars {
dumpNode(w, cv, depth+1)
}
}
if len(fn.Enter) > 0 {
indent(w, depth)
fmt.Fprintf(w, "%+v-Enter", n.Op())
dumpNodes(w, fn.Enter, depth+1)
}
if len(fn.Body) > 0 {
indent(w, depth)
fmt.Fprintf(w, "%+v-body", n.Op())

View file

@ -63,7 +63,7 @@ type Func struct {
Exit Nodes
// ONAME nodes for all params/locals for this func/closure, does NOT
// include closurevars until transformclosure runs.
// include closurevars until transforming closures during walk.
// Names must be listed PPARAMs, PPARAMOUTs, then PAUTOs,
// with PPARAMs and PPARAMOUTs in order corresponding to the function signature.
// However, as anonymous or blank PPARAMs are not actually declared,
@ -133,9 +133,10 @@ func (n *Func) copy() Node { panic(n.no("copy")) }
func (n *Func) doChildren(do func(Node) bool) bool { return doNodes(n.Body, do) }
func (n *Func) editChildren(edit func(Node) Node) { editNodes(n.Body, edit) }
func (f *Func) Type() *types.Type { return f.Nname.Type() }
func (f *Func) Sym() *types.Sym { return f.Nname.Sym() }
func (f *Func) Linksym() *obj.LSym { return f.Nname.Linksym() }
func (f *Func) Type() *types.Type { return f.Nname.Type() }
func (f *Func) Sym() *types.Sym { return f.Nname.Sym() }
func (f *Func) Linksym() *obj.LSym { return f.Nname.Linksym() }
func (f *Func) LinksymABI(abi obj.ABI) *obj.LSym { return f.Nname.LinksymABI(abi) }
// An Inline holds fields used for function bodies that can be inlined.
type Inline struct {

View file

@ -57,7 +57,7 @@ const (
miniWalkdefShift = 0 // TODO(mdempsky): Move to Name.flags.
miniTypecheckShift = 2
miniDiag = 1 << 4
miniHasCall = 1 << 5 // for miniStmt
miniWalked = 1 << 5 // to prevent/catch re-walking
)
func (n *miniNode) Typecheck() uint8 { return n.bits.get2(miniTypecheckShift) }
@ -71,6 +71,9 @@ func (n *miniNode) SetTypecheck(x uint8) {
func (n *miniNode) Diag() bool { return n.bits&miniDiag != 0 }
func (n *miniNode) SetDiag(x bool) { n.bits.set(miniDiag, x) }
func (n *miniNode) Walked() bool { return n.bits&miniWalked != 0 }
func (n *miniNode) SetWalked(x bool) { n.bits.set(miniWalked, x) }
// Empty, immutable graph structure.
func (n *miniNode) Init() Nodes { return Nodes{} }
@ -85,7 +88,5 @@ func (n *miniNode) Name() *Name { return nil }
func (n *miniNode) Sym() *types.Sym { return nil }
func (n *miniNode) Val() constant.Value { panic(n.no("Val")) }
func (n *miniNode) SetVal(v constant.Value) { panic(n.no("SetVal")) }
func (n *miniNode) HasCall() bool { return false }
func (n *miniNode) SetHasCall(bool) { panic(n.no("SetHasCall")) }
func (n *miniNode) NonNil() bool { return false }
func (n *miniNode) MarkNonNil() { panic(n.no("MarkNonNil")) }

View file

@ -58,9 +58,6 @@ type Name struct {
Ntype Ntype
Heapaddr *Name // temp holding heap address of param
// ONAME PAUTOHEAP
Stackcopy *Name // the PPARAM/PPARAMOUT on-stack slot (moved func params only)
// ONAME closure linkage
// Consider:
//
@ -150,12 +147,7 @@ func (n *Name) TypeDefn() *types.Type {
// RecordFrameOffset records the frame offset for the name.
// It is used by package types when laying out function arguments.
func (n *Name) RecordFrameOffset(offset int64) {
if n.Stackcopy != nil {
n.Stackcopy.SetFrameOffset(offset)
n.SetFrameOffset(0)
} else {
n.SetFrameOffset(offset)
}
n.SetFrameOffset(offset)
}
// NewNameAt returns a new ONAME Node associated with symbol s at position pos.
@ -234,7 +226,8 @@ func (n *Name) SetWalkdef(x uint8) {
n.bits.set2(miniWalkdefShift, x)
}
func (n *Name) Linksym() *obj.LSym { return n.sym.Linksym() }
func (n *Name) Linksym() *obj.LSym { return n.sym.Linksym() }
func (n *Name) LinksymABI(abi obj.ABI) *obj.LSym { return n.sym.LinksymABI(abi) }
func (*Name) CanBeNtype() {}
func (*Name) CanBeAnSSASym() {}
@ -292,6 +285,21 @@ func (n *Name) SetInlLocal(b bool) { n.flags.set(nameInlLocal, b) }
func (n *Name) SetOpenDeferSlot(b bool) { n.flags.set(nameOpenDeferSlot, b) }
func (n *Name) SetLibfuzzerExtraCounter(b bool) { n.flags.set(nameLibfuzzerExtraCounter, b) }
// OnStack reports whether variable n may reside on the stack.
func (n *Name) OnStack() bool {
if n.Op() == ONAME {
switch n.Class {
case PPARAM, PPARAMOUT, PAUTO:
return n.Esc() != EscHeap
case PEXTERN, PAUTOHEAP:
return false
}
}
// Note: fmt.go:dumpNodeHeader calls all "func() bool"-typed
// methods, but it can only recover from panics, not Fatalf.
panic(fmt.Sprintf("%v: not a variable: %v", base.FmtPos(n.Pos()), n))
}
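A hedged sketch of the intended use: callers branch on OnStack to pick an access path, and passing anything that is not a variable is a fatal misuse (describe is hypothetical):

func describe(n *ir.Name) string {
	if n.OnStack() {
		return "stack" // PPARAM/PPARAMOUT/PAUTO that did not escape
	}
	// PEXTERN globals and heap-moved locals (PAUTOHEAP, or Esc() == EscHeap)
	// are reached through a symbol or through n.Heapaddr instead.
	return "not stack"
}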
// MarkReadonly indicates that n is an ONAME with readonly contents.
func (n *Name) MarkReadonly() {
if n.Op() != ONAME {
@ -501,24 +509,4 @@ func NewPkgName(pos src.XPos, sym *types.Sym, pkg *types.Pkg) *PkgName {
return p
}
// IsParamStackCopy reports whether this is the on-stack copy of a
// function parameter that moved to the heap.
func IsParamStackCopy(n Node) bool {
if n.Op() != ONAME {
return false
}
name := n.(*Name)
return (name.Class == PPARAM || name.Class == PPARAMOUT) && name.Heapaddr != nil
}
// IsParamHeapCopy reports whether this is the on-heap copy of
// a function parameter that moved to the heap.
func IsParamHeapCopy(n Node) bool {
if n.Op() != ONAME {
return false
}
name := n.(*Name)
return name.Class == PAUTOHEAP && name.Stackcopy != nil
}
var RegFP *Name

View file

@ -52,8 +52,6 @@ type Node interface {
SetTypecheck(x uint8)
NonNil() bool
MarkNonNil()
HasCall() bool
SetHasCall(x bool)
}
// Line returns n's position as a string. If n has been inlined,
@ -216,7 +214,6 @@ const (
OAND // Left & Right
OANDNOT // Left &^ Right
ONEW // new(Left); corresponds to calls to new in source code
ONEWOBJ // runtime.newobject(n.Type); introduced by walk; Left is type descriptor
ONOT // !Left
OBITNOT // ^Left
OPLUS // +Left
@ -294,23 +291,27 @@ const (
OTSLICE // []int
// misc
OINLCALL // intermediary representation of an inlined call.
OEFACE // itable and data words of an empty-interface value.
OITAB // itable word of an interface value.
OIDATA // data word of an interface value in Left
OSPTR // base pointer of a slice or string.
OCFUNC // reference to c function pointer (not go func value)
OCHECKNIL // emit code to ensure pointer/interface not nil
OVARDEF // variable is about to be fully initialized
OVARKILL // variable is dead
OVARLIVE // variable is alive
ORESULT // result of a function call; Xoffset is stack offset
OINLMARK // start of an inlined body, with file/line of caller. Xoffset is an index into the inline tree.
ONAMEOFFSET // offset within a name
// intermediate representation of an inlined call. Uses Init (assignments
// for the captured variables, parameters, retvars, & INLMARK op),
// Body (body of the inlined function), and ReturnVars (list of
// return values)
OINLCALL // intermediary representation of an inlined call.
OEFACE // itable and data words of an empty-interface value.
OITAB // itable word of an interface value.
OIDATA // data word of an interface value in Left
OSPTR // base pointer of a slice or string.
OCFUNC // reference to c function pointer (not go func value)
OCHECKNIL // emit code to ensure pointer/interface not nil
OVARDEF // variable is about to be fully initialized
OVARKILL // variable is dead
OVARLIVE // variable is alive
ORESULT // result of a function call; Xoffset is stack offset
OINLMARK // start of an inlined body, with file/line of caller. Xoffset is an index into the inline tree.
OLINKSYMOFFSET // offset within a linker symbol
// arch-specific opcodes
ORETJMP // return to other function
OGETG // runtime.getg() (read g pointer)
OTAILCALL // tail call to another function
OGETG // runtime.getg() (read g pointer)
OEND
)
@ -452,6 +453,9 @@ const (
// Go command pragmas
GoBuildPragma
RegisterParams // TODO remove after register abi is working
)
func AsNode(n types.Object) Node {
@ -542,7 +546,6 @@ func InitExpr(init []Node, expr Node) Node {
}
n.PtrInit().Prepend(init...)
n.SetHasCall(true)
return n
}

View file

@ -250,7 +250,6 @@ func (n *CallExpr) copy() Node {
c := *n
c.init = copyNodes(c.init)
c.Args = copyNodes(c.Args)
c.Rargs = copyNodes(c.Rargs)
c.KeepAlive = copyNames(c.KeepAlive)
return &c
}
@ -264,9 +263,6 @@ func (n *CallExpr) doChildren(do func(Node) bool) bool {
if doNodes(n.Args, do) {
return true
}
if doNodes(n.Rargs, do) {
return true
}
if doNames(n.KeepAlive, do) {
return true
}
@ -278,7 +274,6 @@ func (n *CallExpr) editChildren(edit func(Node) Node) {
n.X = edit(n.X).(Node)
}
editNodes(n.Args, edit)
editNodes(n.Rargs, edit)
editNames(n.KeepAlive, edit)
}
@ -734,6 +729,22 @@ func (n *LabelStmt) editChildren(edit func(Node) Node) {
editNodes(n.init, edit)
}
func (n *LinksymOffsetExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
func (n *LinksymOffsetExpr) copy() Node {
c := *n
c.init = copyNodes(c.init)
return &c
}
func (n *LinksymOffsetExpr) doChildren(do func(Node) bool) bool {
if doNodes(n.init, do) {
return true
}
return false
}
func (n *LinksymOffsetExpr) editChildren(edit func(Node) Node) {
editNodes(n.init, edit)
}
func (n *LogicalExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
func (n *LogicalExpr) copy() Node {
c := *n
@ -815,28 +826,6 @@ func (n *MapType) editChildren(edit func(Node) Node) {
func (n *Name) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
func (n *NameOffsetExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
func (n *NameOffsetExpr) copy() Node {
c := *n
c.init = copyNodes(c.init)
return &c
}
func (n *NameOffsetExpr) doChildren(do func(Node) bool) bool {
if doNodes(n.init, do) {
return true
}
if n.Name_ != nil && do(n.Name_) {
return true
}
return false
}
func (n *NameOffsetExpr) editChildren(edit func(Node) Node) {
editNodes(n.init, edit)
if n.Name_ != nil {
n.Name_ = edit(n.Name_).(*Name)
}
}
func (n *NilExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
func (n *NilExpr) copy() Node {
c := *n
@ -1233,6 +1222,28 @@ func (n *SwitchStmt) editChildren(edit func(Node) Node) {
editNodes(n.Compiled, edit)
}
func (n *TailCallStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
func (n *TailCallStmt) copy() Node {
c := *n
c.init = copyNodes(c.init)
return &c
}
func (n *TailCallStmt) doChildren(do func(Node) bool) bool {
if doNodes(n.init, do) {
return true
}
if n.Target != nil && do(n.Target) {
return true
}
return false
}
func (n *TailCallStmt) editChildren(edit func(Node) Node) {
editNodes(n.init, edit)
if n.Target != nil {
n.Target = edit(n.Target).(*Name)
}
}
func (n *TypeAssertExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
func (n *TypeAssertExpr) copy() Node {
c := *n

View file

@ -91,81 +91,80 @@ func _() {
_ = x[OAND-80]
_ = x[OANDNOT-81]
_ = x[ONEW-82]
_ = x[ONEWOBJ-83]
_ = x[ONOT-84]
_ = x[OBITNOT-85]
_ = x[OPLUS-86]
_ = x[ONEG-87]
_ = x[OOROR-88]
_ = x[OPANIC-89]
_ = x[OPRINT-90]
_ = x[OPRINTN-91]
_ = x[OPAREN-92]
_ = x[OSEND-93]
_ = x[OSLICE-94]
_ = x[OSLICEARR-95]
_ = x[OSLICESTR-96]
_ = x[OSLICE3-97]
_ = x[OSLICE3ARR-98]
_ = x[OSLICEHEADER-99]
_ = x[ORECOVER-100]
_ = x[ORECV-101]
_ = x[ORUNESTR-102]
_ = x[OSELRECV2-103]
_ = x[OIOTA-104]
_ = x[OREAL-105]
_ = x[OIMAG-106]
_ = x[OCOMPLEX-107]
_ = x[OALIGNOF-108]
_ = x[OOFFSETOF-109]
_ = x[OSIZEOF-110]
_ = x[OMETHEXPR-111]
_ = x[OSTMTEXPR-112]
_ = x[OBLOCK-113]
_ = x[OBREAK-114]
_ = x[OCASE-115]
_ = x[OCONTINUE-116]
_ = x[ODEFER-117]
_ = x[OFALL-118]
_ = x[OFOR-119]
_ = x[OFORUNTIL-120]
_ = x[OGOTO-121]
_ = x[OIF-122]
_ = x[OLABEL-123]
_ = x[OGO-124]
_ = x[ORANGE-125]
_ = x[ORETURN-126]
_ = x[OSELECT-127]
_ = x[OSWITCH-128]
_ = x[OTYPESW-129]
_ = x[OTCHAN-130]
_ = x[OTMAP-131]
_ = x[OTSTRUCT-132]
_ = x[OTINTER-133]
_ = x[OTFUNC-134]
_ = x[OTARRAY-135]
_ = x[OTSLICE-136]
_ = x[OINLCALL-137]
_ = x[OEFACE-138]
_ = x[OITAB-139]
_ = x[OIDATA-140]
_ = x[OSPTR-141]
_ = x[OCFUNC-142]
_ = x[OCHECKNIL-143]
_ = x[OVARDEF-144]
_ = x[OVARKILL-145]
_ = x[OVARLIVE-146]
_ = x[ORESULT-147]
_ = x[OINLMARK-148]
_ = x[ONAMEOFFSET-149]
_ = x[ORETJMP-150]
_ = x[OGETG-151]
_ = x[OEND-152]
_ = x[ONOT-83]
_ = x[OBITNOT-84]
_ = x[OPLUS-85]
_ = x[ONEG-86]
_ = x[OOROR-87]
_ = x[OPANIC-88]
_ = x[OPRINT-89]
_ = x[OPRINTN-90]
_ = x[OPAREN-91]
_ = x[OSEND-92]
_ = x[OSLICE-93]
_ = x[OSLICEARR-94]
_ = x[OSLICESTR-95]
_ = x[OSLICE3-96]
_ = x[OSLICE3ARR-97]
_ = x[OSLICEHEADER-98]
_ = x[ORECOVER-99]
_ = x[ORECV-100]
_ = x[ORUNESTR-101]
_ = x[OSELRECV2-102]
_ = x[OIOTA-103]
_ = x[OREAL-104]
_ = x[OIMAG-105]
_ = x[OCOMPLEX-106]
_ = x[OALIGNOF-107]
_ = x[OOFFSETOF-108]
_ = x[OSIZEOF-109]
_ = x[OMETHEXPR-110]
_ = x[OSTMTEXPR-111]
_ = x[OBLOCK-112]
_ = x[OBREAK-113]
_ = x[OCASE-114]
_ = x[OCONTINUE-115]
_ = x[ODEFER-116]
_ = x[OFALL-117]
_ = x[OFOR-118]
_ = x[OFORUNTIL-119]
_ = x[OGOTO-120]
_ = x[OIF-121]
_ = x[OLABEL-122]
_ = x[OGO-123]
_ = x[ORANGE-124]
_ = x[ORETURN-125]
_ = x[OSELECT-126]
_ = x[OSWITCH-127]
_ = x[OTYPESW-128]
_ = x[OTCHAN-129]
_ = x[OTMAP-130]
_ = x[OTSTRUCT-131]
_ = x[OTINTER-132]
_ = x[OTFUNC-133]
_ = x[OTARRAY-134]
_ = x[OTSLICE-135]
_ = x[OINLCALL-136]
_ = x[OEFACE-137]
_ = x[OITAB-138]
_ = x[OIDATA-139]
_ = x[OSPTR-140]
_ = x[OCFUNC-141]
_ = x[OCHECKNIL-142]
_ = x[OVARDEF-143]
_ = x[OVARKILL-144]
_ = x[OVARLIVE-145]
_ = x[ORESULT-146]
_ = x[OINLMARK-147]
_ = x[OLINKSYMOFFSET-148]
_ = x[OTAILCALL-149]
_ = x[OGETG-150]
_ = x[OEND-151]
}
const _Op_name = "XXXNAMENONAMETYPEPACKLITERALNILADDSUBORXORADDSTRADDRANDANDAPPENDBYTES2STRBYTES2STRTMPRUNES2STRSTR2BYTESSTR2BYTESTMPSTR2RUNESASAS2AS2DOTTYPEAS2FUNCAS2MAPRAS2RECVASOPCALLCALLFUNCCALLMETHCALLINTERCALLPARTCAPCLOSECLOSURECOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVNOPCOPYDCLDCLFUNCDCLCONSTDCLTYPEDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTDEREFINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMAKESLICECOPYMULDIVMODLSHRSHANDANDNOTNEWNEWOBJNOTBITNOTPLUSNEGORORPANICPRINTPRINTNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRSLICEHEADERRECOVERRECVRUNESTRSELRECV2IOTAREALIMAGCOMPLEXALIGNOFOFFSETOFSIZEOFMETHEXPRSTMTEXPRBLOCKBREAKCASECONTINUEDEFERFALLFORFORUNTILGOTOIFLABELGORANGERETURNSELECTSWITCHTYPESWTCHANTMAPTSTRUCTTINTERTFUNCTARRAYTSLICEINLCALLEFACEITABIDATASPTRCFUNCCHECKNILVARDEFVARKILLVARLIVERESULTINLMARKNAMEOFFSETRETJMPGETGEND"
const _Op_name = "XXXNAMENONAMETYPEPACKLITERALNILADDSUBORXORADDSTRADDRANDANDAPPENDBYTES2STRBYTES2STRTMPRUNES2STRSTR2BYTESSTR2BYTESTMPSTR2RUNESASAS2AS2DOTTYPEAS2FUNCAS2MAPRAS2RECVASOPCALLCALLFUNCCALLMETHCALLINTERCALLPARTCAPCLOSECLOSURECOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVNOPCOPYDCLDCLFUNCDCLCONSTDCLTYPEDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTDEREFINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMAKESLICECOPYMULDIVMODLSHRSHANDANDNOTNEWNOTBITNOTPLUSNEGORORPANICPRINTPRINTNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRSLICEHEADERRECOVERRECVRUNESTRSELRECV2IOTAREALIMAGCOMPLEXALIGNOFOFFSETOFSIZEOFMETHEXPRSTMTEXPRBLOCKBREAKCASECONTINUEDEFERFALLFORFORUNTILGOTOIFLABELGORANGERETURNSELECTSWITCHTYPESWTCHANTMAPTSTRUCTTINTERTFUNCTARRAYTSLICEINLCALLEFACEITABIDATASPTRCFUNCCHECKNILVARDEFVARKILLVARLIVERESULTINLMARKLINKSYMOFFSETTAILCALLGETGEND"
var _Op_index = [...]uint16{0, 3, 7, 13, 17, 21, 28, 31, 34, 37, 39, 42, 48, 52, 58, 64, 73, 85, 94, 103, 115, 124, 126, 129, 139, 146, 153, 160, 164, 168, 176, 184, 193, 201, 204, 209, 216, 223, 229, 238, 246, 254, 260, 264, 273, 280, 284, 287, 294, 302, 309, 315, 318, 324, 331, 339, 343, 350, 358, 360, 362, 364, 366, 368, 370, 375, 380, 388, 391, 400, 403, 407, 415, 422, 431, 444, 447, 450, 453, 456, 459, 462, 468, 471, 477, 480, 486, 490, 493, 497, 502, 507, 513, 518, 522, 527, 535, 543, 549, 558, 569, 576, 580, 587, 595, 599, 603, 607, 614, 621, 629, 635, 643, 651, 656, 661, 665, 673, 678, 682, 685, 693, 697, 699, 704, 706, 711, 717, 723, 729, 735, 740, 744, 751, 757, 762, 768, 774, 781, 786, 790, 795, 799, 804, 812, 818, 825, 832, 838, 845, 855, 861, 865, 868}
var _Op_index = [...]uint16{0, 3, 7, 13, 17, 21, 28, 31, 34, 37, 39, 42, 48, 52, 58, 64, 73, 85, 94, 103, 115, 124, 126, 129, 139, 146, 153, 160, 164, 168, 176, 184, 193, 201, 204, 209, 216, 223, 229, 238, 246, 254, 260, 264, 273, 280, 284, 287, 294, 302, 309, 315, 318, 324, 331, 339, 343, 350, 358, 360, 362, 364, 366, 368, 370, 375, 380, 388, 391, 400, 403, 407, 415, 422, 431, 444, 447, 450, 453, 456, 459, 462, 468, 471, 474, 480, 484, 487, 491, 496, 501, 507, 512, 516, 521, 529, 537, 543, 552, 563, 570, 574, 581, 589, 593, 597, 601, 608, 615, 623, 629, 637, 645, 650, 655, 659, 667, 672, 676, 679, 687, 691, 693, 698, 700, 705, 711, 717, 723, 729, 734, 738, 745, 751, 756, 762, 768, 775, 780, 784, 789, 793, 798, 806, 812, 819, 826, 832, 839, 852, 860, 864, 867}
func (i Op) String() string {
if i >= Op(len(_Op_index)-1) {

View file

@ -21,7 +21,7 @@ func TestSizeof(t *testing.T) {
_64bit uintptr // size on 64bit platforms
}{
{Func{}, 188, 328},
{Name{}, 116, 208},
{Name{}, 112, 200},
}
for _, tt := range tests {

View file

@ -50,11 +50,9 @@ type miniStmt struct {
func (*miniStmt) isStmt() {}
func (n *miniStmt) Init() Nodes { return n.init }
func (n *miniStmt) SetInit(x Nodes) { n.init = x }
func (n *miniStmt) PtrInit() *Nodes { return &n.init }
func (n *miniStmt) HasCall() bool { return n.bits&miniHasCall != 0 }
func (n *miniStmt) SetHasCall(b bool) { n.bits.set(miniHasCall, b) }
func (n *miniStmt) Init() Nodes { return n.init }
func (n *miniStmt) SetInit(x Nodes) { n.init = x }
func (n *miniStmt) PtrInit() *Nodes { return &n.init }
// An AssignListStmt is an assignment statement with
// more than one item on at least one side: Lhs = Rhs.
@ -146,9 +144,6 @@ func NewBlockStmt(pos src.XPos, list []Node) *BlockStmt {
}
// A BranchStmt is a break, continue, fallthrough, or goto statement.
//
// For back-end code generation, Op may also be RETJMP (return+jump),
// in which case the label names another function entirely.
type BranchStmt struct {
miniStmt
Label *types.Sym // label if present
@ -156,7 +151,7 @@ type BranchStmt struct {
func NewBranchStmt(pos src.XPos, op Op, label *types.Sym) *BranchStmt {
switch op {
case OBREAK, OCONTINUE, OFALL, OGOTO, ORETJMP:
case OBREAK, OCONTINUE, OFALL, OGOTO:
// ok
default:
panic("NewBranch " + op.String())
@ -343,7 +338,7 @@ type SelectStmt struct {
HasBreak bool
// TODO(rsc): Instead of recording here, replace with a block?
Compiled Nodes // compiled form, after walkswitch
Compiled Nodes // compiled form, after walkSelect
}
func NewSelectStmt(pos src.XPos, cases []*CommClause) *SelectStmt {
@ -376,7 +371,7 @@ type SwitchStmt struct {
HasBreak bool
// TODO(rsc): Instead of recording here, replace with a block?
Compiled Nodes // compiled form, after walkswitch
Compiled Nodes // compiled form, after walkSwitch
}
func NewSwitchStmt(pos src.XPos, tag Node, cases []*CaseClause) *SwitchStmt {
@ -386,6 +381,23 @@ func NewSwitchStmt(pos src.XPos, tag Node, cases []*CaseClause) *SwitchStmt {
return n
}
// A TailCallStmt is a tail call statement, which is used for back-end
// code generation to jump directly to another function entirely.
type TailCallStmt struct {
miniStmt
Target *Name
}
func NewTailCallStmt(pos src.XPos, target *Name) *TailCallStmt {
if target.Op() != ONAME || target.Class != PFUNC {
base.FatalfAt(pos, "tail call to non-func %v", target)
}
n := &TailCallStmt{Target: target}
n.pos = pos
n.op = OTAILCALL
return n
}
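The reflectdata change later in this CL shows the intended construction site, replacing the old ORETJMP branch in method wrappers:

// Jump straight to the method body instead of calling it; the target
// must be an ONAME with Class PFUNC, or NewTailCallStmt Fatalfs.
fn.Body.Append(ir.NewTailCallStmt(base.Pos, method.Nname.(*ir.Name)))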
// A TypeSwitchGuard is the [Name :=] X.(type) in a type switch.
type TypeSwitchGuard struct {
miniNode

View file

@ -9,12 +9,6 @@ import (
"cmd/internal/obj"
)
// Names holds known names.
var Names struct {
Staticuint64s *Name
Zerobase *Name
}
// Syms holds known symbols.
var Syms struct {
AssertE2I *obj.LSym
@ -46,6 +40,7 @@ var Syms struct {
Racewriterange *obj.LSym
// Wasm
SigPanic *obj.LSym
Staticuint64s *obj.LSym
Typedmemclr *obj.LSym
Typedmemmove *obj.LSym
Udiv *obj.LSym
@ -70,13 +65,8 @@ var Syms struct {
// Pkgs holds known packages.
var Pkgs struct {
Go *types.Pkg
Itab *types.Pkg
Itablink *types.Pkg
Map *types.Pkg
Msan *types.Pkg
Race *types.Pkg
Runtime *types.Pkg
Track *types.Pkg
Unsafe *types.Pkg
Go *types.Pkg
Itab *types.Pkg
Runtime *types.Pkg
Unsafe *types.Pkg
}

View file

@ -47,7 +47,7 @@ func (m *bvecSet) grow() {
m.index = newIndex
}
// add adds bv to the set and returns its index in m.extractUniqe.
// add adds bv to the set and returns its index in m.extractUnique.
// The caller must not modify bv after this.
func (m *bvecSet) add(bv bitvec.BitVec) int {
if len(m.uniq)*4 >= len(m.index) {

View file

@ -17,12 +17,14 @@ package liveness
import (
"crypto/md5"
"fmt"
"sort"
"strings"
"cmd/compile/internal/base"
"cmd/compile/internal/bitvec"
"cmd/compile/internal/ir"
"cmd/compile/internal/objw"
"cmd/compile/internal/reflectdata"
"cmd/compile/internal/ssa"
"cmd/compile/internal/typebits"
"cmd/compile/internal/types"
@ -174,14 +176,14 @@ type progeffectscache struct {
initialized bool
}
// ShouldTrack reports whether the liveness analysis
// shouldTrack reports whether the liveness analysis
// should track the variable n.
// We don't care about variables that have no pointers,
// nor do we care about non-local variables,
// nor do we care about empty structs (handled by the pointer check),
// nor do we care about the fake PAUTOHEAP variables.
func ShouldTrack(n *ir.Name) bool {
return (n.Class == ir.PAUTO || n.Class == ir.PPARAM || n.Class == ir.PPARAMOUT) && n.Type().HasPointers()
func shouldTrack(n *ir.Name) bool {
return (n.Class == ir.PAUTO && n.Esc() != ir.EscHeap || n.Class == ir.PPARAM || n.Class == ir.PPARAMOUT) && n.Type().HasPointers()
}
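The added Esc clause drops heap-escaping autos from liveness tracking. An illustrative sketch (hypothetical function, classes per the rules above):

func f(p *int) (q *int) { // p, q: PPARAM/PPARAMOUT with pointers: tracked
	buf := make([]byte, 8) // PAUTO holding a pointer, non-escaping: tracked
	n := 0                 // PAUTO without pointers: never tracked
	_, _ = buf, n
	return p
}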
// getvariables returns the list of on-stack variables that we need to track
@ -189,7 +191,7 @@ func ShouldTrack(n *ir.Name) bool {
func getvariables(fn *ir.Func) ([]*ir.Name, map[*ir.Name]int32) {
var vars []*ir.Name
for _, n := range fn.Dcl {
if ShouldTrack(n) {
if shouldTrack(n) {
vars = append(vars, n)
}
}
@ -788,7 +790,7 @@ func (lv *liveness) epilogue() {
if n.Class == ir.PPARAM {
continue // ok
}
base.Fatalf("bad live variable at entry of %v: %L", lv.fn.Nname, n)
base.FatalfAt(n.Pos(), "bad live variable at entry of %v: %L", lv.fn.Nname, n)
}
// Record live variables.
@ -1060,7 +1062,7 @@ func (lv *liveness) printDebug() {
func (lv *liveness) emit() (argsSym, liveSym *obj.LSym) {
// Size args bitmaps to be just large enough to hold the largest pointer.
// First, find the largest Xoffset node we care about.
// (Nodes without pointers aren't in lv.vars; see livenessShouldTrack.)
// (Nodes without pointers aren't in lv.vars; see shouldTrack.)
var maxArgNode *ir.Name
for _, n := range lv.vars {
switch n.Class {
@ -1179,9 +1181,54 @@ func Compute(curfn *ir.Func, f *ssa.Func, stkptrsize int64, pp *objw.Progs) Map
p.To.Name = obj.NAME_EXTERN
p.To.Sym = fninfo.GCLocals
if x := lv.emitStackObjects(); x != nil {
p := pp.Prog(obj.AFUNCDATA)
p.From.SetConst(objabi.FUNCDATA_StackObjects)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = x
}
return lv.livenessMap
}
func (lv *liveness) emitStackObjects() *obj.LSym {
var vars []*ir.Name
for _, n := range lv.fn.Dcl {
if shouldTrack(n) && n.Addrtaken() && n.Esc() != ir.EscHeap {
vars = append(vars, n)
}
}
if len(vars) == 0 {
return nil
}
// Sort variables from lowest to highest address.
sort.Slice(vars, func(i, j int) bool { return vars[i].FrameOffset() < vars[j].FrameOffset() })
// Populate the stack object data.
// Format must match runtime/stack.go:stackObjectRecord.
x := base.Ctxt.Lookup(lv.fn.LSym.Name + ".stkobj")
lv.fn.LSym.Func().StackObjects = x
off := 0
off = objw.Uintptr(x, off, uint64(len(vars)))
for _, v := range vars {
// Note: arguments and return values have non-negative Xoffset,
// in which case the offset is relative to argp.
// Locals have a negative Xoffset, in which case the offset is relative to varp.
off = objw.Uintptr(x, off, uint64(v.FrameOffset()))
off = objw.SymPtr(x, off, reflectdata.TypeLinksym(v.Type()), 0)
}
if base.Flag.Live != 0 {
for _, v := range vars {
base.WarnfAt(v.Pos(), "stack object %v %v", v, v.Type())
}
}
return x
}
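The words written above must stay in sync with runtime/stack.go:stackObjectRecord. A sketch of the layout as emitted here, with illustrative names (the runtime's own definitions differ in spelling):

// One count word, then one (offset, type) pair per tracked variable.
type stackObjectsHeaderSketch struct {
	N uintptr // number of records
}

type stackObjectEntrySketch struct {
	Off uintptr // frame offset: argp-relative if >= 0, varp-relative if < 0
	Typ uintptr // address of the variable's runtime type descriptor
}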
// isfat reports whether a variable of type t needs multiple assignments to initialize.
// For example:
//

View file

@ -48,6 +48,9 @@ func (g *irgen) importDecl(p *noder, decl *syntax.ImportDecl) {
if ipkg == ir.Pkgs.Unsafe {
p.importedUnsafe = true
}
if ipkg.Path == "embed" {
p.importedEmbed = true
}
}
func (g *irgen) constDecl(out *ir.Nodes, decl *syntax.ConstDecl) {
@ -164,9 +167,8 @@ func (g *irgen) varDecl(out *ir.Nodes, decl *syntax.VarDecl) {
if decl.Pragma != nil {
pragma := decl.Pragma.(*pragmas)
if err := varEmbed(g.makeXPos, names[0], decl, pragma); err != nil {
base.ErrorfAt(g.pos(decl), "%s", err.Error())
}
// TODO(mdempsky): Plumb noder.importedEmbed through to here.
varEmbed(g.makeXPos, names[0], decl, pragma, true)
g.reportUnused(pragma)
}

View file

@ -428,7 +428,7 @@ func clearImports() {
if types.IsDotAlias(s) {
// throw away top-level name left over
// from previous import . "x"
// We'll report errors after type checking in checkDotImports.
// We'll report errors after type checking in CheckDotImports.
s.Def = nil
continue
}

View file

@ -28,6 +28,7 @@ const (
ir.Nosplit |
ir.Noinline |
ir.NoCheckPtr |
ir.RegisterParams | // TODO remove after register abi is working
ir.CgoUnsafeArgs |
ir.UintptrEscapes |
ir.Systemstack |
@ -79,6 +80,8 @@ func pragmaFlag(verb string) ir.PragmaFlag {
// in the argument list.
// Used in syscall/dll_windows.go.
return ir.UintptrEscapes
case "go:registerparams": // TODO remove after register abi is working
return ir.RegisterParams
case "go:notinheap":
return ir.NotInHeap
}

View file

@ -5,7 +5,6 @@
package noder
import (
"errors"
"fmt"
"go/constant"
"go/token"
@ -136,7 +135,15 @@ func LoadPackage(filenames []string) {
for i := 0; i < len(typecheck.Target.Decls); i++ {
n := typecheck.Target.Decls[i]
if n.Op() == ir.ODCLFUNC {
if base.Flag.W > 1 {
s := fmt.Sprintf("\nbefore typecheck %v", n)
ir.Dump(s, n)
}
typecheck.FuncBody(n.(*ir.Func))
if base.Flag.W > 1 {
s := fmt.Sprintf("\nafter typecheck %v", n)
ir.Dump(s, n)
}
fcount++
}
}
@ -385,9 +392,7 @@ func (p *noder) varDecl(decl *syntax.VarDecl) []ir.Node {
exprs := p.exprList(decl.Values)
if pragma, ok := decl.Pragma.(*pragmas); ok {
if err := varEmbed(p.makeXPos, names[0], decl, pragma); err != nil {
p.errorAt(decl.Pos(), "%s", err.Error())
}
varEmbed(p.makeXPos, names[0], decl, pragma, p.importedEmbed)
p.checkUnused(pragma)
}
@ -555,7 +560,7 @@ func (p *noder) funcDecl(fun *syntax.FuncDecl) ir.Node {
}
} else {
f.Shortname = name
name = ir.BlankNode.Sym() // filled in by typecheckfunc
name = ir.BlankNode.Sym() // filled in by tcFunc
}
f.Nname = ir.NewNameAt(p.pos(fun.Name), name)
@ -1001,7 +1006,7 @@ func (p *noder) stmtsFall(stmts []syntax.Stmt, fallOK bool) []ir.Node {
if s == nil {
} else if s.Op() == ir.OBLOCK && len(s.(*ir.BlockStmt).List) > 0 {
// Inline non-empty block.
// Empty blocks must be preserved for checkreturn.
// Empty blocks must be preserved for CheckReturn.
nodes = append(nodes, s.(*ir.BlockStmt).List...)
} else {
nodes = append(nodes, s)
@ -1774,7 +1779,7 @@ func (p *noder) funcLit(expr *syntax.FuncLit) ir.Node {
fn := ir.NewFunc(p.pos(expr))
fn.SetIsHiddenClosure(ir.CurFunc != nil)
fn.Nname = ir.NewNameAt(p.pos(expr), ir.BlankNode.Sym()) // filled in by typecheckclosure
fn.Nname = ir.NewNameAt(p.pos(expr), ir.BlankNode.Sym()) // filled in by tcClosure
fn.Nname.Func = fn
fn.Nname.Ntype = xtype
fn.Nname.Defn = fn
@ -1829,29 +1834,35 @@ func oldname(s *types.Sym) ir.Node {
return n
}
func varEmbed(makeXPos func(syntax.Pos) src.XPos, name *ir.Name, decl *syntax.VarDecl, pragma *pragmas) error {
func varEmbed(makeXPos func(syntax.Pos) src.XPos, name *ir.Name, decl *syntax.VarDecl, pragma *pragmas, haveEmbed bool) {
if pragma.Embeds == nil {
return nil
return
}
pragmaEmbeds := pragma.Embeds
pragma.Embeds = nil
pos := makeXPos(pragmaEmbeds[0].Pos)
if base.Flag.Cfg.Embed.Patterns == nil {
return errors.New("invalid go:embed: build system did not supply embed configuration")
if !haveEmbed {
base.ErrorfAt(pos, "go:embed only allowed in Go files that import \"embed\"")
return
}
if len(decl.NameList) > 1 {
return errors.New("go:embed cannot apply to multiple vars")
base.ErrorfAt(pos, "go:embed cannot apply to multiple vars")
return
}
if decl.Values != nil {
return errors.New("go:embed cannot apply to var with initializer")
base.ErrorfAt(pos, "go:embed cannot apply to var with initializer")
return
}
if decl.Type == nil {
// Should not happen, since Values == nil now.
return errors.New("go:embed cannot apply to var without type")
base.ErrorfAt(pos, "go:embed cannot apply to var without type")
return
}
if typecheck.DeclContext != ir.PEXTERN {
return errors.New("go:embed cannot apply to var inside func")
base.ErrorfAt(pos, "go:embed cannot apply to var inside func")
return
}
var embeds []ir.Embed
@ -1860,5 +1871,4 @@ func varEmbed(makeXPos func(syntax.Pos) src.XPos, name *ir.Name, decl *syntax.Va
}
typecheck.Target.Embeds = append(typecheck.Target.Embeds, name)
name.Embed = &embeds
return nil
}
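A minimal declaration that passes every check above: the file imports "embed", and the directive covers a single package-scope var with a type and no initializer (hello.txt is a placeholder):

package p

import "embed"

//go:embed hello.txt
var hello embed.FS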

View file

@ -205,7 +205,7 @@ func (pp *Progs) Append(p *obj.Prog, as obj.As, ftype obj.AddrType, freg int16,
func (pp *Progs) SetText(fn *ir.Func) {
if pp.Text != nil {
base.Fatalf("Progs.settext called twice")
base.Fatalf("Progs.SetText called twice")
}
ptxt := pp.Prog(obj.ATEXT)
pp.Text = ptxt

View file

@ -60,10 +60,10 @@ func Task() *ir.Name {
fns = append(fns, fn.Linksym())
}
if typecheck.InitTodoFunc.Dcl != nil {
// We only generate temps using initTodo if there
// We only generate temps using InitTodoFunc if there
// are package-scope initialization statements, so
// something's weird if we get here.
base.Fatalf("initTodo still has declarations")
base.Fatalf("InitTodoFunc still has declarations")
}
typecheck.InitTodoFunc = nil

View file

@ -113,7 +113,7 @@ func initOrder(l []ir.Node) []ir.Node {
// first.
base.ExitIfErrors()
o.findInitLoopAndExit(firstLHS(n), new([]*ir.Name))
o.findInitLoopAndExit(firstLHS(n), new([]*ir.Name), new(ir.NameSet))
base.Fatalf("initialization unfinished, but failed to identify loop")
}
}
@ -184,10 +184,7 @@ func (o *InitOrder) flushReady(initialize func(ir.Node)) {
// path points to a slice used for tracking the sequence of
// variables/functions visited. Using a pointer to a slice allows the
// slice capacity to grow and limit reallocations.
func (o *InitOrder) findInitLoopAndExit(n *ir.Name, path *[]*ir.Name) {
// We implement a simple DFS loop-finding algorithm. This
// could be faster, but initialization cycles are rare.
func (o *InitOrder) findInitLoopAndExit(n *ir.Name, path *[]*ir.Name, ok *ir.NameSet) {
for i, x := range *path {
if x == n {
reportInitLoopAndExit((*path)[i:])
@ -204,12 +201,19 @@ func (o *InitOrder) findInitLoopAndExit(n *ir.Name, path *[]*ir.Name) {
*path = append(*path, n)
for _, ref := range refers {
// Short-circuit variables that were initialized.
if ref.Class == ir.PEXTERN && o.order[ref.Defn] == orderDone {
if ref.Class == ir.PEXTERN && o.order[ref.Defn] == orderDone || ok.Has(ref) {
continue
}
o.findInitLoopAndExit(ref, path)
o.findInitLoopAndExit(ref, path, ok)
}
// n is not involved in a cycle.
// Record that fact to avoid checking it again when reached another way,
// or else this traversal will take exponential time traversing all paths
// through the part of the package's call graph implicated in the cycle.
ok.Add(n)
*path = (*path)[:len(*path)-1]
}
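The ok set is what keeps this DFS linear on diamond-shaped (acyclic) reference chains; a sketch of the shape it protects against:

package p

// Each level refers to the next twice; without the ok memoization the
// traversal explores all 2^depth paths before ruling out a cycle.
var a0 = a1 + a1
var a1 = a2 + a2
var a2 = a3 + a3
var a3 = 1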

View file

@ -689,7 +689,7 @@ func EqString(s, t ir.Node) (eqlen *ir.BinaryExpr, eqmem *ir.CallExpr) {
// eqtab must be evaluated before eqdata, and shortcircuiting is required.
func EqInterface(s, t ir.Node) (eqtab *ir.BinaryExpr, eqdata *ir.CallExpr) {
if !types.Identical(s.Type(), t.Type()) {
base.Fatalf("eqinterface %v %v", s.Type(), t.Type())
base.Fatalf("EqInterface %v %v", s.Type(), t.Type())
}
// func ifaceeq(tab *uintptr, x, y unsafe.Pointer) (ret bool)
// func efaceeq(typ *uintptr, x, y unsafe.Pointer) (ret bool)

View file

@ -32,7 +32,7 @@ type itabEntry struct {
// symbols of each method in
// the itab, sorted by byte offset;
// filled in by peekitabs
// filled in by CompileITabs
entries []*obj.LSym
}
@ -401,7 +401,7 @@ func dimportpath(p *types.Pkg) {
}
// If we are compiling the runtime package, there are two runtime packages around
// -- localpkg and Runtimepkg. We don't want to produce import path symbols for
// -- localpkg and Pkgs.Runtime. We don't want to produce import path symbols for
// both of them, so just produce one for localpkg.
if base.Ctxt.Pkgpath == "runtime" && p == ir.Pkgs.Runtime {
return
@ -562,7 +562,7 @@ func dextratype(lsym *obj.LSym, ot int, t *types.Type, dataAdd int) int {
}
for _, a := range m {
WriteType(a.type_)
writeType(a.type_)
}
ot = dgopkgpathOff(lsym, ot, typePkg(t))
@ -613,7 +613,7 @@ func dextratypeData(lsym *obj.LSym, ot int, t *types.Type) int {
nsym := dname(a.name.Name, "", pkg, exported)
ot = objw.SymPtrOff(lsym, ot, nsym)
ot = dmethodptrOff(lsym, ot, WriteType(a.mtype))
ot = dmethodptrOff(lsym, ot, writeType(a.mtype))
ot = dmethodptrOff(lsym, ot, a.isym)
ot = dmethodptrOff(lsym, ot, a.tsym)
}
@ -690,7 +690,7 @@ func dcommontype(lsym *obj.LSym, t *types.Type) int {
if t.Sym() != nil || methods(tptr) != nil {
sptrWeak = false
}
sptr = WriteType(tptr)
sptr = writeType(tptr)
}
gcsym, useGCProg, ptrdata := dgcsym(t)
@ -791,7 +791,7 @@ func dcommontype(lsym *obj.LSym, t *types.Type) int {
// TrackSym returns the symbol for tracking use of field/method f, assumed
// to be a member of struct/interface type t.
func TrackSym(t *types.Type, f *types.Field) *obj.LSym {
return ir.Pkgs.Track.Lookup(t.ShortString() + "." + f.Sym.Name).Linksym()
return base.PkgLinksym("go.track", t.ShortString()+"."+f.Sym.Name, obj.ABI0)
}
func TypeSymPrefix(prefix string, t *types.Type) *types.Sym {
@ -811,7 +811,7 @@ func TypeSymPrefix(prefix string, t *types.Type) *types.Sym {
func TypeSym(t *types.Type) *types.Sym {
if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() {
base.Fatalf("typenamesym %v", t)
base.Fatalf("TypeSym %v", t)
}
if t.Kind() == types.TFUNC && t.Recv() != nil {
base.Fatalf("misuse of method type: %v", t)
@ -836,39 +836,22 @@ func TypeLinksym(t *types.Type) *obj.LSym {
}
func TypePtr(t *types.Type) *ir.AddrExpr {
s := TypeSym(t)
if s.Def == nil {
n := ir.NewNameAt(src.NoXPos, s)
n.SetType(types.Types[types.TUINT8])
n.Class = ir.PEXTERN
n.SetTypecheck(1)
s.Def = n
}
n := typecheck.NodAddr(ir.AsNode(s.Def))
n.SetType(types.NewPtr(s.Def.Type()))
n.SetTypecheck(1)
return n
n := ir.NewLinksymExpr(base.Pos, TypeLinksym(t), types.Types[types.TUINT8])
return typecheck.Expr(typecheck.NodAddr(n)).(*ir.AddrExpr)
}
func ITabAddr(t, itype *types.Type) *ir.AddrExpr {
if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() || !itype.IsInterface() || itype.IsEmptyInterface() {
base.Fatalf("itabname(%v, %v)", t, itype)
base.Fatalf("ITabAddr(%v, %v)", t, itype)
}
s := ir.Pkgs.Itab.Lookup(t.ShortString() + "," + itype.ShortString())
if s.Def == nil {
n := typecheck.NewName(s)
n.SetType(types.Types[types.TUINT8])
n.Class = ir.PEXTERN
n.SetTypecheck(1)
s.Def = n
itabs = append(itabs, itabEntry{t: t, itype: itype, lsym: n.Linksym()})
s, existed := ir.Pkgs.Itab.LookupOK(t.ShortString() + "," + itype.ShortString())
if !existed {
itabs = append(itabs, itabEntry{t: t, itype: itype, lsym: s.Linksym()})
}
n := typecheck.NodAddr(ir.AsNode(s.Def))
n.SetType(types.NewPtr(s.Def.Type()))
n.SetTypecheck(1)
return n
lsym := s.Linksym()
n := ir.NewLinksymExpr(base.Pos, lsym, types.Types[types.TUINT8])
return typecheck.Expr(typecheck.NodAddr(n)).(*ir.AddrExpr)
}
// needkeyupdate reports whether map updates with t as a key
@ -933,10 +916,10 @@ func formalType(t *types.Type) *types.Type {
return t
}
func WriteType(t *types.Type) *obj.LSym {
func writeType(t *types.Type) *obj.LSym {
t = formalType(t)
if t.IsUntyped() {
base.Fatalf("dtypesym %v", t)
base.Fatalf("writeType %v", t)
}
s := types.TypeSym(t)
@ -983,9 +966,9 @@ func WriteType(t *types.Type) *obj.LSym {
case types.TARRAY:
// ../../../../runtime/type.go:/arrayType
s1 := WriteType(t.Elem())
s1 := writeType(t.Elem())
t2 := types.NewSlice(t.Elem())
s2 := WriteType(t2)
s2 := writeType(t2)
ot = dcommontype(lsym, t)
ot = objw.SymPtr(lsym, ot, s1, 0)
ot = objw.SymPtr(lsym, ot, s2, 0)
@ -994,14 +977,14 @@ func WriteType(t *types.Type) *obj.LSym {
case types.TSLICE:
// ../../../../runtime/type.go:/sliceType
s1 := WriteType(t.Elem())
s1 := writeType(t.Elem())
ot = dcommontype(lsym, t)
ot = objw.SymPtr(lsym, ot, s1, 0)
ot = dextratype(lsym, ot, t, 0)
case types.TCHAN:
// ../../../../runtime/type.go:/chanType
s1 := WriteType(t.Elem())
s1 := writeType(t.Elem())
ot = dcommontype(lsym, t)
ot = objw.SymPtr(lsym, ot, s1, 0)
ot = objw.Uintptr(lsym, ot, uint64(t.ChanDir()))
@ -1009,15 +992,15 @@ func WriteType(t *types.Type) *obj.LSym {
case types.TFUNC:
for _, t1 := range t.Recvs().Fields().Slice() {
WriteType(t1.Type)
writeType(t1.Type)
}
isddd := false
for _, t1 := range t.Params().Fields().Slice() {
isddd = t1.IsDDD()
WriteType(t1.Type)
writeType(t1.Type)
}
for _, t1 := range t.Results().Fields().Slice() {
WriteType(t1.Type)
writeType(t1.Type)
}
ot = dcommontype(lsym, t)
@ -1037,20 +1020,20 @@ func WriteType(t *types.Type) *obj.LSym {
// Array of rtype pointers follows funcType.
for _, t1 := range t.Recvs().Fields().Slice() {
ot = objw.SymPtr(lsym, ot, WriteType(t1.Type), 0)
ot = objw.SymPtr(lsym, ot, writeType(t1.Type), 0)
}
for _, t1 := range t.Params().Fields().Slice() {
ot = objw.SymPtr(lsym, ot, WriteType(t1.Type), 0)
ot = objw.SymPtr(lsym, ot, writeType(t1.Type), 0)
}
for _, t1 := range t.Results().Fields().Slice() {
ot = objw.SymPtr(lsym, ot, WriteType(t1.Type), 0)
ot = objw.SymPtr(lsym, ot, writeType(t1.Type), 0)
}
case types.TINTER:
m := imethods(t)
n := len(m)
for _, a := range m {
WriteType(a.type_)
writeType(a.type_)
}
// ../../../../runtime/type.go:/interfaceType
@ -1078,14 +1061,14 @@ func WriteType(t *types.Type) *obj.LSym {
nsym := dname(a.name.Name, "", pkg, exported)
ot = objw.SymPtrOff(lsym, ot, nsym)
ot = objw.SymPtrOff(lsym, ot, WriteType(a.type_))
ot = objw.SymPtrOff(lsym, ot, writeType(a.type_))
}
// ../../../../runtime/type.go:/mapType
case types.TMAP:
s1 := WriteType(t.Key())
s2 := WriteType(t.Elem())
s3 := WriteType(MapBucketType(t))
s1 := writeType(t.Key())
s2 := writeType(t.Elem())
s3 := writeType(MapBucketType(t))
hasher := genhash(t.Key())
ot = dcommontype(lsym, t)
@ -1132,7 +1115,7 @@ func WriteType(t *types.Type) *obj.LSym {
}
// ../../../../runtime/type.go:/ptrType
s1 := WriteType(t.Elem())
s1 := writeType(t.Elem())
ot = dcommontype(lsym, t)
ot = objw.SymPtr(lsym, ot, s1, 0)
@ -1143,7 +1126,7 @@ func WriteType(t *types.Type) *obj.LSym {
case types.TSTRUCT:
fields := t.Fields().Slice()
for _, t1 := range fields {
WriteType(t1.Type)
writeType(t1.Type)
}
// All non-exported struct field names within a struct
@ -1171,7 +1154,7 @@ func WriteType(t *types.Type) *obj.LSym {
for _, f := range fields {
// ../../../../runtime/type.go:/structField
ot = dnameField(lsym, ot, spkg, f)
ot = objw.SymPtr(lsym, ot, WriteType(f.Type), 0)
ot = objw.SymPtr(lsym, ot, writeType(f.Type), 0)
offsetAnon := uint64(f.Offset) << 1
if offsetAnon>>1 != uint64(f.Offset) {
base.Fatalf("%v: bad field offset for %s", t, f.Sym.Name)
@ -1275,7 +1258,7 @@ func genfun(t, it *types.Type) []*obj.LSym {
}
// ITabSym uses the information gathered in
// peekitabs to de-virtualize interface methods.
// CompileITabs to de-virtualize interface methods.
// Since this is called by the SSA backend, it shouldn't
// generate additional Nodes, Syms, etc.
func ITabSym(it *obj.LSym, offset int64) *obj.LSym {
@ -1312,7 +1295,7 @@ func NeedRuntimeType(t *types.Type) {
}
func WriteRuntimeTypes() {
// Process signatset. Use a loop, as dtypesym adds
// Process signatset. Use a loop, as writeType adds
// entries to signatset while it is being processed.
signats := make([]typeAndStr, len(signatslice))
for len(signatslice) > 0 {
@ -1326,9 +1309,9 @@ func WriteRuntimeTypes() {
sort.Sort(typesByString(signats))
for _, ts := range signats {
t := ts.t
WriteType(t)
writeType(t)
if t.Sym() != nil {
WriteType(types.NewPtr(t))
writeType(types.NewPtr(t))
}
}
}
@ -1345,8 +1328,8 @@ func WriteTabs() {
// _ [4]byte
// fun [1]uintptr // variable sized
// }
o := objw.SymPtr(i.lsym, 0, WriteType(i.itype), 0)
o = objw.SymPtr(i.lsym, o, WriteType(i.t), 0)
o := objw.SymPtr(i.lsym, 0, writeType(i.itype), 0)
o = objw.SymPtr(i.lsym, o, writeType(i.t), 0)
o = objw.Uint32(i.lsym, o, types.TypeHash(i.t)) // copy of type hash
o += 4 // skip unused field
for _, fn := range genfun(i.t, i.itype) {
@ -1373,7 +1356,7 @@ func WriteTabs() {
if p.Class != ir.PFUNC {
t = types.NewPtr(t)
}
tsym := WriteType(t)
tsym := writeType(t)
ot = objw.SymPtrOff(s, ot, nsym)
ot = objw.SymPtrOff(s, ot, tsym)
// Plugin exports symbols as interfaces. Mark their types
@ -1407,16 +1390,16 @@ func WriteBasicTypes() {
// but using runtime means fewer copies in object files.
if base.Ctxt.Pkgpath == "runtime" {
for i := types.Kind(1); i <= types.TBOOL; i++ {
WriteType(types.NewPtr(types.Types[i]))
writeType(types.NewPtr(types.Types[i]))
}
WriteType(types.NewPtr(types.Types[types.TSTRING]))
WriteType(types.NewPtr(types.Types[types.TUNSAFEPTR]))
writeType(types.NewPtr(types.Types[types.TSTRING]))
writeType(types.NewPtr(types.Types[types.TUNSAFEPTR]))
// emit type structs for error and func(error) string.
// The latter is the type of an auto-generated wrapper.
WriteType(types.NewPtr(types.ErrorType))
writeType(types.NewPtr(types.ErrorType))
WriteType(types.NewSignature(types.NoPkg, nil, []*types.Field{
writeType(types.NewSignature(types.NoPkg, nil, []*types.Field{
types.NewField(base.Pos, nil, types.ErrorType),
}, []*types.Field{
types.NewField(base.Pos, nil, types.Types[types.TSTRING]),
@ -1426,11 +1409,12 @@ func WriteBasicTypes() {
dimportpath(ir.Pkgs.Runtime)
if base.Flag.Race {
dimportpath(ir.Pkgs.Race)
dimportpath(types.NewPkg("runtime/race", ""))
}
if base.Flag.MSan {
dimportpath(ir.Pkgs.Msan)
dimportpath(types.NewPkg("runtime/msan", ""))
}
dimportpath(types.NewPkg("main", ""))
}
}
@ -1617,13 +1601,13 @@ func (p *gcProg) emit(t *types.Type, offset int64) {
}
switch t.Kind() {
default:
base.Fatalf("GCProg.emit: unexpected type %v", t)
base.Fatalf("gcProg.emit: unexpected type %v", t)
case types.TSTRING:
p.w.Ptr(offset / int64(types.PtrSize))
case types.TINTER:
// Note: the first word isn't a pointer. See comment in plive.go:onebitwalktype1.
// Note: the first word isn't a pointer. See comment in typebits.Set
p.w.Ptr(offset/int64(types.PtrSize) + 1)
case types.TSLICE:
@ -1632,7 +1616,7 @@ func (p *gcProg) emit(t *types.Type, offset int64) {
case types.TARRAY:
if t.NumElem() == 0 {
// should have been handled by haspointers check above
base.Fatalf("GCProg.emit: empty array")
base.Fatalf("gcProg.emit: empty array")
}
// Flatten array-of-array-of-array to just a big array by multiplying counts.
@ -1670,18 +1654,9 @@ func ZeroAddr(size int64) ir.Node {
if ZeroSize < size {
ZeroSize = size
}
s := ir.Pkgs.Map.Lookup("zero")
if s.Def == nil {
x := typecheck.NewName(s)
x.SetType(types.Types[types.TUINT8])
x.Class = ir.PEXTERN
x.SetTypecheck(1)
s.Def = x
}
z := typecheck.NodAddr(ir.AsNode(s.Def))
z.SetType(types.NewPtr(types.Types[types.TUINT8]))
z.SetTypecheck(1)
return z
lsym := base.PkgLinksym("go.map", "zero", obj.ABI0)
x := ir.NewLinksymExpr(base.Pos, lsym, types.Types[types.TUINT8])
return typecheck.Expr(typecheck.NodAddr(x))
}
func CollectPTabs() {
@ -1794,7 +1769,7 @@ func methodWrapper(rcvr *types.Type, method *types.Field) *obj.LSym {
}
as := ir.NewAssignStmt(base.Pos, nthis, typecheck.ConvNop(left, rcvr))
fn.Body.Append(as)
fn.Body.Append(ir.NewBranchStmt(base.Pos, ir.ORETJMP, ir.MethodSym(methodrcvr, method.Sym)))
fn.Body.Append(ir.NewTailCallStmt(base.Pos, method.Nname.(*ir.Name)))
} else {
fn.SetWrapper(true) // ignore frame for panic+recover matching
call := ir.NewCallExpr(base.Pos, ir.OCALL, dot, nil)

View file

@ -139,7 +139,7 @@ func dse(f *Func) {
func elimDeadAutosGeneric(f *Func) {
addr := make(map[*Value]*ir.Name) // values that the address of the auto reaches
elim := make(map[*Value]*ir.Name) // values that could be eliminated if the auto is
used := make(map[*ir.Name]bool) // used autos that must be kept
var used ir.NameSet // used autos that must be kept
// visit the value and report whether any of the maps are updated
visit := func(v *Value) (changed bool) {
@ -178,8 +178,8 @@ func elimDeadAutosGeneric(f *Func) {
if !ok || n.Class != ir.PAUTO {
return
}
if !used[n] {
used[n] = true
if !used.Has(n) {
used.Add(n)
changed = true
}
return
@ -212,8 +212,8 @@ func elimDeadAutosGeneric(f *Func) {
if v.Type.IsMemory() || v.Type.IsFlags() || v.Op == OpPhi || v.MemoryArg() != nil {
for _, a := range args {
if n, ok := addr[a]; ok {
if !used[n] {
used[n] = true
if !used.Has(n) {
used.Add(n)
changed = true
}
}
@ -224,7 +224,7 @@ func elimDeadAutosGeneric(f *Func) {
// Propagate any auto addresses through v.
var node *ir.Name
for _, a := range args {
if n, ok := addr[a]; ok && !used[n] {
if n, ok := addr[a]; ok && !used.Has(n) {
if node == nil {
node = n
} else if node != n {
@ -233,7 +233,7 @@ func elimDeadAutosGeneric(f *Func) {
// multiple pointers (e.g. NeqPtr, Phi etc.).
// This is rare, so just propagate the first
// value to keep things simple.
used[n] = true
used.Add(n)
changed = true
}
}
@ -249,7 +249,7 @@ func elimDeadAutosGeneric(f *Func) {
}
if addr[v] != node {
// This doesn't happen in practice, but catch it just in case.
used[node] = true
used.Add(node)
changed = true
}
return
@ -269,8 +269,8 @@ func elimDeadAutosGeneric(f *Func) {
}
// keep the auto if its address reaches a control value
for _, c := range b.ControlValues() {
if n, ok := addr[c]; ok && !used[n] {
used[n] = true
if n, ok := addr[c]; ok && !used.Has(n) {
used.Add(n)
changed = true
}
}
@ -282,7 +282,7 @@ func elimDeadAutosGeneric(f *Func) {
// Eliminate stores to unread autos.
for v, n := range elim {
if used[n] {
if used.Has(n) {
continue
}
// replace with OpCopy

View file

@ -24,6 +24,10 @@ type offsetKey struct {
pt *types.Type
}
func isBlockMultiValueExit(b *Block) bool {
return (b.Kind == BlockRet || b.Kind == BlockRetJmp) && len(b.Controls) > 0 && b.Controls[0].Op == OpMakeResult
}
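With late call expansion, all of a function's results feed one OpMakeResult controlling the BlockRet (or BlockRetJmp). A source-level sketch of a function whose exit this predicate matches; the exact SSA shape depends on the ABI work in progress here:

// Both results are gathered into a single OpMakeResult in SSA form.
func divmod(a, b int) (int, int) {
	return a / b, a % b
}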
// expandCalls converts LE (Late Expansion) calls that act like they receive value args into a lower-level form
// that is more oriented to a platform's ABI. The SelectN operations that extract results are rewritten into
// more appropriate forms, and any StructMake or ArrayMake inputs are decomposed until non-struct values are
@ -194,7 +198,8 @@ func expandCalls(f *Func) {
}
break
}
if leaf.Op == OpIData {
switch leaf.Op {
case OpIData, OpStructSelect, OpArraySelect:
leafType = removeTrivialWrapperTypes(leaf.Type)
}
aux := selector.Aux
@ -624,6 +629,24 @@ func expandCalls(f *Func) {
return x
}
rewriteDereference := func(b *Block, base, a, mem *Value, offset, size int64, typ *types.Type, pos src.XPos) *Value {
source := a.Args[0]
dst := offsetFrom(base, offset, source.Type)
if a.Uses == 1 && a.Block == b {
a.reset(OpMove)
a.Pos = pos
a.Type = types.TypeMem
a.Aux = typ
a.AuxInt = size
a.SetArgs3(dst, source, mem)
mem = a
} else {
mem = b.NewValue3A(pos, OpMove, types.TypeMem, typ, dst, source, mem)
mem.AuxInt = size
}
return mem
}
// rewriteArgs removes all the Args from a call and converts the call args into appropriate
// stores (or later, register movement). Extra args for interface and closure calls are ignored,
// but removed.
@ -631,7 +654,7 @@ func expandCalls(f *Func) {
// Thread the stores on the memory arg
aux := v.Aux.(*AuxCall)
pos := v.Pos.WithNotStmt()
m0 := v.Args[len(v.Args)-1]
m0 := v.MemoryArg()
mem := m0
for i, a := range v.Args {
if i < firstArg {
@ -647,20 +670,7 @@ func expandCalls(f *Func) {
}
// "Dereference" of addressed (probably not-SSA-eligible) value becomes Move
// TODO this will be more complicated with registers in the picture.
source := a.Args[0]
dst := f.ConstOffPtrSP(source.Type, aux.OffsetOfArg(auxI), sp)
if a.Uses == 1 && a.Block == v.Block {
a.reset(OpMove)
a.Pos = pos
a.Type = types.TypeMem
a.Aux = aux.TypeOfArg(auxI)
a.AuxInt = aux.SizeOfArg(auxI)
a.SetArgs3(dst, source, mem)
mem = a
} else {
mem = v.Block.NewValue3A(pos, OpMove, types.TypeMem, aux.TypeOfArg(auxI), dst, source, mem)
mem.AuxInt = aux.SizeOfArg(auxI)
}
mem = rewriteDereference(v.Block, sp, a, mem, aux.OffsetOfArg(auxI), aux.SizeOfArg(auxI), aux.TypeOfArg(auxI), pos)
} else {
if debug {
fmt.Printf("storeArg %s, %v, %d\n", a.LongString(), aux.TypeOfArg(auxI), aux.OffsetOfArg(auxI))
@ -692,6 +702,45 @@ func expandCalls(f *Func) {
v.SetArgs2(code, mem)
}
}
if isBlockMultiValueExit(b) {
// Very similar to code in rewriteArgs, but results instead of args.
v := b.Controls[0]
m0 := v.MemoryArg()
mem := m0
aux := f.OwnAux
pos := v.Pos.WithNotStmt()
for j, a := range v.Args {
i := int64(j)
if a == m0 {
break
}
auxType := aux.TypeOfResult(i)
auxBase := b.NewValue2A(v.Pos, OpLocalAddr, types.NewPtr(auxType), aux.results[i].Name, sp, mem)
auxOffset := int64(0)
auxSize := aux.SizeOfResult(i)
if a.Op == OpDereference {
// Avoid a self-move, and if one is detected try to remove the already-inserted VarDef for the assignment that won't happen.
if dAddr, dMem := a.Args[0], a.Args[1]; dAddr.Op == OpLocalAddr && dAddr.Args[0].Op == OpSP &&
dAddr.Args[1] == dMem && dAddr.Aux == aux.results[i].Name {
if dMem.Op == OpVarDef && dMem.Aux == dAddr.Aux {
dMem.copyOf(dMem.MemoryArg()) // elide the VarDef
}
continue
}
mem = rewriteDereference(v.Block, auxBase, a, mem, auxOffset, auxSize, auxType, pos)
} else {
if a.Op == OpLoad && a.Args[0].Op == OpLocalAddr {
addr := a.Args[0]
if addr.MemoryArg() == a.MemoryArg() && addr.Aux == aux.results[i].Name {
continue
}
}
mem = storeArgOrLoad(v.Pos, b, auxBase, a, mem, aux.TypeOfResult(i), auxOffset)
}
}
b.SetControl(mem)
v.reset(OpInvalid) // otherwise it can have a mem operand which will fail check(), even though it is dead.
}
}
for i, name := range f.Names {

View file

@ -58,6 +58,11 @@ type Func struct {
// of keys to make iteration order deterministic.
Names []LocalSlot
// RegArgs is a slice of register-memory pairs that must be spilled and unspilled in the uncommon path of function entry.
RegArgs []ArgPair
// AuxCall describing parameters and results for this function.
OwnAux *AuxCall
// WBLoads is a list of Blocks that branch on the write
// barrier flag. Safe-points are disabled from the OpLoad that
// reads the write-barrier flag until the control flow rejoins
@ -771,7 +776,7 @@ func DebugNameMatch(evname, name string) bool {
}
func (f *Func) spSb() (sp, sb *Value) {
initpos := f.Entry.Pos
initpos := src.NoXPos // These are originally created with no position in ssa.go; if they are optimized out and then recreated, the recreated values should likewise have no position.
for _, v := range f.Entry.Values {
if v.Op == OpSB {
sb = v
@ -780,7 +785,7 @@ func (f *Func) spSb() (sp, sb *Value) {
sp = v
}
if sb != nil && sp != nil {
break
return
}
}
if sb == nil {

View file

@ -475,7 +475,7 @@
(CMPB (MOVLconst [c]) x) => (InvertFlags (CMPBconst x [int8(c)]))
// Canonicalize the order of arguments to comparisons - helps with CSE.
(CMP(L|W|B) x y) && x.ID > y.ID => (InvertFlags (CMP(L|W|B) y x))
(CMP(L|W|B) x y) && canonLessThan(x,y) => (InvertFlags (CMP(L|W|B) y x))
// strength reduction
// Assumes the following costs, from https://gmplib.org/~tege/x86-timing.pdf:
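
The strength reduction these costs guide replaces expensive operations with cheaper equivalents; a standalone sketch (not code from this CL) of the classic case, unsigned division by a constant turned into a reciprocal multiply and shift:

package main

import "fmt"

// div3 divides a uint32 by 3 without a divide instruction, using the
// magic multiplier ceil(2^33/3) = 0xAAAAAAAB followed by a 33-bit shift.
func div3(x uint32) uint32 {
	return uint32((uint64(x) * 0xAAAAAAAB) >> 33)
}

func main() {
	for _, x := range []uint32{0, 1, 2, 3, 100, 1<<32 - 1} {
		if div3(x) != x/3 {
			panic("mismatch")
		}
	}
	fmt.Println("reciprocal multiply matches x/3 on all samples")
}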

View file

@ -916,7 +916,7 @@
(CMPB (MOVLconst [c]) x) => (InvertFlags (CMPBconst x [int8(c)]))
// Canonicalize the order of arguments to comparisons - helps with CSE.
(CMP(Q|L|W|B) x y) && x.ID > y.ID => (InvertFlags (CMP(Q|L|W|B) y x))
(CMP(Q|L|W|B) x y) && canonLessThan(x,y) => (InvertFlags (CMP(Q|L|W|B) y x))
// Using MOVZX instead of AND is cheaper.
(AND(Q|L)const [ 0xFF] x) => (MOVBQZX x)

View file

@ -507,7 +507,7 @@
(TEQ x (MOVWconst [c])) => (TEQconst [c] x)
// Canonicalize the order of arguments to comparisons - helps with CSE.
(CMP x y) && x.ID > y.ID => (InvertFlags (CMP y x))
(CMP x y) && canonLessThan(x,y) => (InvertFlags (CMP y x))
// don't extend after proper load
// MOVWreg instruction is not emitted if src and dst registers are same, but it ensures the type.

View file

@ -1151,7 +1151,7 @@
(CMPW (MOVDconst [c]) x) => (InvertFlags (CMPWconst [int32(c)] x))
// Canonicalize the order of arguments to comparisons - helps with CSE.
((CMP|CMPW) x y) && x.ID > y.ID => (InvertFlags ((CMP|CMPW) y x))
((CMP|CMPW) x y) && canonLessThan(x,y) => (InvertFlags ((CMP|CMPW) y x))
// mul-neg => mneg
(NEG (MUL x y)) => (MNEG x y)

View file

@ -478,20 +478,24 @@ func init() {
// pseudo-ops
{name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gpg}}, nilCheck: true, faultOnNilArg0: true}, // panic if arg0 is nil. arg1=mem.
{name: "Equal", argLength: 1, reg: readflags}, // bool, true flags encode x==y false otherwise.
{name: "NotEqual", argLength: 1, reg: readflags}, // bool, true flags encode x!=y false otherwise.
{name: "LessThan", argLength: 1, reg: readflags}, // bool, true flags encode signed x<y false otherwise.
{name: "LessEqual", argLength: 1, reg: readflags}, // bool, true flags encode signed x<=y false otherwise.
{name: "GreaterThan", argLength: 1, reg: readflags}, // bool, true flags encode signed x>y false otherwise.
{name: "GreaterEqual", argLength: 1, reg: readflags}, // bool, true flags encode signed x>=y false otherwise.
{name: "LessThanU", argLength: 1, reg: readflags}, // bool, true flags encode unsigned x<y false otherwise.
{name: "LessEqualU", argLength: 1, reg: readflags}, // bool, true flags encode unsigned x<=y false otherwise.
{name: "GreaterThanU", argLength: 1, reg: readflags}, // bool, true flags encode unsigned x>y false otherwise.
{name: "GreaterEqualU", argLength: 1, reg: readflags}, // bool, true flags encode unsigned x>=y false otherwise.
{name: "LessThanF", argLength: 1, reg: readflags}, // bool, true flags encode floating-point x<y false otherwise.
{name: "LessEqualF", argLength: 1, reg: readflags}, // bool, true flags encode floating-point x<=y false otherwise.
{name: "GreaterThanF", argLength: 1, reg: readflags}, // bool, true flags encode floating-point x>y false otherwise.
{name: "GreaterEqualF", argLength: 1, reg: readflags}, // bool, true flags encode floating-point x>=y false otherwise.
{name: "Equal", argLength: 1, reg: readflags}, // bool, true flags encode x==y false otherwise.
{name: "NotEqual", argLength: 1, reg: readflags}, // bool, true flags encode x!=y false otherwise.
{name: "LessThan", argLength: 1, reg: readflags}, // bool, true flags encode signed x<y false otherwise.
{name: "LessEqual", argLength: 1, reg: readflags}, // bool, true flags encode signed x<=y false otherwise.
{name: "GreaterThan", argLength: 1, reg: readflags}, // bool, true flags encode signed x>y false otherwise.
{name: "GreaterEqual", argLength: 1, reg: readflags}, // bool, true flags encode signed x>=y false otherwise.
{name: "LessThanU", argLength: 1, reg: readflags}, // bool, true flags encode unsigned x<y false otherwise.
{name: "LessEqualU", argLength: 1, reg: readflags}, // bool, true flags encode unsigned x<=y false otherwise.
{name: "GreaterThanU", argLength: 1, reg: readflags}, // bool, true flags encode unsigned x>y false otherwise.
{name: "GreaterEqualU", argLength: 1, reg: readflags}, // bool, true flags encode unsigned x>=y false otherwise.
{name: "LessThanF", argLength: 1, reg: readflags}, // bool, true flags encode floating-point x<y false otherwise.
{name: "LessEqualF", argLength: 1, reg: readflags}, // bool, true flags encode floating-point x<=y false otherwise.
{name: "GreaterThanF", argLength: 1, reg: readflags}, // bool, true flags encode floating-point x>y false otherwise.
{name: "GreaterEqualF", argLength: 1, reg: readflags}, // bool, true flags encode floating-point x>=y false otherwise.
{name: "NotLessThanF", argLength: 1, reg: readflags}, // bool, true flags encode floating-point x>=y || x is unordered with y, false otherwise.
{name: "NotLessEqualF", argLength: 1, reg: readflags}, // bool, true flags encode floating-point x>y || x is unordered with y, false otherwise.
{name: "NotGreaterThanF", argLength: 1, reg: readflags}, // bool, true flags encode floating-point x<=y || x is unordered with y, false otherwise.
{name: "NotGreaterEqualF", argLength: 1, reg: readflags}, // bool, true flags encode floating-point x<y || x is unordered with y, false otherwise.
// duffzero
// arg0 = address of memory to zero
// arg1 = mem

View file

@ -1088,7 +1088,7 @@
(CMPWU (MOVDconst [c]) y) && isU16Bit(c) => (InvertFlags (CMPWUconst y [int32(c)]))
// Canonicalize the order of arguments to comparisons - helps with CSE.
((CMP|CMPW|CMPU|CMPWU) x y) && x.ID > y.ID => (InvertFlags ((CMP|CMPW|CMPU|CMPWU) y x))
((CMP|CMPW|CMPU|CMPWU) x y) && canonLessThan(x,y) => (InvertFlags ((CMP|CMPW|CMPU|CMPWU) y x))
// ISEL auxInt values 0=LT 1=GT 2=EQ arg2 ? arg0 : arg1
// ISEL auxInt values 4=GE 5=LE 6=NE arg2 ? arg1 : arg0

View file

@ -785,7 +785,7 @@
=> (RISBGZ x {s390x.NewRotateParams(r.Start, r.Start, -r.Start&63)})
// Canonicalize the order of arguments to comparisons - helps with CSE.
((CMP|CMPW|CMPU|CMPWU) x y) && x.ID > y.ID => (InvertFlags ((CMP|CMPW|CMPU|CMPWU) y x))
((CMP|CMPW|CMPU|CMPWU) x y) && canonLessThan(x,y) => (InvertFlags ((CMP|CMPW|CMPU|CMPWU) y x))
// Use sign/zero extend instead of RISBGZ.
(RISBGZ x {r}) && r == s390x.NewRotateParams(56, 63, 0) => (MOVBZreg x)

View file

@ -2512,7 +2512,7 @@
(Move {t1} [s] dst tmp1 midmem:(Move {t2} [s] tmp2 src _))
&& t1.Compare(t2) == types.CMPeq
&& isSamePtr(tmp1, tmp2)
&& isStackPtr(src)
&& isStackPtr(src) && !isVolatile(src)
&& disjoint(src, s, tmp2, s)
&& (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config))
=> (Move {t1} [s] dst src midmem)
@ -2521,7 +2521,7 @@
(Move {t1} [s] dst tmp1 midmem:(VarDef (Move {t2} [s] tmp2 src _)))
&& t1.Compare(t2) == types.CMPeq
&& isSamePtr(tmp1, tmp2)
&& isStackPtr(src)
&& isStackPtr(src) && !isVolatile(src)
&& disjoint(src, s, tmp2, s)
&& (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config))
=> (Move {t1} [s] dst src midmem)

View file

@ -9,9 +9,9 @@ import (
"cmd/internal/src"
"fmt"
"html"
exec "internal/execabs"
"io"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"

View file

@ -87,3 +87,29 @@ func (t LocPair) String() string {
}
return fmt.Sprintf("<%s,%s>", n0, n1)
}
type ArgPair struct {
reg *Register
mem LocalSlot
}
func (ap *ArgPair) Reg() int16 {
return ap.reg.objNum
}
func (ap *ArgPair) Type() *types.Type {
return ap.mem.Type
}
func (ap *ArgPair) Mem() *LocalSlot {
return &ap.mem
}
func (t ArgPair) String() string {
n0 := "nil"
if t.reg != nil {
n0 = t.reg.String()
}
n1 := t.mem.String()
return fmt.Sprintf("<%s,%s>", n0, n1)
}

View file

@ -5,6 +5,7 @@
package ssa
import (
"cmd/compile/internal/ir"
"cmd/compile/internal/types"
"cmd/internal/obj"
"fmt"
@ -70,7 +71,8 @@ type auxType int8
type Param struct {
Type *types.Type
Offset int32 // TODO someday this will be a register
Offset int32 // Offset of Param if not in a register.
Name *ir.Name // For OwnAux, need to prepend stores with Vardefs
}
type AuxCall struct {
@ -199,6 +201,12 @@ func ClosureAuxCall(args []Param, results []Param) *AuxCall {
func (*AuxCall) CanBeAnSSAAux() {}
// OwnAuxCall returns a function's own AuxCall
func OwnAuxCall(args []Param, results []Param) *AuxCall {
// TODO if this remains identical to ClosureAuxCall above after new ABI is done, should deduplicate.
return &AuxCall{Fn: nil, args: args, results: results}
}
const (
auxNone auxType = iota
auxBool // auxInt is 0/1 for false/true

View file

@ -1564,6 +1564,10 @@ const (
OpARM64LessEqualF
OpARM64GreaterThanF
OpARM64GreaterEqualF
OpARM64NotLessThanF
OpARM64NotLessEqualF
OpARM64NotGreaterThanF
OpARM64NotGreaterEqualF
OpARM64DUFFZERO
OpARM64LoweredZero
OpARM64DUFFCOPY
@ -20798,6 +20802,42 @@ var opcodeTable = [...]opInfo{
},
},
},
{
name: "NotLessThanF",
argLen: 1,
reg: regInfo{
outputs: []outputInfo{
{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
},
},
},
{
name: "NotLessEqualF",
argLen: 1,
reg: regInfo{
outputs: []outputInfo{
{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
},
},
},
{
name: "NotGreaterThanF",
argLen: 1,
reg: regInfo{
outputs: []outputInfo{
{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
},
},
},
{
name: "NotGreaterEqualF",
argLen: 1,
reg: regInfo{
outputs: []outputInfo{
{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
},
},
},
{
name: "DUFFZERO",
auxType: auxInt64,

View file

@ -521,6 +521,18 @@ func shiftIsBounded(v *Value) bool {
return v.AuxInt != 0
}
// canonLessThan returns whether x is "ordered" less than y, for purposes of normalizing
// generated code as much as possible.
func canonLessThan(x, y *Value) bool {
if x.Op != y.Op {
return x.Op < y.Op
}
if !x.Pos.SameFileAndLine(y.Pos) {
return x.Pos.Before(y.Pos)
}
return x.ID < y.ID
}
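
A standalone sketch of the same three-level ordering, showing the intent: the volatile value ID is consulted only as a last tie-breaker, so the canonical operand order stays stable across compilations (toy type, not the compiler's ssa.Value):

package main

import (
	"fmt"
	"sort"
)

// val mimics the fields canonLessThan consults: opcode, source position,
// and an allocation ID that may differ from run to run.
type val struct{ op, pos, id int }

// less orders by opcode, then position, then ID, mirroring canonLessThan.
func less(x, y val) bool {
	if x.op != y.op {
		return x.op < y.op
	}
	if x.pos != y.pos {
		return x.pos < y.pos
	}
	return x.id < y.id
}

func main() {
	vs := []val{{2, 10, 7}, {1, 12, 9}, {1, 12, 4}}
	sort.Slice(vs, func(i, j int) bool { return less(vs[i], vs[j]) })
	fmt.Println(vs) // [{1 12 4} {1 12 9} {2 10 7}]
}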
// truncate64Fto32F converts a float64 value to a float32 preserving the bit pattern
// of the mantissa. It will panic if the truncation results in lost information.
func truncate64Fto32F(f float64) float32 {
@ -984,9 +996,10 @@ func flagArg(v *Value) *Value {
}
// arm64Negate finds the complement to an ARM64 condition code,
// for example Equal -> NotEqual or LessThan -> GreaterEqual
// for example !Equal -> NotEqual or !LessThan -> GreaterEqual
//
// TODO: add floating-point conditions
// For floating point, it's more subtle because NaN is unordered. We do
// !LessThanF -> NotLessThanF; the latter takes care of NaNs.
func arm64Negate(op Op) Op {
switch op {
case OpARM64LessThan:
@ -1010,13 +1023,21 @@ func arm64Negate(op Op) Op {
case OpARM64NotEqual:
return OpARM64Equal
case OpARM64LessThanF:
return OpARM64GreaterEqualF
case OpARM64GreaterThanF:
return OpARM64LessEqualF
return OpARM64NotLessThanF
case OpARM64NotLessThanF:
return OpARM64LessThanF
case OpARM64LessEqualF:
return OpARM64NotLessEqualF
case OpARM64NotLessEqualF:
return OpARM64LessEqualF
case OpARM64GreaterThanF:
return OpARM64NotGreaterThanF
case OpARM64NotGreaterThanF:
return OpARM64GreaterThanF
case OpARM64GreaterEqualF:
return OpARM64LessThanF
return OpARM64NotGreaterEqualF
case OpARM64NotGreaterEqualF:
return OpARM64GreaterEqualF
default:
panic("unreachable")
}
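
The unordered case is visible from ordinary Go: with a NaN operand a comparison and its naive opposite are both false, which is why !LessThanF must become a distinct NotLessThanF condition instead of GreaterEqualF (illustration only, not compiler code):

package main

import (
	"fmt"
	"math"
)

func main() {
	x, y := 2.0, math.NaN()
	fmt.Println(x < y)    // false: NaN compares unordered
	fmt.Println(x >= y)   // also false, so it is not the negation of x < y
	fmt.Println(!(x < y)) // true: the condition NotLessThanF encodes
}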
@ -1027,8 +1048,6 @@ func arm64Negate(op Op) Op {
// that the same result would be produced if the arguments
// to the flag-generating instruction were reversed, e.g.
// (InvertFlags (CMP x y)) -> (CMP y x)
//
// TODO: add floating-point conditions
func arm64Invert(op Op) Op {
switch op {
case OpARM64LessThan:
@ -1057,6 +1076,14 @@ func arm64Invert(op Op) Op {
return OpARM64GreaterEqualF
case OpARM64GreaterEqualF:
return OpARM64LessEqualF
case OpARM64NotLessThanF:
return OpARM64NotGreaterThanF
case OpARM64NotGreaterThanF:
return OpARM64NotLessThanF
case OpARM64NotLessEqualF:
return OpARM64NotGreaterEqualF
case OpARM64NotGreaterEqualF:
return OpARM64NotLessEqualF
default:
panic("unreachable")
}

View file

@ -1785,12 +1785,12 @@ func rewriteValue386_Op386CMPB(v *Value) bool {
return true
}
// match: (CMPB x y)
// cond: x.ID > y.ID
// cond: canonLessThan(x,y)
// result: (InvertFlags (CMPB y x))
for {
x := v_0
y := v_1
if !(x.ID > y.ID) {
if !(canonLessThan(x, y)) {
break
}
v.reset(Op386InvertFlags)
@ -2078,12 +2078,12 @@ func rewriteValue386_Op386CMPL(v *Value) bool {
return true
}
// match: (CMPL x y)
// cond: x.ID > y.ID
// cond: canonLessThan(x,y)
// result: (InvertFlags (CMPL y x))
for {
x := v_0
y := v_1
if !(x.ID > y.ID) {
if !(canonLessThan(x, y)) {
break
}
v.reset(Op386InvertFlags)
@ -2386,12 +2386,12 @@ func rewriteValue386_Op386CMPW(v *Value) bool {
return true
}
// match: (CMPW x y)
// cond: x.ID > y.ID
// cond: canonLessThan(x,y)
// result: (InvertFlags (CMPW y x))
for {
x := v_0
y := v_1
if !(x.ID > y.ID) {
if !(canonLessThan(x, y)) {
break
}
v.reset(Op386InvertFlags)

View file

@ -6749,12 +6749,12 @@ func rewriteValueAMD64_OpAMD64CMPB(v *Value) bool {
return true
}
// match: (CMPB x y)
// cond: x.ID > y.ID
// cond: canonLessThan(x,y)
// result: (InvertFlags (CMPB y x))
for {
x := v_0
y := v_1
if !(x.ID > y.ID) {
if !(canonLessThan(x, y)) {
break
}
v.reset(OpAMD64InvertFlags)
@ -7135,12 +7135,12 @@ func rewriteValueAMD64_OpAMD64CMPL(v *Value) bool {
return true
}
// match: (CMPL x y)
// cond: x.ID > y.ID
// cond: canonLessThan(x,y)
// result: (InvertFlags (CMPL y x))
for {
x := v_0
y := v_1
if !(x.ID > y.ID) {
if !(canonLessThan(x, y)) {
break
}
v.reset(OpAMD64InvertFlags)
@ -7544,12 +7544,12 @@ func rewriteValueAMD64_OpAMD64CMPQ(v *Value) bool {
return true
}
// match: (CMPQ x y)
// cond: x.ID > y.ID
// cond: canonLessThan(x,y)
// result: (InvertFlags (CMPQ y x))
for {
x := v_0
y := v_1
if !(x.ID > y.ID) {
if !(canonLessThan(x, y)) {
break
}
v.reset(OpAMD64InvertFlags)
@ -8106,12 +8106,12 @@ func rewriteValueAMD64_OpAMD64CMPW(v *Value) bool {
return true
}
// match: (CMPW x y)
// cond: x.ID > y.ID
// cond: canonLessThan(x,y)
// result: (InvertFlags (CMPW y x))
for {
x := v_0
y := v_1
if !(x.ID > y.ID) {
if !(canonLessThan(x, y)) {
break
}
v.reset(OpAMD64InvertFlags)

View file

@ -3728,12 +3728,12 @@ func rewriteValueARM_OpARMCMP(v *Value) bool {
return true
}
// match: (CMP x y)
// cond: x.ID > y.ID
// cond: canonLessThan(x,y)
// result: (InvertFlags (CMP y x))
for {
x := v_0
y := v_1
if !(x.ID > y.ID) {
if !(canonLessThan(x, y)) {
break
}
v.reset(OpARMInvertFlags)

View file

@ -2772,12 +2772,12 @@ func rewriteValueARM64_OpARM64CMP(v *Value) bool {
return true
}
// match: (CMP x y)
// cond: x.ID > y.ID
// cond: canonLessThan(x,y)
// result: (InvertFlags (CMP y x))
for {
x := v_0
y := v_1
if !(x.ID > y.ID) {
if !(canonLessThan(x, y)) {
break
}
v.reset(OpARM64InvertFlags)
@ -2941,12 +2941,12 @@ func rewriteValueARM64_OpARM64CMPW(v *Value) bool {
return true
}
// match: (CMPW x y)
// cond: x.ID > y.ID
// cond: canonLessThan(x,y)
// result: (InvertFlags (CMPW y x))
for {
x := v_0
y := v_1
if !(x.ID > y.ID) {
if !(canonLessThan(x, y)) {
break
}
v.reset(OpARM64InvertFlags)

View file

@ -4777,12 +4777,12 @@ func rewriteValuePPC64_OpPPC64CMP(v *Value) bool {
return true
}
// match: (CMP x y)
// cond: x.ID > y.ID
// cond: canonLessThan(x,y)
// result: (InvertFlags (CMP y x))
for {
x := v_0
y := v_1
if !(x.ID > y.ID) {
if !(canonLessThan(x, y)) {
break
}
v.reset(OpPPC64InvertFlags)
@ -4834,12 +4834,12 @@ func rewriteValuePPC64_OpPPC64CMPU(v *Value) bool {
return true
}
// match: (CMPU x y)
// cond: x.ID > y.ID
// cond: canonLessThan(x,y)
// result: (InvertFlags (CMPU y x))
for {
x := v_0
y := v_1
if !(x.ID > y.ID) {
if !(canonLessThan(x, y)) {
break
}
v.reset(OpPPC64InvertFlags)
@ -4964,12 +4964,12 @@ func rewriteValuePPC64_OpPPC64CMPW(v *Value) bool {
return true
}
// match: (CMPW x y)
// cond: x.ID > y.ID
// cond: canonLessThan(x,y)
// result: (InvertFlags (CMPW y x))
for {
x := v_0
y := v_1
if !(x.ID > y.ID) {
if !(canonLessThan(x, y)) {
break
}
v.reset(OpPPC64InvertFlags)
@ -5045,12 +5045,12 @@ func rewriteValuePPC64_OpPPC64CMPWU(v *Value) bool {
return true
}
// match: (CMPWU x y)
// cond: x.ID > y.ID
// cond: canonLessThan(x,y)
// result: (InvertFlags (CMPWU y x))
for {
x := v_0
y := v_1
if !(x.ID > y.ID) {
if !(canonLessThan(x, y)) {
break
}
v.reset(OpPPC64InvertFlags)

View file

@ -6332,12 +6332,12 @@ func rewriteValueS390X_OpS390XCMP(v *Value) bool {
return true
}
// match: (CMP x y)
// cond: x.ID > y.ID
// cond: canonLessThan(x,y)
// result: (InvertFlags (CMP y x))
for {
x := v_0
y := v_1
if !(x.ID > y.ID) {
if !(canonLessThan(x, y)) {
break
}
v.reset(OpS390XInvertFlags)
@ -6389,12 +6389,12 @@ func rewriteValueS390X_OpS390XCMPU(v *Value) bool {
return true
}
// match: (CMPU x y)
// cond: x.ID > y.ID
// cond: canonLessThan(x,y)
// result: (InvertFlags (CMPU y x))
for {
x := v_0
y := v_1
if !(x.ID > y.ID) {
if !(canonLessThan(x, y)) {
break
}
v.reset(OpS390XInvertFlags)
@ -6624,12 +6624,12 @@ func rewriteValueS390X_OpS390XCMPW(v *Value) bool {
return true
}
// match: (CMPW x y)
// cond: x.ID > y.ID
// cond: canonLessThan(x,y)
// result: (InvertFlags (CMPW y x))
for {
x := v_0
y := v_1
if !(x.ID > y.ID) {
if !(canonLessThan(x, y)) {
break
}
v.reset(OpS390XInvertFlags)
@ -6721,12 +6721,12 @@ func rewriteValueS390X_OpS390XCMPWU(v *Value) bool {
return true
}
// match: (CMPWU x y)
// cond: x.ID > y.ID
// cond: canonLessThan(x,y)
// result: (InvertFlags (CMPWU y x))
for {
x := v_0
y := v_1
if !(x.ID > y.ID) {
if !(canonLessThan(x, y)) {
break
}
v.reset(OpS390XInvertFlags)

View file

@ -13637,7 +13637,7 @@ func rewriteValuegeneric_OpMove(v *Value) bool {
return true
}
// match: (Move {t1} [s] dst tmp1 midmem:(Move {t2} [s] tmp2 src _))
// cond: t1.Compare(t2) == types.CMPeq && isSamePtr(tmp1, tmp2) && isStackPtr(src) && disjoint(src, s, tmp2, s) && (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config))
// cond: t1.Compare(t2) == types.CMPeq && isSamePtr(tmp1, tmp2) && isStackPtr(src) && !isVolatile(src) && disjoint(src, s, tmp2, s) && (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config))
// result: (Move {t1} [s] dst src midmem)
for {
s := auxIntToInt64(v.AuxInt)
@ -13651,7 +13651,7 @@ func rewriteValuegeneric_OpMove(v *Value) bool {
t2 := auxToType(midmem.Aux)
src := midmem.Args[1]
tmp2 := midmem.Args[0]
if !(t1.Compare(t2) == types.CMPeq && isSamePtr(tmp1, tmp2) && isStackPtr(src) && disjoint(src, s, tmp2, s) && (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config))) {
if !(t1.Compare(t2) == types.CMPeq && isSamePtr(tmp1, tmp2) && isStackPtr(src) && !isVolatile(src) && disjoint(src, s, tmp2, s) && (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config))) {
break
}
v.reset(OpMove)
@ -13661,7 +13661,7 @@ func rewriteValuegeneric_OpMove(v *Value) bool {
return true
}
// match: (Move {t1} [s] dst tmp1 midmem:(VarDef (Move {t2} [s] tmp2 src _)))
// cond: t1.Compare(t2) == types.CMPeq && isSamePtr(tmp1, tmp2) && isStackPtr(src) && disjoint(src, s, tmp2, s) && (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config))
// cond: t1.Compare(t2) == types.CMPeq && isSamePtr(tmp1, tmp2) && isStackPtr(src) && !isVolatile(src) && disjoint(src, s, tmp2, s) && (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config))
// result: (Move {t1} [s] dst src midmem)
for {
s := auxIntToInt64(v.AuxInt)
@ -13679,7 +13679,7 @@ func rewriteValuegeneric_OpMove(v *Value) bool {
t2 := auxToType(midmem_0.Aux)
src := midmem_0.Args[1]
tmp2 := midmem_0.Args[0]
if !(t1.Compare(t2) == types.CMPeq && isSamePtr(tmp1, tmp2) && isStackPtr(src) && disjoint(src, s, tmp2, s) && (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config))) {
if !(t1.Compare(t2) == types.CMPeq && isSamePtr(tmp1, tmp2) && isStackPtr(src) && !isVolatile(src) && disjoint(src, s, tmp2, s) && (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config))) {
break
}
v.reset(OpMove)

View file

@ -138,13 +138,12 @@ func ReadSymABIs(file, myimportpath string) {
// For body-less functions, we only create the LSym; for functions
// with bodies we call a helper to set up / populate the LSym.
func InitLSym(f *ir.Func, hasBody bool) {
staticdata.NeedFuncSym(f.Sym())
// FIXME: for new-style ABI wrappers, we set up the lsym at the
// point the wrapper is created.
if f.LSym != nil && base.Flag.ABIWrap {
return
}
staticdata.NeedFuncSym(f.Sym())
selectLSym(f, hasBody)
if hasBody {
setupTextLSym(f, 0)
@ -155,18 +154,18 @@ func InitLSym(f *ir.Func, hasBody bool) {
// makes calls to helpers to create ABI wrappers if needed.
func selectLSym(f *ir.Func, hasBody bool) {
if f.LSym != nil {
base.FatalfAt(f.Pos(), "Func.initLSym called twice on %v", f)
base.FatalfAt(f.Pos(), "InitLSym called twice on %v", f)
}
if nam := f.Nname; !ir.IsBlank(nam) {
var wrapperABI obj.ABI
needABIWrapper := false
defABI, hasDefABI := symabiDefs[nam.Sym().LinksymName()]
defABI, hasDefABI := symabiDefs[nam.Linksym().Name]
if hasDefABI && defABI == obj.ABI0 {
// Symbol is defined as ABI0. Create an
// Internal -> ABI0 wrapper.
f.LSym = nam.Sym().LinksymABI0()
f.LSym = nam.LinksymABI(obj.ABI0)
needABIWrapper, wrapperABI = true, obj.ABIInternal
} else {
f.LSym = nam.Linksym()
@ -302,8 +301,9 @@ func makeABIWrapper(f *ir.Func, wrapperABI obj.ABI) {
// extra work in typecheck/walk/ssa, might want to add a new node
// OTAILCALL or something to this effect.
var tail ir.Node
if tfn.Type().NumResults() == 0 && tfn.Type().NumParams() == 0 && tfn.Type().NumRecvs() == 0 {
tail = ir.NewBranchStmt(base.Pos, ir.ORETJMP, f.Nname.Sym())
if tfn.Type().NumResults() == 0 && tfn.Type().NumParams() == 0 && tfn.Type().NumRecvs() == 0 && !(base.Ctxt.Arch.Name == "ppc64le" && base.Ctxt.Flag_dynlink) {
tail = ir.NewTailCallStmt(base.Pos, f.Nname)
} else {
call := ir.NewCallExpr(base.Pos, ir.OCALL, f.Nname, nil)
call.Args = ir.ParamNames(tfn.Type())

View file

@ -45,7 +45,7 @@ type nowritebarrierrecCall struct {
}
// newNowritebarrierrecChecker creates a nowritebarrierrecChecker. It
// must be called before transformclosure and walk.
// must be called before walk.
func newNowritebarrierrecChecker() *nowritebarrierrecChecker {
c := &nowritebarrierrecChecker{
extraCalls: make(map[*ir.Func][]nowritebarrierrecCall),
@ -54,7 +54,7 @@ func newNowritebarrierrecChecker() *nowritebarrierrecChecker {
// Find all systemstack calls and record their targets. In
// general, flow analysis can't see into systemstack, but it's
// important to handle it for this check, so we model it
// directly. This has to happen before transformclosure since
// directly. This has to happen before transforming closures in walk since
// it's a lot harder to work out the argument after.
for _, n := range typecheck.Target.Decls {
if n.Op() != ir.ODCLFUNC {

View file

@ -96,7 +96,7 @@ func (s *ssafn) AllocFrame(f *ssa.Func) {
if n, ok := v.Aux.(*ir.Name); ok {
switch n.Class {
case ir.PPARAM, ir.PPARAMOUT:
// Don't modify nodfp; it is a global.
// Don't modify RegFP; it is a global.
if n != ir.RegFP {
n.SetUsed(true)
}

View file

@ -124,6 +124,7 @@ func InitConfig() {
ir.Syms.X86HasFMA = typecheck.LookupRuntimeVar("x86HasFMA") // bool
ir.Syms.ARMHasVFPv4 = typecheck.LookupRuntimeVar("armHasVFPv4") // bool
ir.Syms.ARM64HasATOMICS = typecheck.LookupRuntimeVar("arm64HasATOMICS") // bool
ir.Syms.Staticuint64s = typecheck.LookupRuntimeVar("staticuint64s")
ir.Syms.Typedmemclr = typecheck.LookupRuntimeFunc("typedmemclr")
ir.Syms.Typedmemmove = typecheck.LookupRuntimeFunc("typedmemmove")
ir.Syms.Udiv = typecheck.LookupRuntimeVar("udiv") // asm func with special ABI
@ -356,6 +357,13 @@ func buildssa(fn *ir.Func, worker int) *ssa.Func {
if fn.Pragma&ir.Nosplit != 0 {
s.f.NoSplit = true
}
if fn.Pragma&ir.RegisterParams != 0 { // TODO remove after register abi is working
if strings.Contains(name, ".") {
base.ErrorfAt(fn.Pos(), "Calls to //go:registerparams method %s won't work, remove the pragma from the declaration.", name)
}
s.f.Warnl(fn.Pos(), "declared function %v has register params", fn)
}
s.panics = map[funcLine]*ssa.Block{}
s.softFloat = s.config.SoftFloat
@ -392,11 +400,20 @@ func buildssa(fn *ir.Func, worker int) *ssa.Func {
}
if s.hasOpenDefers && len(s.curfn.Exit) > 0 {
// Skip doing open defers if there is any extra exit code (likely
// copying heap-allocated return values or race detection), since
// we will not generate that code in the case of the extra
// deferreturn/ret segment.
// race detection), since we will not generate that code in the
// case of the extra deferreturn/ret segment.
s.hasOpenDefers = false
}
if s.hasOpenDefers {
// Similarly, skip if there are any heap-allocated result
// parameters that need to be copied back to their stack slots.
for _, f := range s.curfn.Type().Results().FieldSlice() {
if !f.Nname.(*ir.Name).OnStack() {
s.hasOpenDefers = false
break
}
}
}
if s.hasOpenDefers &&
s.curfn.NumReturns*s.curfn.NumDefers > 15 {
// Since we are generating defer calls at every exit for
@ -442,24 +459,15 @@ func buildssa(fn *ir.Func, worker int) *ssa.Func {
args = append(args, ssa.Param{Type: n.Type(), Offset: int32(n.FrameOffset())})
case ir.PPARAMOUT:
s.decladdrs[n] = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(n.Type()), n, s.sp, s.startmem)
results = append(results, ssa.Param{Type: n.Type(), Offset: int32(n.FrameOffset())})
if s.canSSA(n) {
// Save ssa-able PPARAMOUT variables so we can
// store them back to the stack at the end of
// the function.
s.returns = append(s.returns, n)
}
results = append(results, ssa.Param{Type: n.Type(), Offset: int32(n.FrameOffset()), Name: n})
case ir.PAUTO:
// processed at each use, to prevent Addr coming
// before the decl.
case ir.PAUTOHEAP:
// moved to heap - already handled by frontend
case ir.PFUNC:
// local function - already handled by frontend
default:
s.Fatalf("local variable with class %v unimplemented", n.Class)
}
}
s.f.OwnAux = ssa.OwnAuxCall(args, results)
// Populate SSAable arguments.
for _, n := range fn.Dcl {
@ -481,38 +489,35 @@ func buildssa(fn *ir.Func, worker int) *ssa.Func {
}
offset = types.Rnd(offset, typ.Alignment())
r := s.newValue1I(ssa.OpOffPtr, types.NewPtr(typ), offset, clo)
ptr := s.newValue1I(ssa.OpOffPtr, types.NewPtr(typ), offset, clo)
offset += typ.Size()
if n.Byval() && TypeOK(n.Type()) {
// If it is a small variable captured by value, downgrade it to PAUTO.
r = s.load(n.Type(), r)
// If n is a small variable captured by value, promote
// it to PAUTO so it can be converted to SSA.
//
// Note: While we never capture a variable by value if
// the user took its address, we may have generated
// runtime calls that did (#43701). Since we don't
// convert Addrtaken variables to SSA anyway, no point
// in promoting them either.
if n.Byval() && !n.Addrtaken() && TypeOK(n.Type()) {
n.Class = ir.PAUTO
} else {
if !n.Byval() {
r = s.load(typ, r)
}
// Declare variable holding address taken from closure.
addr := ir.NewNameAt(fn.Pos(), &types.Sym{Name: "&" + n.Sym().Name, Pkg: types.LocalPkg})
addr.SetType(types.NewPtr(n.Type()))
addr.Class = ir.PAUTO
addr.SetUsed(true)
addr.Curfn = fn
types.CalcSize(addr.Type())
n.Heapaddr = addr
n = addr
fn.Dcl = append(fn.Dcl, n)
s.assign(n, s.load(n.Type(), ptr), false, 0)
continue
}
fn.Dcl = append(fn.Dcl, n)
s.assign(n, r, false, 0)
if !n.Byval() {
ptr = s.load(typ, ptr)
}
s.setHeapaddr(fn.Pos(), n, ptr)
}
}
// Convert the AST-based IR to the SSA-based IR
s.stmtList(fn.Enter)
s.zeroResults()
s.paramsToHeap()
s.stmtList(fn.Body)
// fallthrough to exit
@ -528,6 +533,8 @@ func buildssa(fn *ir.Func, worker int) *ssa.Func {
}
}
s.f.HTMLWriter.WritePhase("before insert phis", "before insert phis")
s.insertPhis()
// Main call to ssa package to compile function
@ -540,6 +547,100 @@ func buildssa(fn *ir.Func, worker int) *ssa.Func {
return s.f
}
// zeroResults zeros the return values at the start of the function.
// We need to do this very early in the function. Defer might stop a
// panic and show the return values as they exist at the time of
// panic. For precise stacks, the garbage collector assumes results
// are always live, so we need to zero them before any allocations,
// even allocations to move params/results to the heap.
func (s *state) zeroResults() {
for _, f := range s.curfn.Type().Results().FieldSlice() {
n := f.Nname.(*ir.Name)
if !n.OnStack() {
// The local which points to the return value is the
// thing that needs zeroing. This is already handled
// by a Needzero annotation in plive.go:(*liveness).epilogue.
continue
}
// Zero the stack location containing f.
if typ := n.Type(); TypeOK(typ) {
s.assign(n, s.zeroVal(typ), false, 0)
} else {
s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem())
s.zero(n.Type(), s.decladdrs[n])
}
}
}
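
The observability argument in the comment can be reproduced at the language level: a deferred function that recovers sees the named results exactly as they stand at panic time, so stale bits in those slots would be visible (small illustration, not compiler code):

package main

import "fmt"

func f() (x int) {
	defer func() {
		recover()
		fmt.Println("x at panic time:", x) // prints 0: the slot was zeroed on entry
	}()
	panic("boom") // x is never assigned before the panic
}

func main() { f() }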
// paramsToHeap produces code to allocate memory for heap-escaped parameters
// and to copy non-result parameters' values from the stack.
func (s *state) paramsToHeap() {
do := func(params *types.Type) {
for _, f := range params.FieldSlice() {
if f.Nname == nil {
continue // anonymous or blank parameter
}
n := f.Nname.(*ir.Name)
if ir.IsBlank(n) || n.OnStack() {
continue
}
s.newHeapaddr(n)
if n.Class == ir.PPARAM {
s.move(n.Type(), s.expr(n.Heapaddr), s.decladdrs[n])
}
}
}
typ := s.curfn.Type()
do(typ.Recvs())
do(typ.Params())
do(typ.Results())
}
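
At the source level, the parameters this function handles arise when a parameter's address outlives the call; the compiler then gives it a heap home and copies the incoming stack value there on entry (minimal illustration):

package main

import "fmt"

// escape's parameter escapes: its address is returned, so x gets a heap
// cell and the caller-passed value is copied into it on entry, which is
// the copy paramsToHeap emits for PPARAM.
func escape(x int) *int {
	return &x
}

func main() {
	p := escape(42)
	fmt.Println(*p) // 42, read from the heap copy
}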
// newHeapaddr allocates heap memory for n and sets its heap address.
func (s *state) newHeapaddr(n *ir.Name) {
s.setHeapaddr(n.Pos(), n, s.newObject(n.Type()))
}
// setHeapaddr allocates a new PAUTO variable to store ptr (which must be non-nil)
// and then sets it as n's heap address.
func (s *state) setHeapaddr(pos src.XPos, n *ir.Name, ptr *ssa.Value) {
if !ptr.Type.IsPtr() || !types.Identical(n.Type(), ptr.Type.Elem()) {
base.FatalfAt(n.Pos(), "setHeapaddr %L with type %v", n, ptr.Type)
}
// Declare variable to hold address.
addr := ir.NewNameAt(pos, &types.Sym{Name: "&" + n.Sym().Name, Pkg: types.LocalPkg})
addr.SetType(types.NewPtr(n.Type()))
addr.Class = ir.PAUTO
addr.SetUsed(true)
addr.Curfn = s.curfn
s.curfn.Dcl = append(s.curfn.Dcl, addr)
types.CalcSize(addr.Type())
if n.Class == ir.PPARAMOUT {
addr.SetIsOutputParamHeapAddr(true)
}
n.Heapaddr = addr
s.assign(addr, ptr, false, 0)
}
// newObject returns an SSA value denoting new(typ).
func (s *state) newObject(typ *types.Type) *ssa.Value {
if typ.Size() == 0 {
return s.newValue1A(ssa.OpAddr, types.NewPtr(typ), ir.Syms.Zerobase, s.sb)
}
return s.rtcall(ir.Syms.Newobject, true, []*types.Type{types.NewPtr(typ)}, s.reflectType(typ))[0]
}
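
The zero-size special case matches visible language behavior: the spec permits all zero-size allocations to share an address, which pointing them at the runtime's zerobase achieves (illustration; the printed addresses are implementation-dependent):

package main

import "fmt"

func main() {
	a := new(struct{})
	b := new([0]int)
	// With gc these typically both point at runtime.zerobase; the spec
	// merely allows, and does not require, the addresses to coincide.
	fmt.Printf("%p %p\n", a, b)
}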
// reflectType returns an SSA value representing a pointer to typ's
// reflection type descriptor.
func (s *state) reflectType(typ *types.Type) *ssa.Value {
lsym := reflectdata.TypeLinksym(typ)
return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(types.Types[types.TUINT8]), lsym, s.sb)
}
func dumpSourcesColumn(writer *ssa.HTMLWriter, fn *ir.Func) {
// Read sources of target function fn.
fname := base.Ctxt.PosTable.Pos(fn.Pos()).Filename()
@ -675,7 +776,7 @@ type state struct {
// all defined variables at the end of each block. Indexed by block ID.
defvars []map[ir.Node]*ssa.Value
// addresses of PPARAM and PPARAMOUT variables.
// addresses of PPARAM and PPARAMOUT variables on the stack.
decladdrs map[*ir.Name]*ssa.Value
// starting values. Memory, stack pointer, and globals pointer
@ -695,9 +796,6 @@ type state struct {
// Used to deduplicate panic calls.
panics map[funcLine]*ssa.Block
// list of PPARAMOUT (return) variables.
returns []*ir.Name
cgoUnsafeArgs bool
hasdefer bool // whether the function contains a defer statement
softFloat bool
@ -1283,8 +1381,8 @@ func (s *state) stmt(n ir.Node) {
case ir.ODCL:
n := n.(*ir.Decl)
if n.X.Class == ir.PAUTOHEAP {
s.Fatalf("DCL %v", n)
if v := n.X; v.Esc() == ir.EscHeap {
s.newHeapaddr(v)
}
case ir.OLABEL:
@ -1414,10 +1512,10 @@ func (s *state) stmt(n ir.Node) {
// Currently doesn't really work because (*p)[:len(*p)] appears here as:
// tmp = len(*p)
// (*p)[:tmp]
//if j != nil && (j.Op == OLEN && samesafeexpr(j.Left, n.Left)) {
//if j != nil && (j.Op == OLEN && SameSafeExpr(j.Left, n.Left)) {
// j = nil
//}
//if k != nil && (k.Op == OCAP && samesafeexpr(k.Left, n.Left)) {
//if k != nil && (k.Op == OCAP && SameSafeExpr(k.Left, n.Left)) {
// k = nil
//}
if i == nil {
@ -1486,11 +1584,11 @@ func (s *state) stmt(n ir.Node) {
b := s.exit()
b.Pos = s.lastPos.WithIsStmt()
case ir.ORETJMP:
n := n.(*ir.BranchStmt)
case ir.OTAILCALL:
n := n.(*ir.TailCallStmt)
b := s.exit()
b.Kind = ssa.BlockRetJmp // override BlockRet
b.Aux = callTargetLSym(n.Label, s.curfn.LSym)
b.Aux = callTargetLSym(n.Target, s.curfn.LSym)
case ir.OCONTINUE, ir.OBREAK:
n := n.(*ir.BranchStmt)
@ -1704,6 +1802,7 @@ const shareDeferExits = false
// It returns a BlockRet block that ends the control flow. Its control value
// will be set to the final memory state.
func (s *state) exit() *ssa.Block {
lateResultLowering := s.f.DebugTest && ssa.LateCallExpansionEnabledWithin(s.f)
if s.hasdefer {
if s.hasOpenDefers {
if shareDeferExits && s.lastDeferExit != nil && len(s.openDefers) == s.lastDeferCount {
@ -1720,24 +1819,61 @@ func (s *state) exit() *ssa.Block {
}
}
// Run exit code. Typically, this code copies heap-allocated PPARAMOUT
// variables back to the stack.
s.stmtList(s.curfn.Exit)
// Store SSAable PPARAMOUT variables back to stack locations.
for _, n := range s.returns {
addr := s.decladdrs[n]
val := s.variable(n, n.Type())
s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem())
s.store(n.Type(), addr, val)
// TODO: if val is ever spilled, we'd like to use the
// PPARAMOUT slot for spilling it. That won't happen
// currently.
}
var b *ssa.Block
var m *ssa.Value
// Do actual return.
m := s.mem()
b := s.endBlock()
// These currently turn into self-copies (in many cases).
if lateResultLowering {
resultFields := s.curfn.Type().Results().FieldSlice()
results := make([]*ssa.Value, len(resultFields)+1, len(resultFields)+1)
m = s.newValue0(ssa.OpMakeResult, s.f.OwnAux.LateExpansionResultType())
// Store SSAable and heap-escaped PPARAMOUT variables back to stack locations.
for i, f := range resultFields {
n := f.Nname.(*ir.Name)
s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem())
if s.canSSA(n) { // result is in some SSA variable
results[i] = s.variable(n, n.Type())
} else if !n.OnStack() { // result is actually heap allocated
ha := s.expr(n.Heapaddr)
s.instrumentFields(n.Type(), ha, instrumentRead)
results[i] = s.newValue2(ssa.OpDereference, n.Type(), ha, s.mem())
} else { // result is not SSA-able; not escaped, so not on heap, but too large for SSA.
// Before register ABI this ought to be a self-move, home=dest.
// With register ABI, it's still a self-move if the parameter is on stack (i.e., too big or overflowed).
results[i] = s.newValue2(ssa.OpDereference, n.Type(), s.addr(n), s.mem())
}
}
// Run exit code. Today, this is just racefuncexit, in -race mode.
// TODO this seems risky here with a register-ABI, but not clear it is right to do it earlier either.
// Spills in register allocation might just fix it.
s.stmtList(s.curfn.Exit)
results[len(results)-1] = s.mem()
m.AddArgs(results...)
} else {
// Store SSAable and heap-escaped PPARAMOUT variables back to stack locations.
for _, f := range s.curfn.Type().Results().FieldSlice() {
n := f.Nname.(*ir.Name)
if s.canSSA(n) {
val := s.variable(n, n.Type())
s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem())
s.store(n.Type(), s.decladdrs[n], val)
} else if !n.OnStack() {
s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem())
s.move(n.Type(), s.decladdrs[n], s.expr(n.Heapaddr))
} // else, on stack but too large to SSA, the result is already in its destination by construction, so no store needed.
// TODO: if (SSA) val is ever spilled, we'd like to use the PPARAMOUT slot for spilling it. That won't happen currently.
}
// Run exit code. Today, this is just racefuncexit, in -race mode.
s.stmtList(s.curfn.Exit)
// Do actual return.
m = s.mem()
}
b = s.endBlock()
b.Kind = ssa.BlockRet
b.SetControl(m)
if s.hasdefer && s.hasOpenDefers {
@ -2159,15 +2295,10 @@ func (s *state) expr(n ir.Node) *ssa.Value {
if s.canSSA(n) {
return s.variable(n, n.Type())
}
addr := s.addr(n)
return s.load(n.Type(), addr)
case ir.ONAMEOFFSET:
n := n.(*ir.NameOffsetExpr)
if s.canSSAName(n.Name_) && TypeOK(n.Type()) {
return s.variable(n, n.Type())
}
addr := s.addr(n)
return s.load(n.Type(), addr)
return s.load(n.Type(), s.addr(n))
case ir.OLINKSYMOFFSET:
n := n.(*ir.LinksymOffsetExpr)
return s.load(n.Type(), s.addr(n))
case ir.ONIL:
n := n.(*ir.NilExpr)
t := n.Type()
@ -2936,14 +3067,9 @@ func (s *state) expr(n ir.Node) *ssa.Value {
}
return s.zeroVal(n.Type())
case ir.ONEWOBJ:
case ir.ONEW:
n := n.(*ir.UnaryExpr)
if n.Type().Elem().Size() == 0 {
return s.newValue1A(ssa.OpAddr, n.Type(), ir.Syms.Zerobase, s.sb)
}
typ := s.expr(n.X)
vv := s.rtcall(ir.Syms.Newobject, true, []*types.Type{n.Type()}, typ)
return vv[0]
return s.newObject(n.Type().Elem())
default:
s.Fatalf("unhandled expr %v", n.Op())
@ -3260,7 +3386,7 @@ func (s *state) assign(left ir.Node, right *ssa.Value, deref bool, skip skipMask
// If this assignment clobbers an entire local variable, then emit
// OpVarDef so liveness analysis knows the variable is redefined.
if base, ok := clobberBase(left).(*ir.Name); ok && base.Op() == ir.ONAME && base.Class != ir.PEXTERN && base.Class != ir.PAUTOHEAP && skip == 0 {
if base, ok := clobberBase(left).(*ir.Name); ok && base.OnStack() && skip == 0 {
s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, base, s.mem(), !ir.IsAutoTmp(base))
}
@ -4366,30 +4492,8 @@ func (s *state) intrinsicCall(n *ir.CallExpr) *ssa.Value {
// intrinsicArgs extracts args from n, evaluates them to SSA values, and returns them.
func (s *state) intrinsicArgs(n *ir.CallExpr) []*ssa.Value {
// Construct map of temps; see comments in s.call about the structure of n.
temps := map[ir.Node]*ssa.Value{}
for _, a := range n.Args {
if a.Op() != ir.OAS {
s.Fatalf("non-assignment as a temp function argument %v", a.Op())
}
a := a.(*ir.AssignStmt)
l, r := a.X, a.Y
if l.Op() != ir.ONAME {
s.Fatalf("non-ONAME temp function argument %v", a.Op())
}
// Evaluate and store to "temporary".
// Walk ensures these temporaries are dead outside of n.
temps[l] = s.expr(r)
}
args := make([]*ssa.Value, len(n.Rargs))
for i, n := range n.Rargs {
// Store a value to an argument slot.
if x, ok := temps[n]; ok {
// This is a previously computed temporary.
args[i] = x
continue
}
// This is an explicit value; evaluate it.
args := make([]*ssa.Value, len(n.Args))
for i, n := range n.Args {
args[i] = s.expr(n)
}
return args
@ -4402,13 +4506,6 @@ func (s *state) intrinsicArgs(n *ir.CallExpr) []*ssa.Value {
// (as well as the deferBits variable), and this will enable us to run the proper
// defer calls during panics.
func (s *state) openDeferRecord(n *ir.CallExpr) {
// Do any needed expression evaluation for the args (including the
// receiver, if any). This may be evaluating something like 'autotmp_3 =
// once.mutex'. Such a statement will create a mapping in s.vars[] from
// the autotmp name to the evaluated SSA arg value, but won't do any
// stores to the stack.
s.stmtList(n.Args)
var args []*ssa.Value
var argNodes []*ir.Name
@ -4441,7 +4538,7 @@ func (s *state) openDeferRecord(n *ir.CallExpr) {
opendefer.closureNode = opendefer.closure.Aux.(*ir.Name)
opendefer.rcvrNode = opendefer.rcvr.Aux.(*ir.Name)
}
for _, argn := range n.Rargs {
for _, argn := range n.Args {
var v *ssa.Value
if TypeOK(argn.Type()) {
v = s.openDeferSave(nil, argn.Type(), s.expr(argn))
@ -4667,7 +4764,7 @@ func (s *state) callAddr(n *ir.CallExpr, k callKind) *ssa.Value {
// Returns the address of the return value (or nil if none).
func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Value {
s.prevCall = nil
var sym *types.Sym // target symbol (if static)
var callee *ir.Name // target function (if static)
var closure *ssa.Value // ptr to closure to run (if dynamic)
var codeptr *ssa.Value // ptr to target code (if dynamic)
var rcvr *ssa.Value // receiver to set
@ -4685,13 +4782,21 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val
}
testLateExpansion := false
inRegisters := false
switch n.Op() {
case ir.OCALLFUNC:
testLateExpansion = k != callDeferStack && ssa.LateCallExpansionEnabledWithin(s.f)
if k == callNormal && fn.Op() == ir.ONAME && fn.(*ir.Name).Class == ir.PFUNC {
fn := fn.(*ir.Name)
sym = fn.Sym()
callee = fn
// TODO remove after register abi is working
inRegistersImported := fn.Pragma()&ir.RegisterParams != 0
inRegistersSamePackage := fn.Func != nil && fn.Func.Pragma&ir.RegisterParams != 0
inRegisters = inRegistersImported || inRegistersSamePackage
if inRegisters {
s.f.Warnl(n.Pos(), "called function %v has register params", callee)
}
break
}
closure = s.expr(fn)
@ -4719,11 +4824,6 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val
types.CalcSize(fn.Type())
stksize := fn.Type().ArgWidth() // includes receiver, args, and results
// Run all assignments of temps.
// The temps are introduced to avoid overwriting argument
// slots when arguments themselves require function calls.
s.stmtList(n.Args)
var call *ssa.Value
if k == callDeferStack {
testLateExpansion = ssa.LateCallExpansionEnabledWithin(s.f)
@ -4757,7 +4857,7 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val
// Then, store all the arguments of the defer call.
ft := fn.Type()
off := t.FieldOff(12)
args := n.Rargs
args := n.Args
// Set receiver (for interface calls). Always a pointer.
if rcvr != nil {
@ -4832,7 +4932,7 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val
// Write args.
t := n.X.Type()
args := n.Rargs
args := n.Args
if n.Op() == ir.OCALLMETH {
base.Fatalf("OCALLMETH missed by walkCall")
}
@ -4885,13 +4985,13 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val
} else {
call = s.newValue2A(ssa.OpInterCall, types.TypeMem, ssa.InterfaceAuxCall(ACArgs, ACResults), codeptr, s.mem())
}
case sym != nil:
case callee != nil:
if testLateExpansion {
aux := ssa.StaticAuxCall(callTargetLSym(sym, s.curfn.LSym), ACArgs, ACResults)
aux := ssa.StaticAuxCall(callTargetLSym(callee, s.curfn.LSym), ACArgs, ACResults)
call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
call.AddArgs(callArgs...)
} else {
call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, ssa.StaticAuxCall(callTargetLSym(sym, s.curfn.LSym), ACArgs, ACResults), s.mem())
call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, ssa.StaticAuxCall(callTargetLSym(callee, s.curfn.LSym), ACArgs, ACResults), s.mem())
}
default:
s.Fatalf("bad call type %v %v", n.Op(), n)
@ -4987,24 +5087,27 @@ func (s *state) addr(n ir.Node) *ssa.Value {
}
t := types.NewPtr(n.Type())
var offset int64
linksymOffset := func(lsym *obj.LSym, offset int64) *ssa.Value {
v := s.entryNewValue1A(ssa.OpAddr, t, lsym, s.sb)
// TODO: Make OpAddr use AuxInt as well as Aux.
if offset != 0 {
v = s.entryNewValue1I(ssa.OpOffPtr, v.Type, offset, v)
}
return v
}
switch n.Op() {
case ir.ONAMEOFFSET:
no := n.(*ir.NameOffsetExpr)
offset = no.Offset_
n = no.Name_
fallthrough
case ir.OLINKSYMOFFSET:
no := n.(*ir.LinksymOffsetExpr)
return linksymOffset(no.Linksym, no.Offset_)
case ir.ONAME:
n := n.(*ir.Name)
if n.Heapaddr != nil {
return s.expr(n.Heapaddr)
}
switch n.Class {
case ir.PEXTERN:
// global variable
v := s.entryNewValue1A(ssa.OpAddr, t, n.Linksym(), s.sb)
// TODO: Make OpAddr use AuxInt as well as Aux.
if offset != 0 {
v = s.entryNewValue1I(ssa.OpOffPtr, v.Type, offset, v)
}
return v
return linksymOffset(n.Linksym(), 0)
case ir.PPARAM:
// parameter slot
v := s.decladdrs[n]
@ -5024,8 +5127,6 @@ func (s *state) addr(n ir.Node) *ssa.Value {
// ensure that we reuse symbols for out parameters so
// that cse works on their addresses
return s.newValue2Apos(ssa.OpLocalAddr, t, n, s.sp, s.mem(), true)
case ir.PAUTOHEAP:
return s.expr(n.Heapaddr)
default:
s.Fatalf("variable address class %v not implemented", n.Class)
return nil
@ -5126,15 +5227,10 @@ func (s *state) canSSA(n ir.Node) bool {
}
func (s *state) canSSAName(name *ir.Name) bool {
if name.Addrtaken() {
return false
}
if ir.IsParamHeapCopy(name) {
if name.Addrtaken() || !name.OnStack() {
return false
}
switch name.Class {
case ir.PEXTERN, ir.PAUTOHEAP:
return false
case ir.PPARAMOUT:
if s.hasdefer {
// TODO: handle this case? Named return values must be
@ -5160,7 +5256,7 @@ func (s *state) canSSAName(name *ir.Name) bool {
// TODO: try to make more variables SSAable?
}
// canSSA reports whether variables of type t are SSA-able.
// TypeOK reports whether variables of type t are SSA-able.
func TypeOK(t *types.Type) bool {
types.CalcSize(t)
if t.Width > int64(4*types.PtrSize) {
@ -6013,8 +6109,8 @@ func (s *state) floatToUint(cvttab *f2uCvtTab, n ir.Node, x *ssa.Value, ft, tt *
// commaok indicates whether to panic or return a bool.
// If commaok is false, resok will be nil.
func (s *state) dottype(n *ir.TypeAssertExpr, commaok bool) (res, resok *ssa.Value) {
iface := s.expr(n.X) // input interface
target := s.expr(n.DstType) // target type
iface := s.expr(n.X) // input interface
target := s.reflectType(n.Type()) // target type
byteptr := s.f.Config.Types.BytePtr
if n.Type().IsInterface() {
@ -6148,7 +6244,7 @@ func (s *state) dottype(n *ir.TypeAssertExpr, commaok bool) (res, resok *ssa.Val
if !commaok {
// on failure, panic by calling panicdottype
s.startBlock(bFail)
taddr := s.expr(n.SrcType)
taddr := s.reflectType(n.X.Type())
if n.X.Type().IsEmptyInterface() {
s.rtcall(ir.Syms.PanicdottypeE, false, nil, itab, target, taddr)
} else {
@ -6365,7 +6461,7 @@ func (s *State) DebugFriendlySetPosFrom(v *ssa.Value) {
// in the generated code.
if p.IsStmt() != src.PosIsStmt {
p = p.WithNotStmt()
// Calls use the pos attached to v, but copy the statement mark from SSAGenState
// Calls use the pos attached to v, but copy the statement mark from State
}
s.SetPos(p)
} else {
@ -6374,57 +6470,6 @@ func (s *State) DebugFriendlySetPosFrom(v *ssa.Value) {
}
}
// byXoffset implements sort.Interface for []*ir.Name using Xoffset as the ordering.
type byXoffset []*ir.Name
func (s byXoffset) Len() int { return len(s) }
func (s byXoffset) Less(i, j int) bool { return s[i].FrameOffset() < s[j].FrameOffset() }
func (s byXoffset) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func emitStackObjects(e *ssafn, pp *objw.Progs) {
var vars []*ir.Name
for _, n := range e.curfn.Dcl {
if liveness.ShouldTrack(n) && n.Addrtaken() {
vars = append(vars, n)
}
}
if len(vars) == 0 {
return
}
// Sort variables from lowest to highest address.
sort.Sort(byXoffset(vars))
// Populate the stack object data.
// Format must match runtime/stack.go:stackObjectRecord.
x := e.curfn.LSym.Func().StackObjects
off := 0
off = objw.Uintptr(x, off, uint64(len(vars)))
for _, v := range vars {
// Note: arguments and return values have non-negative Xoffset,
// in which case the offset is relative to argp.
// Locals have a negative Xoffset, in which case the offset is relative to varp.
off = objw.Uintptr(x, off, uint64(v.FrameOffset()))
if !types.TypeSym(v.Type()).Siggen() {
e.Fatalf(v.Pos(), "stack object's type symbol not generated for type %s", v.Type())
}
off = objw.SymPtr(x, off, reflectdata.WriteType(v.Type()), 0)
}
// Emit a funcdata pointing at the stack object data.
p := pp.Prog(obj.AFUNCDATA)
p.From.SetConst(objabi.FUNCDATA_StackObjects)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = x
if base.Flag.Live != 0 {
for _, v := range vars {
base.WarnfAt(v.Pos(), "stack object %v %s", v, v.Type().String())
}
}
}
// genssa appends entries to pp for each instruction in f.
func genssa(f *ssa.Func, pp *objw.Progs) {
var s State
@ -6432,7 +6477,6 @@ func genssa(f *ssa.Func, pp *objw.Progs) {
e := f.Frontend().(*ssafn)
s.livenessMap = liveness.Compute(e.curfn, f, e.stkptrsize, pp)
emitStackObjects(e, pp)
openDeferInfo := e.curfn.LSym.Func().OpenCodedDeferInfo
if openDeferInfo != nil {
@ -7165,7 +7209,7 @@ func (e *ssafn) SplitInterface(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot
if n.Type().IsEmptyInterface() {
f = ".type"
}
c := e.SplitSlot(&name, f, 0, u) // see comment in plive.go:onebitwalktype1.
c := e.SplitSlot(&name, f, 0, u) // see comment in typebits.Set
d := e.SplitSlot(&name, ".data", u.Size(), t)
return c, d
}
@ -7345,31 +7389,26 @@ func clobberBase(n ir.Node) ir.Node {
//
// 3. in all other cases, want the regular ABIInternal linksym
//
func callTargetLSym(callee *types.Sym, callerLSym *obj.LSym) *obj.LSym {
func callTargetLSym(callee *ir.Name, callerLSym *obj.LSym) *obj.LSym {
lsym := callee.Linksym()
if !base.Flag.ABIWrap {
return lsym
}
if ir.AsNode(callee.Def) == nil {
fn := callee.Func
if fn == nil {
return lsym
}
defn := ir.AsNode(callee.Def).Name().Defn
if defn == nil {
return lsym
}
ndclfunc := defn.(*ir.Func)
// check for case 1 above
if callerLSym.ABIWrapper() {
if nlsym := ndclfunc.LSym; nlsym != nil {
if nlsym := fn.LSym; nlsym != nil {
lsym = nlsym
}
} else {
// check for case 2 above
nam := ndclfunc.Nname
defABI, hasDefABI := symabiDefs[nam.Sym().LinksymName()]
defABI, hasDefABI := symabiDefs[lsym.Name]
if hasDefABI && defABI == obj.ABI0 {
lsym = nam.Sym().LinksymABI0()
lsym = callee.LinksymABI(obj.ABI0)
}
}
return lsym

View file

@ -25,55 +25,38 @@ import (
"cmd/internal/src"
)
// InitAddr writes the static address of a to n. a must be an ONAME.
// Neither n nor a is modified.
func InitAddr(n *ir.Name, noff int64, a *ir.Name, aoff int64) {
// InitAddrOffset writes the static address lsym+off to n+noff; it does not modify n.
// It's the caller's responsibility to make sure lsym comes from an ONAME/PEXTERN node.
func InitAddrOffset(n *ir.Name, noff int64, lsym *obj.LSym, off int64) {
if n.Op() != ir.ONAME {
base.Fatalf("addrsym n op %v", n.Op())
base.Fatalf("InitAddr n op %v", n.Op())
}
if n.Sym() == nil {
base.Fatalf("addrsym nil n sym")
}
if a.Op() != ir.ONAME {
base.Fatalf("addrsym a op %v", a.Op())
base.Fatalf("InitAddr nil n sym")
}
s := n.Linksym()
s.WriteAddr(base.Ctxt, noff, types.PtrSize, a.Linksym(), aoff)
s.WriteAddr(base.Ctxt, noff, types.PtrSize, lsym, off)
}
// InitFunc writes the static address of f to n. f must be a global function.
// Neither n nor f is modified.
func InitFunc(n *ir.Name, noff int64, f *ir.Name) {
if n.Op() != ir.ONAME {
base.Fatalf("pfuncsym n op %v", n.Op())
}
if n.Sym() == nil {
base.Fatalf("pfuncsym nil n sym")
}
if f.Class != ir.PFUNC {
base.Fatalf("pfuncsym class not PFUNC %d", f.Class)
}
s := n.Linksym()
s.WriteAddr(base.Ctxt, noff, types.PtrSize, FuncLinksym(f), 0)
// InitAddr is InitAddrOffset, with offset fixed to 0.
func InitAddr(n *ir.Name, noff int64, lsym *obj.LSym) {
InitAddrOffset(n, noff, lsym, 0)
}
// InitSlice writes a static slice symbol {&arr, lencap, lencap} to n+noff.
// InitSlice does not modify n.
func InitSlice(n *ir.Name, noff int64, arr *ir.Name, lencap int64) {
// InitSlice writes a static slice symbol {lsym, lencap, lencap} to n+noff; it does not modify n.
// It's the caller's responsibility to make sure lsym comes from an ONAME node.
func InitSlice(n *ir.Name, noff int64, lsym *obj.LSym, lencap int64) {
s := n.Linksym()
if arr.Op() != ir.ONAME {
base.Fatalf("slicesym non-name arr %v", arr)
}
s.WriteAddr(base.Ctxt, noff, types.PtrSize, arr.Linksym(), 0)
s.WriteAddr(base.Ctxt, noff, types.PtrSize, lsym, 0)
s.WriteInt(base.Ctxt, noff+types.SliceLenOffset, types.PtrSize, lencap)
s.WriteInt(base.Ctxt, noff+types.SliceCapOffset, types.PtrSize, lencap)
}
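
The three words written here are precisely a slice header, which is easy to confirm from ordinary Go (the printed size assumes a 64-bit platform):

package main

import (
	"fmt"
	"unsafe"
)

func main() {
	s := []byte("abc")
	// A slice value is {data pointer, len, cap}: three machine words,
	// the same three words InitSlice stores into n's static data.
	fmt.Println(unsafe.Sizeof(s)) // 24 on 64-bit: 3 words of 8 bytes
}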
func InitSliceBytes(nam *ir.Name, off int64, s string) {
if nam.Op() != ir.ONAME {
base.Fatalf("slicebytes %v", nam)
base.Fatalf("InitSliceBytes %v", nam)
}
InitSlice(nam, off, slicedata(nam.Pos(), s), int64(len(s)))
InitSlice(nam, off, slicedata(nam.Pos(), s).Linksym(), int64(len(s)))
}
const (
@ -243,14 +226,14 @@ func FuncSym(s *types.Sym) *types.Sym {
// except for the types package, which is protected separately.
// Reusing funcsymsmu to also cover this package lookup
// avoids a general, broader, expensive package lookup mutex.
// Note makefuncsym also does package look-up of func sym names,
// Note NeedFuncSym also does package look-up of func sym names,
// but it is only called serially, from the front end.
funcsymsmu.Lock()
sf, existed := s.Pkg.LookupOK(ir.FuncSymName(s))
// Don't export s·f when compiling for dynamic linking.
// When dynamically linking, the necessary function
// symbols will be created explicitly with makefuncsym.
// See the makefuncsym comment for details.
// symbols will be created explicitly with NeedFuncSym.
// See the NeedFuncSym comment for details.
if !base.Ctxt.Flag_dynlink && !existed {
funcsyms = append(funcsyms, s)
}
@ -265,6 +248,13 @@ func FuncLinksym(n *ir.Name) *obj.LSym {
return FuncSym(n.Sym()).Linksym()
}
func GlobalLinksym(n *ir.Name) *obj.LSym {
if n.Op() != ir.ONAME || n.Class != ir.PEXTERN {
base.Fatalf("expected global variable: %v", n)
}
return n.Linksym()
}
// NeedFuncSym ensures that s·f is exported, if needed.
// It is only used with -dynlink.
// When not compiling for dynamic linking,
@ -297,7 +287,7 @@ func NeedFuncSym(s *types.Sym) {
func WriteFuncSyms() {
sort.Slice(funcsyms, func(i, j int) bool {
return funcsyms[i].LinksymName() < funcsyms[j].LinksymName()
return funcsyms[i].Linksym().Name < funcsyms[j].Linksym().Name
})
for _, s := range funcsyms {
sf := s.Pkg.Lookup(ir.FuncSymName(s)).Linksym()
@ -310,16 +300,16 @@ func WriteFuncSyms() {
// Neither n nor c is modified.
func InitConst(n *ir.Name, noff int64, c ir.Node, wid int) {
if n.Op() != ir.ONAME {
base.Fatalf("litsym n op %v", n.Op())
base.Fatalf("InitConst n op %v", n.Op())
}
if n.Sym() == nil {
base.Fatalf("litsym nil n sym")
base.Fatalf("InitConst nil n sym")
}
if c.Op() == ir.ONIL {
return
}
if c.Op() != ir.OLITERAL {
base.Fatalf("litsym c op %v", c.Op())
base.Fatalf("InitConst c op %v", c.Op())
}
s := n.Linksym()
switch u := c.Val(); u.Kind() {
@ -358,6 +348,6 @@ func InitConst(n *ir.Name, noff int64, c ir.Node, wid int) {
s.WriteInt(base.Ctxt, noff+int64(types.PtrSize), types.PtrSize, int64(len(i)))
default:
base.Fatalf("litsym unhandled OLITERAL %v", c)
base.Fatalf("InitConst unhandled OLITERAL %v", c)
}
}

View file

@ -23,13 +23,7 @@ const (
embedFiles
)
func embedFileList(v *ir.Name) []string {
kind := embedKind(v.Type())
if kind == embedUnknown {
base.ErrorfAt(v.Pos(), "go:embed cannot apply to var of type %v", v.Type())
return nil
}
func embedFileList(v *ir.Name, kind int) []string {
// Build list of files to store.
have := make(map[string]bool)
var list []string
@ -71,38 +65,15 @@ func embedFileList(v *ir.Name) []string {
return list
}
// embedKindApprox determines the kind of embedding variable, approximately.
// The match is approximate because we haven't done scope resolution yet and
// can't tell whether "string" and "byte" really mean "string" and "byte".
// The result must be confirmed later, after type checking, using embedKind.
func embedKindApprox(typ ir.Node) int {
if typ.Sym() != nil && typ.Sym().Name == "FS" && (typ.Sym().Pkg.Path == "embed" || (typ.Sym().Pkg == types.LocalPkg && base.Ctxt.Pkgpath == "embed")) {
return embedFiles
}
// These are not guaranteed to match only string and []byte -
// maybe the local package has redefined one of those words.
// But it's the best we can do now during the noder.
// The stricter check happens later, in initEmbed calling embedKind.
if typ.Sym() != nil && typ.Sym().Name == "string" && typ.Sym().Pkg == types.LocalPkg {
return embedString
}
if typ, ok := typ.(*ir.SliceType); ok {
if sym := typ.Elem.Sym(); sym != nil && sym.Name == "byte" && sym.Pkg == types.LocalPkg {
return embedBytes
}
}
return embedUnknown
}
// embedKind determines the kind of embedding variable.
func embedKind(typ *types.Type) int {
if typ.Sym() != nil && typ.Sym().Name == "FS" && (typ.Sym().Pkg.Path == "embed" || (typ.Sym().Pkg == types.LocalPkg && base.Ctxt.Pkgpath == "embed")) {
return embedFiles
}
if typ == types.Types[types.TSTRING] {
if typ.Kind() == types.TSTRING {
return embedString
}
if typ.Sym() == nil && typ.IsSlice() && typ.Elem() == types.ByteType {
if typ.Sym() == nil && typ.IsSlice() && typ.Elem().Kind() == types.TUINT8 {
return embedBytes
}
return embedUnknown
@ -134,11 +105,28 @@ func embedFileLess(x, y string) bool {
// WriteEmbed emits the init data for a //go:embed variable,
// which is either a string, a []byte, or an embed.FS.
func WriteEmbed(v *ir.Name) {
files := embedFileList(v)
switch kind := embedKind(v.Type()); kind {
case embedUnknown:
base.ErrorfAt(v.Pos(), "go:embed cannot apply to var of type %v", v.Type())
// TODO(mdempsky): User errors should be reported by the frontend.
commentPos := (*v.Embed)[0].Pos
if !types.AllowsGoVersion(types.LocalPkg, 1, 16) {
prevPos := base.Pos
base.Pos = commentPos
base.ErrorfVers("go1.16", "go:embed")
base.Pos = prevPos
return
}
if base.Flag.Cfg.Embed.Patterns == nil {
base.ErrorfAt(commentPos, "invalid go:embed: build system did not supply embed configuration")
return
}
kind := embedKind(v.Type())
if kind == embedUnknown {
base.ErrorfAt(v.Pos(), "go:embed cannot apply to var of type %v", v.Type())
return
}
files := embedFileList(v, kind)
switch kind {
case embedString, embedBytes:
file := files[0]
fsym, size, err := fileStringSym(v.Pos(), base.Flag.Cfg.Embed.Files[file], kind == embedString, nil)
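
For reference, the three embed kinds handled above map directly onto the three declaration forms //go:embed accepts from user code since go1.16. An illustrative runnable sketch (not from this CL; assumes hello.txt and a static/ directory exist in the package):

package main

import (
	"embed"
	"fmt"
)

//go:embed hello.txt
var s string // embedString: one file, stored as a string

//go:embed hello.txt
var b []byte // embedBytes: one file, stored as a []byte

//go:embed static
var files embed.FS // embedFiles: a whole file tree

func main() {
	fmt.Println(len(s), len(b))
	_ = files
}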

View file

@ -15,6 +15,7 @@ import (
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/src"
)
type Entry struct {
@ -80,7 +81,7 @@ func (s *Schedule) tryStaticInit(nn ir.Node) bool {
func (s *Schedule) staticcopy(l *ir.Name, loff int64, rn *ir.Name, typ *types.Type) bool {
if rn.Class == ir.PFUNC {
// TODO if roff != 0 { panic }
staticdata.InitFunc(l, loff, rn)
staticdata.InitAddr(l, loff, staticdata.FuncLinksym(rn))
return true
}
if rn.Class != ir.PEXTERN || rn.Sym().Pkg != types.LocalPkg {
@ -137,9 +138,8 @@ func (s *Schedule) staticcopy(l *ir.Name, loff int64, rn *ir.Name, typ *types.Ty
case ir.OADDR:
r := r.(*ir.AddrExpr)
if a := r.X; a.Op() == ir.ONAME {
a := a.(*ir.Name)
staticdata.InitAddr(l, loff, a, 0)
if a, ok := r.X.(*ir.Name); ok && a.Op() == ir.ONAME {
staticdata.InitAddr(l, loff, staticdata.GlobalLinksym(a))
return true
}
@ -148,14 +148,14 @@ func (s *Schedule) staticcopy(l *ir.Name, loff int64, rn *ir.Name, typ *types.Ty
switch r.X.Op() {
case ir.OARRAYLIT, ir.OSLICELIT, ir.OSTRUCTLIT, ir.OMAPLIT:
// copy pointer
staticdata.InitAddr(l, loff, s.Temps[r], 0)
staticdata.InitAddr(l, loff, staticdata.GlobalLinksym(s.Temps[r]))
return true
}
case ir.OSLICELIT:
r := r.(*ir.CompLitExpr)
// copy slice
staticdata.InitSlice(l, loff, s.Temps[r], r.Len)
staticdata.InitSlice(l, loff, staticdata.GlobalLinksym(s.Temps[r]), r.Len)
return true
case ir.OARRAYLIT, ir.OSTRUCTLIT:
@ -199,6 +199,20 @@ func (s *Schedule) StaticAssign(l *ir.Name, loff int64, r ir.Node, typ *types.Ty
r = r.(*ir.ConvExpr).X
}
assign := func(pos src.XPos, a *ir.Name, aoff int64, v ir.Node) {
if s.StaticAssign(a, aoff, v, v.Type()) {
return
}
var lhs ir.Node
if ir.IsBlank(a) {
// Don't use NameOffsetExpr with blank (#43677).
lhs = ir.BlankNode
} else {
lhs = ir.NewNameOffsetExpr(pos, a, aoff, v.Type())
}
s.append(ir.NewAssignStmt(pos, lhs, v))
}
switch r.Op() {
case ir.ONAME:
r := r.(*ir.Name)
@ -220,8 +234,8 @@ func (s *Schedule) StaticAssign(l *ir.Name, loff int64, r ir.Node, typ *types.Ty
case ir.OADDR:
r := r.(*ir.AddrExpr)
if name, offset, ok := StaticLoc(r.X); ok {
staticdata.InitAddr(l, loff, name, offset)
if name, offset, ok := StaticLoc(r.X); ok && name.Class == ir.PEXTERN {
staticdata.InitAddrOffset(l, loff, name.Linksym(), offset)
return true
}
fallthrough
@ -234,12 +248,10 @@ func (s *Schedule) StaticAssign(l *ir.Name, loff int64, r ir.Node, typ *types.Ty
a := StaticName(r.X.Type())
s.Temps[r] = a
staticdata.InitAddr(l, loff, a, 0)
staticdata.InitAddr(l, loff, a.Linksym())
// Init underlying literal.
if !s.StaticAssign(a, 0, r.X, a.Type()) {
s.append(ir.NewAssignStmt(base.Pos, a, r.X))
}
assign(base.Pos, a, 0, r.X)
return true
}
//dump("not static ptrlit", r);
@ -260,7 +272,7 @@ func (s *Schedule) StaticAssign(l *ir.Name, loff int64, r ir.Node, typ *types.Ty
ta.SetNoalg(true)
a := StaticName(ta)
s.Temps[r] = a
staticdata.InitSlice(l, loff, a, r.Len)
staticdata.InitSlice(l, loff, a.Linksym(), r.Len)
// Fall through to init underlying array.
l = a
loff = 0
@ -278,10 +290,7 @@ func (s *Schedule) StaticAssign(l *ir.Name, loff int64, r ir.Node, typ *types.Ty
continue
}
ir.SetPos(e.Expr)
if !s.StaticAssign(l, loff+e.Xoffset, e.Expr, e.Expr.Type()) {
a := ir.NewNameOffsetExpr(base.Pos, l, loff+e.Xoffset, e.Expr.Type())
s.append(ir.NewAssignStmt(base.Pos, a, e.Expr))
}
assign(base.Pos, l, loff+e.Xoffset, e.Expr)
}
return true
@ -298,7 +307,7 @@ func (s *Schedule) StaticAssign(l *ir.Name, loff int64, r ir.Node, typ *types.Ty
// Closures with no captured variables are globals,
// so the assignment can be done at link time.
// TODO if roff != 0 { panic }
staticdata.InitFunc(l, loff, r.Func.Nname)
staticdata.InitAddr(l, loff, staticdata.FuncLinksym(r.Func.Nname))
return true
}
ir.ClosureDebugRuntimeCheck(r)
@ -335,7 +344,7 @@ func (s *Schedule) StaticAssign(l *ir.Name, loff int64, r ir.Node, typ *types.Ty
// Create a copy of l to modify while we emit data.
// Emit itab, advance offset.
staticdata.InitAddr(l, loff, itab.X.(*ir.Name), 0)
staticdata.InitAddr(l, loff, itab.X.(*ir.LinksymOffsetExpr).Linksym)
// Emit data.
if types.IsDirectIface(val.Type()) {
@ -345,18 +354,13 @@ func (s *Schedule) StaticAssign(l *ir.Name, loff int64, r ir.Node, typ *types.Ty
}
// Copy val directly into n.
ir.SetPos(val)
if !s.StaticAssign(l, loff+int64(types.PtrSize), val, val.Type()) {
a := ir.NewNameOffsetExpr(base.Pos, l, loff+int64(types.PtrSize), val.Type())
s.append(ir.NewAssignStmt(base.Pos, a, val))
}
assign(base.Pos, l, loff+int64(types.PtrSize), val)
} else {
// Construct temp to hold val, write pointer to temp into n.
a := StaticName(val.Type())
s.Temps[val] = a
if !s.StaticAssign(a, 0, val, val.Type()) {
s.append(ir.NewAssignStmt(base.Pos, a, val))
}
staticdata.InitAddr(l, loff+int64(types.PtrSize), a, 0)
assign(base.Pos, a, 0, val)
staticdata.InitAddr(l, loff+int64(types.PtrSize), a.Linksym())
}
return true
@ -450,7 +454,7 @@ var statuniqgen int // name generator for static temps
// StaticName returns a name backed by a (writable) static data symbol.
// Use readonlystaticname for read-only node.
func StaticName(t *types.Type) *ir.Name {
// Don't use lookupN; it interns the resulting string, but these are all unique.
// Don't use LookupNum; it interns the resulting string, but these are all unique.
n := typecheck.NewName(typecheck.Lookup(fmt.Sprintf("%s%d", obj.StaticNamePref, statuniqgen)))
statuniqgen++
typecheck.Declare(n, ir.PEXTERN)
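
To make the static/non-static split above concrete, here is an illustrative user-level program (mine, not from the CL) annotated with how the scheduler in this file would treat each initializer:

package main

var x = 42          // static: InitConst writes 42 into x's data symbol
var p = &x          // static: InitAddr emits a relocation to x (GlobalLinksym path)
var s = []int{1, 2} // static: anonymous backing array plus an InitSlice header
var f = func() {}   // static: a closure with no captured variables is a global (FuncLinksym)
var r = g()         // not static: compiled into the generated package init function

func g() int { return x }

func main() {}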

View file

@ -2,10 +2,11 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
package test
import (
"bufio"
"cmd/compile/internal/abi"
"cmd/compile/internal/base"
"cmd/compile/internal/ssagen"
"cmd/compile/internal/typecheck"
@ -20,12 +21,7 @@ import (
// AMD64 registers available:
// - integer: RAX, RBX, RCX, RDI, RSI, R8, R9, R10, R11

// - floating point: X0 - X14
var configAMD64 = ABIConfig{
regAmounts: RegAmounts{
intRegs: 9,
floatRegs: 15,
},
}
var configAMD64 = abi.NewABIConfig(9, 15)
func TestMain(m *testing.M) {
ssagen.Arch.LinkArch = &x86.Linkamd64

View file

@ -2,12 +2,13 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
package test
// This file contains utility routines and harness infrastructure used
// by the ABI tests in "abiutils_test.go".
import (
"cmd/compile/internal/abi"
"cmd/compile/internal/ir"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
@ -75,7 +76,7 @@ func tokenize(src string) []string {
return res
}
func verifyParamResultOffset(t *testing.T, f *types.Field, r ABIParamAssignment, which string, idx int) int {
func verifyParamResultOffset(t *testing.T, f *types.Field, r abi.ABIParamAssignment, which string, idx int) int {
n := ir.AsNode(f.Nname).(*ir.Name)
if n.FrameOffset() != int64(r.Offset) {
t.Errorf("%s %d: got offset %d wanted %d t=%v",
@ -110,7 +111,7 @@ func abitest(t *testing.T, ft *types.Type, exp expectedDump) {
types.CalcSize(ft)
// Analyze with full set of registers.
regRes := ABIAnalyze(ft, configAMD64)
regRes := abi.ABIAnalyze(ft, configAMD64)
regResString := strings.TrimSpace(regRes.String())
// Check results.
@ -121,12 +122,12 @@ func abitest(t *testing.T, ft *types.Type, exp expectedDump) {
}
// Analyze again with empty register set.
empty := ABIConfig{}
emptyRes := ABIAnalyze(ft, empty)
empty := &abi.ABIConfig{}
emptyRes := abi.ABIAnalyze(ft, empty)
emptyResString := emptyRes.String()
// Walk the results and make sure the offsets assigned match
// up with those assigned by dowidth. This checks to make sure that
// up with those assigned by CalcSize. This checks to make sure that
// when we have no available registers the ABI assignment degenerates
// back to the original ABI0.
@ -135,18 +136,18 @@ func abitest(t *testing.T, ft *types.Type, exp expectedDump) {
rfsl := ft.Recvs().Fields().Slice()
poff := 0
if len(rfsl) != 0 {
failed |= verifyParamResultOffset(t, rfsl[0], emptyRes.inparams[0], "receiver", 0)
failed |= verifyParamResultOffset(t, rfsl[0], emptyRes.InParams()[0], "receiver", 0)
poff = 1
}
// params
pfsl := ft.Params().Fields().Slice()
for k, f := range pfsl {
verifyParamResultOffset(t, f, emptyRes.inparams[k+poff], "param", k)
verifyParamResultOffset(t, f, emptyRes.InParams()[k+poff], "param", k)
}
// results
ofsl := ft.Results().Fields().Slice()
for k, f := range ofsl {
failed |= verifyParamResultOffset(t, f, emptyRes.outparams[k], "result", k)
failed |= verifyParamResultOffset(t, f, emptyRes.OutParams()[k], "result", k)
}
if failed != 0 {

View file

@ -53,7 +53,7 @@ func G(x *A, n int) {
return
}
// Address-taken local of type A, which will ensure that the
// compiler's dtypesym() routine will create a method wrapper.
// compiler's writeType() routine will create a method wrapper.
var a, b A
a.next = x
a.prev = &b

View file

@ -15,7 +15,7 @@ import (
// on future calls with the same type t.
func Set(t *types.Type, off int64, bv bitvec.BitVec) {
if t.Align > 0 && off&int64(t.Align-1) != 0 {
base.Fatalf("onebitwalktype1: invalid initial alignment: type %v has alignment %d, but offset is %v", t, t.Align, off)
base.Fatalf("typebits.Set: invalid initial alignment: type %v has alignment %d, but offset is %v", t, t.Align, off)
}
if !t.HasPointers() {
// Note: this case ensures that pointers to go:notinheap types
@ -26,14 +26,14 @@ func Set(t *types.Type, off int64, bv bitvec.BitVec) {
switch t.Kind() {
case types.TPTR, types.TUNSAFEPTR, types.TFUNC, types.TCHAN, types.TMAP:
if off&int64(types.PtrSize-1) != 0 {
base.Fatalf("onebitwalktype1: invalid alignment, %v", t)
base.Fatalf("typebits.Set: invalid alignment, %v", t)
}
bv.Set(int32(off / int64(types.PtrSize))) // pointer
case types.TSTRING:
// struct { byte *str; intgo len; }
if off&int64(types.PtrSize-1) != 0 {
base.Fatalf("onebitwalktype1: invalid alignment, %v", t)
base.Fatalf("typebits.Set: invalid alignment, %v", t)
}
bv.Set(int32(off / int64(types.PtrSize))) //pointer in first slot
@ -42,7 +42,7 @@ func Set(t *types.Type, off int64, bv bitvec.BitVec) {
// or, when isnilinter(t)==true:
// struct { Type *type; void *data; }
if off&int64(types.PtrSize-1) != 0 {
base.Fatalf("onebitwalktype1: invalid alignment, %v", t)
base.Fatalf("typebits.Set: invalid alignment, %v", t)
}
// The first word of an interface is a pointer, but we don't
// treat it as such.
@ -61,7 +61,7 @@ func Set(t *types.Type, off int64, bv bitvec.BitVec) {
case types.TSLICE:
// struct { byte *array; uintgo len; uintgo cap; }
if off&int64(types.PtrSize-1) != 0 {
base.Fatalf("onebitwalktype1: invalid TARRAY alignment, %v", t)
base.Fatalf("typebits.Set: invalid TARRAY alignment, %v", t)
}
bv.Set(int32(off / int64(types.PtrSize))) // pointer in first slot (BitsPointer)
@ -82,6 +82,6 @@ func Set(t *types.Type, off int64, bv bitvec.BitVec) {
}
default:
base.Fatalf("onebitwalktype1: unexpected type, %v", t)
base.Fatalf("typebits.Set: unexpected type, %v", t)
}
}
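
A runnable model of the bitmap this function computes (a sketch using reflect, assuming 8-byte words; not the compiler's own code): one bit per pointer-sized word, set where the word may hold a heap pointer, following the same per-kind layout rules as above.

package main

import (
	"fmt"
	"reflect"
)

// ptrWords marks which 8-byte words of t contain pointers, mirroring
// typebits.Set: string = {ptr, len}, slice = {ptr, len, cap},
// interface = 2 words with only the data word treated as a pointer.
func ptrWords(t reflect.Type, off uintptr, bv []bool) {
	switch t.Kind() {
	case reflect.Ptr, reflect.UnsafePointer, reflect.Func, reflect.Chan, reflect.Map:
		bv[off/8] = true
	case reflect.String:
		bv[off/8] = true // pointer in first slot; len is a scalar
	case reflect.Slice:
		bv[off/8] = true // pointer in first slot; len/cap are scalars
	case reflect.Interface:
		bv[off/8+1] = true // data word only; the itab/type word is not scanned
	case reflect.Array:
		for i := 0; i < t.Len(); i++ {
			ptrWords(t.Elem(), off+uintptr(i)*t.Elem().Size(), bv)
		}
	case reflect.Struct:
		for i := 0; i < t.NumField(); i++ {
			f := t.Field(i)
			ptrWords(f.Type, off+f.Offset, bv)
		}
	}
}

func main() {
	type T struct {
		s string
		n int
		p *int
	}
	t := reflect.TypeOf(T{})
	bv := make([]bool, t.Size()/8)
	ptrWords(t, 0, bv)
	fmt.Println(bv) // [true false false true]
}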

View file

@ -623,7 +623,7 @@ func OrigInt(n ir.Node, v int64) ir.Node {
return OrigConst(n, constant.MakeInt64(v))
}
// defaultlit on both nodes simultaneously;
// DefaultLit on both nodes simultaneously;
// if they're both ideal going in they better
// get the same type going out.
// force means must assign concrete (non-ideal) type.
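
The user-visible effect (illustrative example, not from the CL): two untyped operands must agree on one default type.

package main

import "fmt"

func main() {
	c := 1 + 2.5                // untyped int + untyped float: both become float64
	fmt.Printf("%T %v\n", c, c) // float64 3.5
}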

View file

@ -41,7 +41,7 @@ func Declare(n *ir.Name, ctxt ir.Class) {
s := n.Sym()
// kludgy: typecheckok means we're past parsing. E.g. genwrapper may declare out-of-package names later.
// kludgy: TypecheckAllowed means we're past parsing. E.g. reflectdata.methodWrapper may declare out-of-package names later.
if !inimport && !TypecheckAllowed && s.Pkg != types.LocalPkg {
base.ErrorfAt(n.Pos(), "cannot declare name %v", s)
}
@ -311,7 +311,7 @@ func FakeRecv() *types.Field {
var fakeRecvField = FakeRecv
var funcStack []funcStackEnt // stack of previous values of Curfn/dclcontext
var funcStack []funcStackEnt // stack of previous values of ir.CurFunc/DeclContext
type funcStackEnt struct {
curfn *ir.Func
@ -401,14 +401,14 @@ func Temp(t *types.Type) *ir.Name {
// make a new Node off the books
func TempAt(pos src.XPos, curfn *ir.Func, t *types.Type) *ir.Name {
if curfn == nil {
base.Fatalf("no curfn for tempAt")
base.Fatalf("no curfn for TempAt")
}
if curfn.Op() == ir.OCLOSURE {
ir.Dump("tempAt", curfn)
base.Fatalf("adding tempAt to wrong closure function")
ir.Dump("TempAt", curfn)
base.Fatalf("adding TempAt to wrong closure function")
}
if t == nil {
base.Fatalf("tempAt called with nil type")
base.Fatalf("TempAt called with nil type")
}
if t.Kind() == types.TFUNC && t.Recv() != nil {
base.Fatalf("misuse of method type: %v", t)

View file

@ -68,7 +68,7 @@ func tcShift(n, l, r ir.Node) (ir.Node, ir.Node, *types.Type) {
return l, r, nil
}
// no defaultlit for left
// no DefaultLit for left
// the outer context gives the type
t = l.Type()
if (l.Type() == types.UntypedFloat || l.Type() == types.UntypedComplex) && r.Op() == ir.OLITERAL {
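
The rule in the comment above is observable at the source level (illustrative example): for a non-constant shift, the left operand takes its type from the surrounding context instead of being defaulted early.

package main

import "fmt"

func main() {
	var s uint = 40
	var x int64 = 1 << s // untyped 1 becomes int64 from the context, so the shift is legal
	// var f float64 = 1 << s // error: the context would make 1 a float64, which cannot be shifted
	fmt.Println(x) // 1099511627776
}
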
@ -201,7 +201,7 @@ func tcArith(n ir.Node, op ir.Op, l, r ir.Node) (ir.Node, ir.Node, *types.Type)
// n.Left = tcCompLit(n.Left)
func tcCompLit(n *ir.CompLitExpr) (res ir.Node) {
if base.EnableTrace && base.Flag.LowerT {
defer tracePrint("typecheckcomplit", n)(&res)
defer tracePrint("tcCompLit", n)(&res)
}
lno := base.Pos
@ -838,7 +838,7 @@ func tcStar(n *ir.StarExpr, top int) ir.Node {
}
if l.Op() == ir.OTYPE {
n.SetOTYPE(types.NewPtr(l.Type()))
// Ensure l.Type gets dowidth'd for the backend. Issue 20174.
// Ensure l.Type gets CalcSize'd for the backend. Issue 20174.
types.CheckSize(l.Type())
return n
}

View file

@ -100,7 +100,7 @@ func PartialCallType(n *ir.SelectorExpr) *types.Type {
return t
}
// Lazy typechecking of imported bodies. For local functions, caninl will set ->typecheck
// Lazy typechecking of imported bodies. For local functions, CanInline will set ->typecheck
// because they're a copy of an already checked body.
func ImportedBody(fn *ir.Func) {
lno := ir.SetPos(fn.Nname)
@ -122,14 +122,14 @@ func ImportedBody(fn *ir.Func) {
ImportBody(fn)
// typecheckinl is only for imported functions;
// Stmts(fn.Inl.Body) below is only for imported functions;
// their bodies may refer to unsafe as long as the package
// was marked safe during import (which was checked then).
// the ->inl of a local function has been typechecked before caninl copied it.
// the ->inl of a local function has been typechecked before CanInline copied it.
pkg := fnpkg(fn.Nname)
if pkg == types.LocalPkg || pkg == nil {
return // typecheckinl on local function
return // ImportedBody on local function
}
if base.Flag.LowerM > 2 || base.Debug.Export != 0 {
@ -141,11 +141,11 @@ func ImportedBody(fn *ir.Func) {
Stmts(fn.Inl.Body)
ir.CurFunc = savefn
// During expandInline (which imports fn.Func.Inl.Body),
// declarations are added to fn.Func.Dcl by funcHdr(). Move them
// During ImportBody (which imports fn.Func.Inl.Body),
// declarations are added to fn.Func.Dcl by funcBody(). Move them
// to fn.Func.Inl.Dcl for consistency with how local functions
// behave. (Append because typecheckinl may be called multiple
// times.)
// behave. (Append because ImportedBody may be called multiple
// times on the same fn.)
fn.Inl.Dcl = append(fn.Inl.Dcl, fn.Dcl...)
fn.Dcl = nil
@ -296,15 +296,22 @@ func tcClosure(clo *ir.ClosureExpr, top int) {
fn.SetClosureCalled(top&ctxCallee != 0)
// Do not typecheck fn twice, otherwise, we will end up pushing
// fn to Target.Decls multiple times, causing initLSym called twice.
// fn to Target.Decls multiple times, causing InitLSym called twice.
// See #30709
if fn.Typecheck() == 1 {
clo.SetType(fn.Type())
return
}
fn.Nname.SetSym(ClosureName(ir.CurFunc))
ir.MarkFunc(fn.Nname)
// Don't give a name or add to Target.Decls if we are typechecking an inlined
// body in ImportedBody(), since we only want to create the named function
// when the closure is actually inlined (and then we force a typecheck
// explicitly in (*inlsubst).node()).
inTypeCheckInl := ir.CurFunc != nil && ir.CurFunc.Body == nil
if !inTypeCheckInl {
fn.Nname.SetSym(ClosureName(ir.CurFunc))
ir.MarkFunc(fn.Nname)
}
Func(fn)
clo.SetType(fn.Type())
@ -338,15 +345,22 @@ func tcClosure(clo *ir.ClosureExpr, top int) {
}
fn.ClosureVars = fn.ClosureVars[:out]
Target.Decls = append(Target.Decls, fn)
if base.Flag.W > 1 {
s := fmt.Sprintf("New closure func: %s", ir.FuncName(fn))
ir.Dump(s, fn)
}
if !inTypeCheckInl {
// Add function to Target.Decls once, only when we give it a name
Target.Decls = append(Target.Decls, fn)
}
}
// type check function definition
// To be called by typecheck, not directly.
// (Call typecheckFunc instead.)
// (Call typecheck.Func instead.)
func tcFunc(n *ir.Func) {
if base.EnableTrace && base.Flag.LowerT {
defer tracePrint("typecheckfunc", n)(nil)
defer tracePrint("tcFunc", n)(nil)
}
n.Nname = AssignExpr(n.Nname).(*ir.Name)
@ -896,7 +910,7 @@ func tcNew(n *ir.UnaryExpr) ir.Node {
// tcPanic typechecks an OPANIC node.
func tcPanic(n *ir.UnaryExpr) ir.Node {
n.X = Expr(n.X)
n.X = DefaultLit(n.X, types.Types[types.TINTER])
n.X = AssignConv(n.X, types.Types[types.TINTER], "argument to panic")
if n.X.Type() == nil {
n.SetType(nil)
return n

View file

@ -423,9 +423,13 @@ type exportWriter struct {
prevLine int64
prevColumn int64
// dclIndex maps function-scoped declarations to their index
// within their respective Func's Dcl list.
dclIndex map[*ir.Name]int
// dclIndex maps function-scoped declarations to an int used to refer to
// them later in the function. For local variables/params, the int is
// non-negative and in order of the appearance in the Func's Dcl list. For
// closure variables, the index is negative starting at -2.
dclIndex map[*ir.Name]int
maxDclIndex int
maxClosureVarIndex int
}
func (p *iexporter) doDecl(n *ir.Name) {
@ -976,6 +980,9 @@ func (w *exportWriter) funcExt(n *ir.Name) {
w.linkname(n.Sym())
w.symIdx(n.Sym())
// TODO remove after register abi is working.
w.uint64(uint64(n.Func.Pragma))
// Escape analysis.
for _, fs := range &types.RecvsParams {
for _, f := range fs(n.Type()).FieldSlice() {
@ -1035,14 +1042,19 @@ func (w *exportWriter) typeExt(t *types.Type) {
// Inline bodies.
func (w *exportWriter) funcBody(fn *ir.Func) {
w.int64(int64(len(fn.Inl.Dcl)))
for i, n := range fn.Inl.Dcl {
func (w *exportWriter) writeNames(dcl []*ir.Name) {
w.int64(int64(len(dcl)))
for i, n := range dcl {
w.pos(n.Pos())
w.localIdent(n.Sym())
w.typ(n.Type())
w.dclIndex[n] = i
w.dclIndex[n] = w.maxDclIndex + i
}
w.maxDclIndex += len(dcl)
}
func (w *exportWriter) funcBody(fn *ir.Func) {
w.writeNames(fn.Inl.Dcl)
w.stmtList(fn.Inl.Body)
}
@ -1312,8 +1324,30 @@ func (w *exportWriter) expr(n ir.Node) {
// case OTARRAY, OTMAP, OTCHAN, OTSTRUCT, OTINTER, OTFUNC:
// should have been resolved by typechecking - handled by default case
// case OCLOSURE:
// unimplemented - handled by default case
case ir.OCLOSURE:
n := n.(*ir.ClosureExpr)
w.op(ir.OCLOSURE)
w.pos(n.Pos())
w.signature(n.Type())
// Write out id for the Outer of each closure variable. The
// closure variable itself for this closure will be re-created
// during import.
w.int64(int64(len(n.Func.ClosureVars)))
for i, cv := range n.Func.ClosureVars {
w.pos(cv.Pos())
w.localName(cv.Outer)
// The closure variable (which will be re-created during
// import) is given a negative id, starting at -2, which
// is used to refer to it later in the exported function
// body. -1 represents blanks.
w.dclIndex[cv] = -(i + 2) - w.maxClosureVarIndex
}
w.maxClosureVarIndex += len(n.Func.ClosureVars)
// like w.funcBody(n.Func), but not for .Inl
w.writeNames(n.Func.Dcl)
w.stmtList(n.Func.Body)
// case OCOMPLIT:
// should have been resolved by typechecking - handled by default case

View file

@ -37,7 +37,7 @@ var (
// and offset where that identifier's declaration can be read.
DeclImporter = map[*types.Sym]iimporterAndOffset{}
// inlineImporter is like declImporter, but for inline bodies
// inlineImporter is like DeclImporter, but for inline bodies
// for function and method symbols.
inlineImporter = map[*types.Sym]iimporterAndOffset{}
)
@ -265,6 +265,9 @@ type importReader struct {
// curfn is the current function we're importing into.
curfn *ir.Func
// Slice of all dcls for function, including any interior closures
allDcls []*ir.Name
allClosureVars []*ir.Name
}
func (p *iimporter) newReader(off uint64, pkg *types.Pkg) *importReader {
@ -334,7 +337,7 @@ func (r *importReader) doDecl(sym *types.Sym) *ir.Name {
recv := r.param()
mtyp := r.signature(recv)
// methodSym already marked m.Sym as a function.
// MethodSym already marked m.Sym as a function.
m := ir.NewNameAt(mpos, ir.MethodSym(recv.Type, msym))
m.Class = ir.PFUNC
m.SetType(mtyp)
@ -647,6 +650,9 @@ func (r *importReader) funcExt(n *ir.Name) {
r.linkname(n.Sym())
r.symIdx(n.Sym())
// TODO remove after register abi is working
n.SetPragma(ir.PragmaFlag(r.uint64()))
// Escape analysis.
for _, fs := range &types.RecvsParams {
for _, f := range fs(n.Type()).FieldSlice() {
@ -718,6 +724,7 @@ func (r *importReader) doInline(fn *ir.Func) {
base.Fatalf("%v already has inline body", fn)
}
//fmt.Printf("Importing %v\n", n)
r.funcBody(fn)
importlist = append(importlist, fn)
@ -751,6 +758,24 @@ func (r *importReader) funcBody(fn *ir.Func) {
r.curfn = fn
// Import local declarations.
fn.Inl.Dcl = r.readFuncDcls(fn)
// Import function body.
body := r.stmtList()
if body == nil {
// Make sure empty body is not interpreted as
// no inlineable body (see also parser.fnbody)
// (not doing so can cause significant performance
// degradation due to unnecessary calls to empty
// functions).
body = []ir.Node{}
}
fn.Inl.Body = body
r.curfn = outerfn
}
func (r *importReader) readNames(fn *ir.Func) []*ir.Name {
dcls := make([]*ir.Name, r.int64())
for i := range dcls {
n := ir.NewDeclNameAt(r.pos(), ir.ONAME, r.localIdent())
@ -759,7 +784,12 @@ func (r *importReader) funcBody(fn *ir.Func) {
n.SetType(r.typ())
dcls[i] = n
}
fn.Inl.Dcl = dcls
r.allDcls = append(r.allDcls, dcls...)
return dcls
}
func (r *importReader) readFuncDcls(fn *ir.Func) []*ir.Name {
dcls := r.readNames(fn)
// Fixup parameter classes and associate with their
// signature's type fields.
@ -784,28 +814,18 @@ func (r *importReader) funcBody(fn *ir.Func) {
for _, f := range typ.Results().FieldSlice() {
fix(f, ir.PPARAMOUT)
}
// Import function body.
body := r.stmtList()
if body == nil {
// Make sure empty body is not interpreted as
// no inlineable body (see also parser.fnbody)
// (not doing so can cause significant performance
// degradation due to unnecessary calls to empty
// functions).
body = []ir.Node{}
}
fn.Inl.Body = body
r.curfn = outerfn
return dcls
}
func (r *importReader) localName() *ir.Name {
i := r.int64()
if i < 0 {
if i == -1 {
return ir.BlankNode.(*ir.Name)
}
return r.curfn.Inl.Dcl[i]
if i < 0 {
return r.allClosureVars[-i-2]
}
return r.allDcls[i]
}
func (r *importReader) stmtList() []ir.Node {
@ -921,8 +941,38 @@ func (r *importReader) node() ir.Node {
// case OTARRAY, OTMAP, OTCHAN, OTSTRUCT, OTINTER, OTFUNC:
// unreachable - should have been resolved by typechecking
// case OCLOSURE:
// unimplemented
case ir.OCLOSURE:
//println("Importing CLOSURE")
pos := r.pos()
typ := r.signature(nil)
// All the remaining code below is similar to (*noder).funcLit(), but
// with Dcls and ClosureVars lists already set up
fn := ir.NewFunc(pos)
fn.SetIsHiddenClosure(true)
fn.Nname = ir.NewNameAt(pos, ir.BlankNode.Sym())
fn.Nname.Func = fn
fn.Nname.Ntype = ir.TypeNode(typ)
fn.Nname.Defn = fn
fn.Nname.SetType(typ)
cvars := make([]*ir.Name, r.int64())
for i := range cvars {
cvars[i] = ir.CaptureName(r.pos(), fn, r.localName().Canonical())
}
fn.ClosureVars = cvars
r.allClosureVars = append(r.allClosureVars, cvars...)
fn.Dcl = r.readFuncDcls(fn)
body := r.stmtList()
ir.FinishCaptureNames(pos, r.curfn, fn)
clo := ir.NewClosureExpr(pos, fn)
fn.OClosure = clo
fn.Body = body
return clo
// case OPTRLIT:
// unreachable - mapped to case OADDR below by exporter
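
A standalone model of the numbering scheme shared by the exporter's dclIndex and the importer's localName above (a sketch over strings instead of *ir.Name): locals get non-negative indices in Dcl order across the whole function, blanks are -1, and closure variables count down from -2.

package main

import "fmt"

type writer struct {
	idx                map[string]int
	maxDcl, maxClosure int
}

func (w *writer) writeNames(dcl []string) {
	for i, n := range dcl {
		w.idx[n] = w.maxDcl + i // non-negative, in Dcl order
	}
	w.maxDcl += len(dcl)
}

func (w *writer) writeClosureVars(cvs []string) {
	for i, cv := range cvs {
		w.idx[cv] = -(i + 2) - w.maxClosure // -2, -3, ...
	}
	w.maxClosure += len(cvs)
}

type reader struct {
	allDcls, allClosureVars []string
}

func (r *reader) localName(i int) string {
	if i == -1 {
		return "_" // blank
	}
	if i < 0 {
		return r.allClosureVars[-i-2]
	}
	return r.allDcls[i]
}

func main() {
	w := &writer{idx: map[string]int{}}
	w.writeNames([]string{"a", "b"})   // outer locals: 0, 1
	w.writeClosureVars([]string{"a'"}) // closure var: -2
	w.writeNames([]string{"c"})        // closure-local: 2
	fmt.Println(w.idx)

	r := &reader{allDcls: []string{"a", "b", "c"}, allClosureVars: []string{"a'"}}
	for _, i := range []int{0, 1, 2, -1, -2} {
		fmt.Println(i, "->", r.localName(i))
	}
}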

View file

@ -25,7 +25,7 @@ func typecheckrangeExpr(n *ir.RangeStmt) {
}
t := RangeExprType(n.X.Type())
// delicate little dance. see typecheckas2
// delicate little dance. see tcAssignList
if n.Key != nil && !ir.DeclaredBy(n.Key, n) {
n.Key = AssignExpr(n.Key)
}
@ -90,7 +90,7 @@ func typecheckrangeExpr(n *ir.RangeStmt) {
// fill in the var's type.
func tcAssign(n *ir.AssignStmt) {
if base.EnableTrace && base.Flag.LowerT {
defer tracePrint("typecheckas", n)(nil)
defer tracePrint("tcAssign", n)(nil)
}
if n.Y == nil {
@ -110,7 +110,7 @@ func tcAssign(n *ir.AssignStmt) {
func tcAssignList(n *ir.AssignListStmt) {
if base.EnableTrace && base.Flag.LowerT {
defer tracePrint("typecheckas2", n)(nil)
defer tracePrint("tcAssignList", n)(nil)
}
assign(n, n.Lhs, n.Rhs)
@ -119,7 +119,7 @@ func tcAssignList(n *ir.AssignListStmt) {
func assign(stmt ir.Node, lhs, rhs []ir.Node) {
// delicate little dance.
// the definition of lhs may refer to this assignment
// as its definition, in which case it will call typecheckas.
// as its definition, in which case it will call tcAssign.
// in that case, do not call typecheck back, or it will cycle.
// if the variable has a type (ntype) then typechecking
// will not look at defn, so it is okay (and desirable,

View file

@ -81,7 +81,7 @@ func markAddrOf(n ir.Node) ir.Node {
// main typecheck has completed.
// The argument to OADDR needs to be typechecked because &x[i] takes
// the address of x if x is an array, but not if x is a slice.
// Note: outervalue doesn't work correctly until n is typechecked.
// Note: OuterValue doesn't work correctly until n is typechecked.
n = typecheck(n, ctxExpr)
if x := ir.OuterValue(n); x.Op() == ir.ONAME {
x.Name().SetAddrtaken(true)
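
The array/slice distinction in that comment is visible in ordinary code (illustrative example): &x[i] pins the whole array x, while for a slice it only addresses the backing store.

package main

import "fmt"

func main() {
	var arr [4]int
	p := &arr[1] // takes the address of arr itself: arr is marked address-taken
	sl := make([]int, 4)
	q := &sl[1] // addresses the backing array; the slice header sl is not
	*p, *q = 10, 20
	fmt.Println(arr, sl) // [0 10 0 0] [0 20 0 0]
}
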
@ -367,10 +367,10 @@ func assignop(src, dst *types.Type) (ir.Op, string) {
var missing, have *types.Field
var ptr int
if implements(src, dst, &missing, &have, &ptr) {
// Call itabname so that (src, dst)
// Call NeedITab/ITabAddr so that (src, dst)
// gets added to itabs early, which allows
// us to de-virtualize calls through this
// type/interface pair later. See peekitabs in reflect.go
// type/interface pair later. See CompileITabs in reflect.go
if types.IsDirectIface(src) && !dst.IsEmptyInterface() {
NeedITab(src, dst)
}
@ -440,7 +440,7 @@ func assignop(src, dst *types.Type) (ir.Op, string) {
}
}
// 6. rule about untyped constants - already converted by defaultlit.
// 6. rule about untyped constants - already converted by DefaultLit.
// 7. Any typed value can be assigned to the blank identifier.
if dst.Kind() == types.TBLANK {
@ -834,7 +834,7 @@ func lookdot0(s *types.Sym, t *types.Type, save **types.Field, ignorecase bool)
var slist []symlink
// Code to help generate trampoline functions for methods on embedded
// types. These are approx the same as the corresponding adddot
// types. These are approx the same as the corresponding AddImplicitDots
// routines except that they expect to be called with unique tasks and
// they return the actual methods.

View file

@ -15,7 +15,7 @@ import (
func LookupRuntime(name string) *ir.Name {
s := ir.Pkgs.Runtime.Lookup(name)
if s == nil || s.Def == nil {
base.Fatalf("syslook: can't find runtime.%s", name)
base.Fatalf("LookupRuntime: can't find runtime.%s", name)
}
return ir.AsNode(s.Def).(*ir.Name)
}
@ -33,7 +33,7 @@ func SubstArgTypes(old *ir.Name, types_ ...*types.Type) *ir.Name {
n.Class = old.Class
n.SetType(types.SubstAny(old.Type(), &types_))
if len(types_) > 0 {
base.Fatalf("substArgTypes: too many argument types")
base.Fatalf("SubstArgTypes: too many argument types")
}
return n
}
@ -86,14 +86,17 @@ func InitRuntime() {
// LookupRuntimeFunc looks up Go function name in package runtime. This function
// must follow the internal calling convention.
func LookupRuntimeFunc(name string) *obj.LSym {
s := ir.Pkgs.Runtime.Lookup(name)
s.SetFunc(true)
return s.Linksym()
return LookupRuntimeABI(name, obj.ABIInternal)
}
// LookupRuntimeVar looks up a variable (or assembly function) name in package
// runtime. If this is a function, it may have a special calling
// convention.
func LookupRuntimeVar(name string) *obj.LSym {
return ir.Pkgs.Runtime.Lookup(name).Linksym()
return LookupRuntimeABI(name, obj.ABI0)
}
// LookupRuntimeABI looks up a name in package runtime using the given ABI.
func LookupRuntimeABI(name string, abi obj.ABI) *obj.LSym {
return base.PkgLinksym("runtime", name, abi)
}

View file

@ -456,7 +456,7 @@ func typecheck(n ir.Node, top int) (res ir.Node) {
}
// indexlit implements typechecking of untyped values as
// array/slice indexes. It is almost equivalent to defaultlit
// array/slice indexes. It is almost equivalent to DefaultLit
// but also accepts untyped numeric values representable as
// value of type int (see also checkmake for comparison).
// The result of indexlit MUST be assigned back to n, e.g.
@ -521,7 +521,7 @@ func typecheck1(n ir.Node, top int) ir.Node {
}
return n
case ir.ONAMEOFFSET:
case ir.OLINKSYMOFFSET:
// type already set
return n
@ -857,8 +857,8 @@ func typecheck1(n ir.Node, top int) ir.Node {
n := n.(*ir.ReturnStmt)
return tcReturn(n)
case ir.ORETJMP:
n := n.(*ir.BranchStmt)
case ir.OTAILCALL:
n := n.(*ir.TailCallStmt)
return n
case ir.OSELECT:
@ -938,7 +938,7 @@ func typecheckargs(n ir.InitNode) {
// If we're outside of function context, then this call will
// be executed during the generated init function. However,
// init.go hasn't yet created it. Instead, associate the
// temporary variables with initTodo for now, and init.go
// temporary variables with InitTodoFunc for now, and init.go
// will reassociate them later when it's appropriate.
static := ir.CurFunc == nil
if static {
@ -1890,7 +1890,7 @@ func checkmake(t *types.Type, arg string, np *ir.Node) bool {
return false
}
// Do range checks for constants before defaultlit
// Do range checks for constants before DefaultLit
// to avoid redundant "constant NNN overflows int" errors.
if n.Op() == ir.OLITERAL {
v := toint(n.Val())
@ -1904,7 +1904,7 @@ func checkmake(t *types.Type, arg string, np *ir.Node) bool {
}
}
// defaultlit is necessary for non-constants too: n might be 1.1<<k.
// DefaultLit is necessary for non-constants too: n might be 1.1<<k.
// TODO(gri) The length argument requirements for (array/slice) make
// are the same as for index expressions. Factor the code better;
// for instance, indexlit might be called here and incorporate some
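
Source-level behavior these checks implement (illustrative example): untyped constants representable as int are accepted as make arguments and indexes, and constant range errors are reported before any default typing.

package main

import "fmt"

func main() {
	s := make([]int, 10.0) // ok: the untyped float constant 10.0 is representable as int
	var a [8]byte
	_ = a[7.0] // ok for the same reason
	// _ = make([]int, 1<<70) // rejected at compile time: constant overflows int
	fmt.Println(len(s))
}
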
@ -2023,7 +2023,7 @@ func isTermNode(n ir.Node) bool {
n := n.(*ir.BlockStmt)
return isTermNodes(n.List)
case ir.OGOTO, ir.ORETURN, ir.ORETJMP, ir.OPANIC, ir.OFALL:
case ir.OGOTO, ir.ORETURN, ir.OTAILCALL, ir.OPANIC, ir.OFALL:
return true
case ir.OFOR, ir.OFORUNTIL:

View file

@ -132,7 +132,7 @@ func AlgType(t *Type) (AlgKind, *Type) {
return ret, nil
}
base.Fatalf("algtype: unexpected type %v", t)
base.Fatalf("AlgType: unexpected type %v", t)
return 0, nil
}
@ -163,7 +163,7 @@ func IncomparableField(t *Type) *Field {
// by padding.
func IsPaddedField(t *Type, i int) bool {
if !t.IsStruct() {
base.Fatalf("ispaddedfield called non-struct %v", t)
base.Fatalf("IsPaddedField called non-struct %v", t)
}
end := t.Width
if i+1 < t.NumFields() {

View file

@ -44,7 +44,7 @@ func OrigSym(s *Sym) *Sym {
}
if strings.HasPrefix(s.Name, ".anon") {
// originally an unnamed or _ name (see subr.go: structargs)
// originally an unnamed or _ name (see subr.go: NewFuncParams)
return nil
}

View file

@ -58,7 +58,7 @@ func typePos(t *Type) src.XPos {
var MaxWidth int64
// CalcSizeDisabled indicates whether it is safe
// to calculate Types' widths and alignments. See dowidth.
// to calculate Types' widths and alignments. See CalcSize.
var CalcSizeDisabled bool
// machine size and rounding alignment is dictated around
@ -135,7 +135,7 @@ func expandiface(t *Type) {
m.Offset = int64(i) * int64(PtrSize)
}
// Access fields directly to avoid recursively calling dowidth
// Access fields directly to avoid recursively calling CalcSize
// within Type.Fields().
t.Extra.(*Interface).Fields.Set(methods)
}
@ -164,7 +164,7 @@ func calcStructOffset(errtype *Type, t *Type, o int64, flag int) int64 {
f.Offset = o
if f.Nname != nil {
// addrescapes has similar code to update these offsets.
// Usually addrescapes runs after widstruct,
// Usually addrescapes runs after calcStructOffset,
// in which case we could drop this,
// but function closure functions are the exception.
// NOTE(rsc): This comment may be stale.
@ -306,17 +306,16 @@ func reportTypeLoop(t *Type) {
}
// CalcSize calculates and stores the size and alignment for t.
// If sizeCalculationDisabled is set, and the size/alignment
// If CalcSizeDisabled is set, and the size/alignment
// have not already been calculated, it calls Fatal.
// This is used to prevent data races in the back end.
func CalcSize(t *Type) {
// Calling dowidth when typecheck tracing enabled is not safe.
// Calling CalcSize when typecheck tracing enabled is not safe.
// See issue #33658.
if base.EnableTrace && SkipSizeForTracing {
return
}
if PtrSize == 0 {
// Assume this is a test.
return
}
@ -351,7 +350,7 @@ func CalcSize(t *Type) {
return
}
// defer checkwidth calls until after we're done
// defer CheckSize calls until after we're done
DeferCheckSize()
lno := base.Pos
@ -367,7 +366,7 @@ func CalcSize(t *Type) {
case TFUNC, TCHAN, TMAP, TSTRING:
break
// simtype == 0 during bootstrap
// SimType == 0 during bootstrap
default:
if SimType[t.Kind()] != 0 {
et = SimType[t.Kind()]
@ -377,7 +376,7 @@ func CalcSize(t *Type) {
var w int64
switch et {
default:
base.Fatalf("dowidth: unknown type: %v", t)
base.Fatalf("CalcSize: unknown type: %v", t)
// compiler-specific stuff
case TINT8, TUINT8, TBOOL:
@ -443,11 +442,11 @@ func CalcSize(t *Type) {
case TANY:
// not a real type; should be replaced before use.
base.Fatalf("dowidth any")
base.Fatalf("CalcSize any")
case TSTRING:
if StringSize == 0 {
base.Fatalf("early dowidth string")
base.Fatalf("early CalcSize string")
}
w = StringSize
t.Align = uint8(PtrSize)
@ -477,7 +476,7 @@ func CalcSize(t *Type) {
case TSTRUCT:
if t.IsFuncArgStruct() {
base.Fatalf("dowidth fn struct %v", t)
base.Fatalf("CalcSize fn struct %v", t)
}
w = calcStructOffset(t, t, 0, 1)
@ -526,7 +525,7 @@ func CalcStructSize(s *Type) {
s.Width = calcStructOffset(s, s, 0, 1) // sets align
}
// when a type's width should be known, we call checkwidth
// when a type's width should be known, we call CheckSize
// to compute it. during a declaration like
//
// type T *struct { next T }
@ -535,11 +534,11 @@ func CalcStructSize(s *Type) {
// until after T has been initialized to be a pointer to that struct.
// similarly, during import processing structs may be used
// before their definition. in those situations, calling
// defercheckwidth() stops width calculations until
// resumecheckwidth() is called, at which point all the
// checkwidths that were deferred are executed.
// dowidth should only be called when the type's size
// is needed immediately. checkwidth makes sure the
// DeferCheckSize() stops width calculations until
// ResumeCheckSize() is called, at which point all the
// CalcSizes that were deferred are executed.
// CalcSize should only be called when the type's size
// is needed immediately. CheckSize makes sure the
// size is evaluated eventually.
var deferredTypeStack []*Type
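
The deferral described above is what lets self-referential declarations like the one in the comment compile; a runnable check (illustrative):

package main

import (
	"fmt"
	"unsafe"
)

// T's width (one pointer) is computable even though the struct it
// points to mentions T: the struct's size calculation is deferred
// until T is known to be a pointer type.
type T *struct{ next T }

func main() {
	var t T
	fmt.Println(unsafe.Sizeof(t))                  // 8 on 64-bit
	fmt.Println(unsafe.Sizeof(struct{ next T }{})) // also 8: a single pointer field
}
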
@ -552,7 +551,7 @@ func CheckSize(t *Type) {
// function arg structs should not be checked
// outside of the enclosing function.
if t.IsFuncArgStruct() {
base.Fatalf("checkwidth %v", t)
base.Fatalf("CheckSize %v", t)
}
if defercalc == 0 {
@ -606,7 +605,7 @@ func PtrDataSize(t *Type) int64 {
case TINTER:
// struct { Itab *tab; void *data; } or
// struct { Type *type; void *data; }
// Note: see comment in plive.go:onebitwalktype1.
// Note: see comment in typebits.Set.
return 2 * int64(PtrSize)
case TSLICE:
@ -628,7 +627,7 @@ func PtrDataSize(t *Type) int64 {
return lastPtrField.Offset + PtrDataSize(lastPtrField.Type)
default:
base.Fatalf("typeptrdata: unexpected type, %v", t)
base.Fatalf("PtrDataSize: unexpected type, %v", t)
return 0
}
}

Some files were not shown because too many files have changed in this diff.