test/codegen: port various mem-combining tests

And delete them from asm_test.

Change-Id: I0e33d58274951ab5acb67b0117b60ef617ea887a
Reviewed-on: https://go-review.googlesource.com/105735
Run-TryBot: Alberto Donizetti <alb.donizetti@gmail.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Daniel Martí <mvdan@mvdan.cc>
This commit is contained in:
Alberto Donizetti 2018-04-09 09:52:40 +02:00
parent 8818b4d27e
commit 54c3f56ee0
2 changed files with 34 additions and 55 deletions

View file

@ -259,26 +259,6 @@ var linuxAMD64Tests = []*asmTest{
`,
pos: []string{"\tSHLQ\t\\$5,", "\tLEAQ\t\\(.*\\)\\(.*\\*2\\),"},
},
// see issue 19595.
// We want to merge load+op in f58, but not in f59.
{
fn: `
func f58(p, q *int) {
x := *p
*q += x
}`,
pos: []string{"\tADDQ\t\\("},
},
{
fn: `
func f59(p, q *int) {
x := *p
for i := 0; i < 10; i++ {
*q += x
}
}`,
pos: []string{"\tADDQ\t[A-Z]"},
},
{
// make sure assembly output has matching offset and base register.
fn: `
@ -289,31 +269,6 @@ var linuxAMD64Tests = []*asmTest{
`,
pos: []string{"b\\+24\\(SP\\)"},
},
{
// check load combining
fn: `
func f73(a, b byte) (byte,byte) {
return f73(f73(a,b))
}
`,
pos: []string{"\tMOVW\t"},
},
{
fn: `
func f74(a, b uint16) (uint16,uint16) {
return f74(f74(a,b))
}
`,
pos: []string{"\tMOVL\t"},
},
{
fn: `
func f75(a, b uint32) (uint32,uint32) {
return f75(f75(a,b))
}
`,
pos: []string{"\tMOVQ\t"},
},
// Make sure we don't put pointers in SSE registers across safe points.
{
fn: `
@ -384,16 +339,6 @@ var linuxARM64Tests = []*asmTest{
`,
pos: []string{"\tAND\t"},
},
{
// make sure offsets are folded into load and store.
fn: `
func f36(_, a [20]byte) (b [20]byte) {
b = a
return
}
`,
pos: []string{"\tMOVD\t\"\"\\.a\\+[0-9]+\\(FP\\), R[0-9]+", "\tMOVD\tR[0-9]+, \"\"\\.b\\+[0-9]+\\(FP\\)"},
},
{
// check that we don't emit comparisons for constant shift
fn: `

View file

@ -98,6 +98,40 @@ func load_be16_idx(b []byte, idx int) {
sink16 = binary.BigEndian.Uint16(b[idx:])
}
// Check load combining across function calls.
// fcall_byte passes a pair of byte values through a self-call so they must
// cross a call boundary; the asmcheck directive expects the two adjacent
// bytes to be combined into a single 16-bit move (MOVW) on amd64.
// NOTE: the infinite recursion is irrelevant — codegen tests are compiled,
// never executed.
func fcall_byte(a, b byte) (byte, byte) {
return fcall_byte(fcall_byte(a, b)) // amd64:`MOVW`
}
// fcall_uint16 is the 16-bit variant of the load-combining-across-calls
// check: two adjacent uint16 values should be moved as one 32-bit MOVL
// on amd64 (compile-only codegen test; the recursion never runs).
func fcall_uint16(a, b uint16) (uint16, uint16) {
return fcall_uint16(fcall_uint16(a, b)) // amd64:`MOVL`
}
// fcall_uint32 is the 32-bit variant of the load-combining-across-calls
// check: two adjacent uint32 values should be moved as one 64-bit MOVQ
// on amd64 (compile-only codegen test; the recursion never runs).
func fcall_uint32(a, b uint32) (uint32, uint32) {
return fcall_uint32(fcall_uint32(a, b)) // amd64:`MOVQ`
}
// We want to merge load+op in the first function, but not in the
// second. See Issue 19595.
// load_op_merge loads *p exactly once and uses it exactly once, so the
// load should be folded into the arithmetic instruction: the directive
// expects an ADDQ with a memory operand (ADDQ followed by an open paren).
func load_op_merge(p, q *int) {
x := *p
*q += x // amd64:`ADDQ\t\(`
}
// load_op_no_merge reuses the loaded value on every loop iteration, so the
// load must NOT be folded into the add; the directive expects ADDQ with a
// register source (operand starting with a capital letter, e.g. AX).
// See Issue 19595 for the original motivation.
func load_op_no_merge(p, q *int) {
x := *p
for i := 0; i < 10; i++ {
*q += x // amd64:`ADDQ\t[A-Z]`
}
}
// Make sure offsets are folded into loads and stores.
// offsets_fold copies a stack-passed 20-byte array argument to a result
// slot; the arm64 directives check that the argument/result frame offsets
// are folded directly into the MOVD load and store addressing modes
// (a+N(FP) and b+N(FP)) instead of being computed separately.
// The blank first parameter shifts a to a nonzero frame offset.
func offsets_fold(_, a [20]byte) (b [20]byte) {
// arm64:`MOVD\t""\.a\+[0-9]+\(FP\), R[0-9]+`,`MOVD\tR[0-9]+, ""\.b\+[0-9]+\(FP\)`
b = a
return
}
// ------------- //
// Storing //
// ------------- //