diff --git a/CHANGELOG.md b/CHANGELOG.md
index 190a9f373c8..413c8d2a8c0 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -27,6 +27,7 @@ occur sooner than specified in 'timeout'.
### Dart VM
+* Support for MIPS has been removed.
### Tool Changes
diff --git a/build/compiler_version.py b/build/compiler_version.py
deleted file mode 100755
index 05faf54454d..00000000000
--- a/build/compiler_version.py
+++ /dev/null
@@ -1,143 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) 2012 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""Compiler version checking tool for gcc
-
-Print gcc version as XY if you are running gcc X.Y.*.
-This is used to tweak build flags for gcc 4.4.
-"""
-
-import os
-import re
-import subprocess
-import sys
-
-
-compiler_version_cache = {}  # Map from (compiler, tool) -> version.
-
-
-def Usage(program_name):
-  print '%s MODE TOOL' % os.path.basename(program_name)
-  print 'MODE: host or target.'
-  print 'TOOL: assembler or compiler or linker.'
-  return 1
-
-
-def ParseArgs(args):
-  if len(args) != 2:
-    raise Exception('Invalid number of arguments')
-  mode = args[0]
-  tool = args[1]
-  if mode not in ('host', 'target'):
-    raise Exception('Invalid mode: %s' % mode)
-  if tool not in ('assembler', 'compiler', 'linker'):
-    raise Exception('Invalid tool: %s' % tool)
-  return mode, tool
-
-
-def GetEnvironFallback(var_list, default):
-  """Look up an environment variable from a possible list of variable names."""
-  for var in var_list:
-    if var in os.environ:
-      return os.environ[var]
-  return default
-
-
-def GetVersion(compiler, tool):
-  tool_output = tool_error = None
-  cache_key = (compiler, tool)
-  cached_version = compiler_version_cache.get(cache_key)
-  if cached_version:
-    return cached_version
-  try:
-    # Note that compiler could be something tricky like "distcc g++".
-    if tool == "compiler":
-      compiler = compiler + " -dumpversion"
-      # 4.6
-      version_re = re.compile(r"(\d+)\.(\d+)")
-    elif tool == "assembler":
-      compiler = compiler + " -Xassembler --version -x assembler -c /dev/null"
-      # Unmodified: GNU assembler (GNU Binutils) 2.24
-      # Ubuntu: GNU assembler (GNU Binutils for Ubuntu) 2.22
-      # Fedora: GNU assembler version 2.23.2
-      version_re = re.compile(r"^GNU [^ ]+ .* (\d+).(\d+).*?$", re.M)
-    elif tool == "linker":
-      compiler = compiler + " -Xlinker --version"
-      # Using BFD linker
-      # Unmodified: GNU ld (GNU Binutils) 2.24
-      # Ubuntu: GNU ld (GNU Binutils for Ubuntu) 2.22
-      # Fedora: GNU ld version 2.23.2
-      # Using Gold linker
-      # Unmodified: GNU gold (GNU Binutils 2.24) 1.11
-      # Ubuntu: GNU gold (GNU Binutils for Ubuntu 2.22) 1.11
-      # Fedora: GNU gold (version 2.23.2) 1.11
-      version_re = re.compile(r"^GNU [^ ]+ .* (\d+).(\d+).*?$", re.M)
-    else:
-      raise Exception("Unknown tool %s" % tool)
-
-    # Force the locale to C otherwise the version string could be localized
-    # making regex matching fail.
- env = os.environ.copy() - env["LC_ALL"] = "C" - pipe = subprocess.Popen(compiler, shell=True, env=env, - stdout=subprocess.PIPE, stderr=subprocess.PIPE) - tool_output, tool_error = pipe.communicate() - if pipe.returncode: - raise subprocess.CalledProcessError(pipe.returncode, compiler) - - parsed_output = version_re.match(tool_output) - result = parsed_output.group(1) + parsed_output.group(2) - compiler_version_cache[cache_key] = result - return result - except Exception, e: - if tool_error: - sys.stderr.write(tool_error) - print >> sys.stderr, "compiler_version.py failed to execute:", compiler - print >> sys.stderr, e - return "" - - -def main(args): - try: - (mode, tool) = ParseArgs(args[1:]) - except Exception, e: - sys.stderr.write(e.message + '\n\n') - return Usage(args[0]) - - ret_code, result = ExtractVersion(mode, tool) - if ret_code == 0: - print result - return ret_code - - -def DoMain(args): - """Hook to be called from gyp without starting a separate python - interpreter.""" - (mode, tool) = ParseArgs(args) - ret_code, result = ExtractVersion(mode, tool) - if ret_code == 0: - return result - raise Exception("Failed to extract compiler version for args: %s" % args) - - -def ExtractVersion(mode, tool): - # Check if various CXX environment variables exist and use them if they - # exist. The preferences and fallback order is a close approximation of - # GenerateOutputForConfig() in GYP's ninja generator. - # The main difference being not supporting GYP's make_global_settings. - environments = ['CXX_target', 'CXX'] - if mode == 'host': - environments = ['CXX_host'] + environments; - compiler = GetEnvironFallback(environments, 'c++') - - if compiler: - compiler_version = GetVersion(compiler, tool) - if compiler_version != "": - return (0, compiler_version) - return (1, None) - - -if __name__ == "__main__": - sys.exit(main(sys.argv)) diff --git a/build/config/android/config.gni b/build/config/android/config.gni index 38f81e830b1..a6dcf789aab 100644 --- a/build/config/android/config.gni +++ b/build/config/android/config.gni @@ -71,14 +71,10 @@ if (is_android) { "platforms/android-${_android_api_level}/arch-x86" arm_android_sysroot_subdir = "platforms/android-${_android_api_level}/arch-arm" - mips_android_sysroot_subdir = - "platforms/android-${_android_api_level}/arch-mips" x86_64_android_sysroot_subdir = "platforms/android-${_android_api_level}/arch-x86_64" arm64_android_sysroot_subdir = "platforms/android-${_android_api_level}/arch-arm64" - mips64_android_sysroot_subdir = - "platforms/android-${_android_api_level}/arch-mips64" # Toolchain root directory for each build. The actual binaries are inside # a "bin" directory inside of these. 
@@ -86,10 +82,8 @@ if (is_android) { _android_toolchain_detailed_version = "4.9.x" x86_android_toolchain_root = "$android_ndk_root/toolchains/x86-${_android_toolchain_version}/prebuilt/${android_host_os}-${android_host_arch}" arm_android_toolchain_root = "$android_ndk_root/toolchains/arm-linux-androideabi-${_android_toolchain_version}/prebuilt/${android_host_os}-${android_host_arch}" - mips_android_toolchain_root = "$android_ndk_root/toolchains/mipsel-linux-android-${_android_toolchain_version}/prebuilt/${android_host_os}-${android_host_arch}" x86_64_android_toolchain_root = "$android_ndk_root/toolchains/x86_64-${_android_toolchain_version}/prebuilt/${android_host_os}-${android_host_arch}" arm64_android_toolchain_root = "$android_ndk_root/toolchains/aarch64-linux-android-${_android_toolchain_version}/prebuilt/${android_host_os}-${android_host_arch}" - mips64_android_toolchain_root = "$android_ndk_root/toolchains/mips64el-linux-android-${_android_toolchain_version}/prebuilt/${android_host_os}-${android_host_arch}" # Location of libgcc. This is only needed for the current GN toolchain, so we # only need to define the current one, rather than one for every platform @@ -102,10 +96,6 @@ if (is_android) { android_prebuilt_arch = "android-arm" _binary_prefix = "arm-linux-androideabi" android_toolchain_root = "$arm_android_toolchain_root" - } else if (current_cpu == "mipsel") { - android_prebuilt_arch = "android-mips" - _binary_prefix = "mipsel-linux-android" - android_toolchain_root = "$mips_android_toolchain_root" } else if (current_cpu == "x64") { android_prebuilt_arch = "android-x86_64" _binary_prefix = "x86_64-linux-android" @@ -114,10 +104,6 @@ if (is_android) { android_prebuilt_arch = "android-arm64" _binary_prefix = "aarch64-linux-android" android_toolchain_root = "$arm64_android_toolchain_root" - } else if (current_cpu == "mips64el") { - android_prebuilt_arch = "android-mips64" - _binary_prefix = "mips64el-linux-android" - android_toolchain_root = "$mips64_android_toolchain_root" } else { assert(false, "Need android libgcc support for your target arch.") } @@ -156,14 +142,10 @@ if (is_android) { } else { android_app_abi = "armeabi-v7a" } - } else if (current_cpu == "mipsel") { - android_app_abi = "mips" } else if (current_cpu == "x64") { android_app_abi = "x86_64" } else if (current_cpu == "arm64") { android_app_abi = "arm64-v8a" - } else if (current_cpu == "mips64el") { - android_app_abi = "mips64" } else { assert(false, "Unknown Android ABI: " + current_cpu) } diff --git a/build/config/compiler/BUILD.gn b/build/config/compiler/BUILD.gn index 75fb9dc9152..6370dcdf0d9 100644 --- a/build/config/compiler/BUILD.gn +++ b/build/config/compiler/BUILD.gn @@ -17,12 +17,6 @@ import("//build/config/android/config.gni") if (current_cpu == "arm") { import("//build/config/arm.gni") } -if (current_cpu == "mipsel" || current_cpu == "mips64el") { - import("//build/config/mips.gni") -} -if (is_posix) { - import("//build/config/gcc/gcc_version.gni") -} if (is_win) { import("//build/config/win/visual_studio_version.gni") } @@ -225,72 +219,9 @@ config("compiler") { "-fno-caller-saves", ] } - } else if (current_cpu == "mipsel") { - # Some toolchains default to big-endian. - cflags += [ "-EL" ] - ldflags += [ "-EL" ] - - # We have to explicitly request exceptions to get good heap profiles from - # tcmalloc. 
- if (is_debug || is_release) { - cflags += [ - "-fexceptions", - "-funwind-tables", - ] - } - - if (mips_arch_variant == "r6") { - cflags += [ - "-mips32r6", - "-Wa,-mips32r6", - ] - if (is_android) { - ldflags += [ - "-mips32r6", - "-Wl,-melf32ltsmip", - ] - } - } else if (mips_arch_variant == "r2") { - cflags += [ - "-mips32r2", - "-Wa,-mips32r2", - ] - if (mips_float_abi == "hard" && mips_fpu_mode != "") { - cflags += [ "-m$mips_fpu_mode" ] - } - } else if (mips_arch_variant == "r1") { - cflags += [ - "-mips32", - "-Wa,-mips32", - ] - } - - if (mips_dsp_rev == 1) { - cflags += [ "-mdsp" ] - } else if (mips_dsp_rev == 2) { - cflags += [ "-mdspr2" ] - } - - cflags += [ "-m${mips_float_abi}-float" ] - } else if (current_cpu == "mips64el") { - if (mips_arch_variant == "r6") { - cflags += [ - "-mips64r6", - "-Wa,-mips64r6", - ] - ldflags += [ "-mips64r6" ] - } else if (mips_arch_variant == "r2") { - cflags += [ - "-mips64r2", - "-Wa,-mips64r2", - ] - ldflags += [ "-mips64r2" ] - } } - if (current_cpu != "mipsel") { - cflags += [ "-fno-exceptions" ] - } + cflags += [ "-fno-exceptions" ] } # Linux/Android common flags setup. @@ -304,12 +235,12 @@ config("compiler") { } # We need -fPIC: - # 1. On ARM and MIPS for tcmalloc. + # 1. On ARM for tcmalloc. # 2. On Android. # 3. When using the sanitizers. # Otherwise there is a performance hit, in particular on ia32. if (is_android || is_asan || is_lsan || is_msan || is_tsan || - (is_linux && (current_cpu == "arm" || current_cpu == "mipsel"))) { + (is_linux && current_cpu == "arm")) { cflags += [ "-fPIC" ] ldflags += [ "-fPIC" ] } @@ -447,19 +378,15 @@ config("runtime_library") { if (is_android) { if (is_clang) { # Work around incompatibilities between bionic and clang headers. - defines += [ - "__compiler_offsetof=__builtin_offsetof", - ] + defines += [ "__compiler_offsetof=__builtin_offsetof" ] } defines += [ "__GNU_SOURCE=1" ] # Necessary for clone(). - # TODO(jdduke) Re-enable on mips after resolving linking - # issues with libc++ (crbug.com/456380). - if (current_cpu != "mipsel" && current_cpu != "mips64el") { - ldflags += [ "-Wl,--warn-shared-textrel" ] - } - ldflags += [ "-nostdlib" ] + ldflags += [ + "-Wl,--warn-shared-textrel", + "-nostdlib", + ] # NOTE: The libc++ header include paths below are specified in cflags # rather than include_dirs because they need to come after include_dirs. @@ -572,24 +499,11 @@ if (is_win) { "-Wno-type-limits", ] default_warning_flags_cc += [ - # Disabling c++0x-compat should be handled in WebKit, but - # this currently doesn't work because gcc_version is not set - # correctly when building with the Android build system. - # TODO(torne): Fix this in WebKit. - "-Wno-error=c++0x-compat", - # Other things unrelated to -Wextra: "-Wno-non-virtual-dtor", "-Wno-sign-promo", ] } - - if (gcc_version >= 48) { - # Don't warn about the "typedef 'foo' locally defined but not used" - # for gcc 4.8. - # TODO: remove this flag once all builds work. See crbug.com/227506 - default_warning_flags += [ "-Wno-unused-local-typedefs" ] - } } # chromium_code --------------------------------------------------------------- diff --git a/build/config/gcc/gcc_version.gni b/build/config/gcc/gcc_version.gni deleted file mode 100644 index 6741e45bf39..00000000000 --- a/build/config/gcc/gcc_version.gni +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright 2014 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. 
- -if (is_android) { - gcc_version = 49 -} else if (current_toolchain == "//build/toolchain/cros:target" || - current_toolchain == "//build/toolchain/linux:mipsel") { - gcc_version = exec_script("../../compiler_version.py", - [ - "target", - "compiler", - ], - "value") -} else if (current_toolchain == "//build/toolchain/linux:x64" || - current_toolchain == "//build/toolchain/linux:x86") { - # These are both the same and just use the default gcc on the system. - gcc_version = exec_script("../../compiler_version.py", - [ - "host", - "compiler", - ], - "value") -} else { - gcc_version = 0 -} diff --git a/build/config/mips.gni b/build/config/mips.gni deleted file mode 100644 index 1b406572c73..00000000000 --- a/build/config/mips.gni +++ /dev/null @@ -1,43 +0,0 @@ -# Copyright 2015 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -if (current_cpu == "mipsel") { - declare_args() { - # MIPS arch variant. Possible values are: - # "r1" - # "r2" - # "r6" - mips_arch_variant = "r1" - - # MIPS DSP ASE revision. Possible values are: - # 0: unavailable - # 1: revision 1 - # 2: revision 2 - mips_dsp_rev = 0 - - # MIPS floating-point ABI. Possible values are: - # "hard": sets the GCC -mhard-float option. - # "soft": sets the GCC -msoft-float option. - mips_float_abi = "hard" - - # MIPS32 floating-point register width. Possible values are: - # "fp32": sets the GCC -mfp32 option. - # "fp64": sets the GCC -mfp64 option. - # "fpxx": sets the GCC -mfpxx option. - mips_fpu_mode = "fp32" - } -} else if (current_cpu == "mips64el") { - # MIPS arch variant. Possible values are: - # "r2" - # "r6" - if (is_android) { - declare_args() { - mips_arch_variant = "r6" - } - } else { - declare_args() { - mips_arch_variant = "r2" - } - } -} diff --git a/build/config/sysroot.gni b/build/config/sysroot.gni index bb29e6c1340..1eed2e883fb 100644 --- a/build/config/sysroot.gni +++ b/build/config/sysroot.gni @@ -33,14 +33,10 @@ if (current_toolchain == default_toolchain && target_sysroot != "") { sysroot = rebase_path("$android_ndk_root/$x86_android_sysroot_subdir") } else if (current_cpu == "arm") { sysroot = rebase_path("$android_ndk_root/$arm_android_sysroot_subdir") - } else if (current_cpu == "mipsel") { - sysroot = rebase_path("$android_ndk_root/$mips_android_sysroot_subdir") } else if (current_cpu == "x64") { sysroot = rebase_path("$android_ndk_root/$x86_64_android_sysroot_subdir") } else if (current_cpu == "arm64") { sysroot = rebase_path("$android_ndk_root/$arm64_android_sysroot_subdir") - } else if (current_cpu == "mips64") { - sysroot = rebase_path("$android_ndk_root/$mips64_android_sysroot_subdir") } else { sysroot = "" } diff --git a/build/toolchain/android/BUILD.gn b/build/toolchain/android/BUILD.gn index 51f03206088..f6806099e77 100644 --- a/build/toolchain/android/BUILD.gn +++ b/build/toolchain/android/BUILD.gn @@ -132,14 +132,6 @@ android_gcc_toolchains_helper("arm") { toolchain_cpu = "arm" } -android_gcc_toolchains_helper("mipsel") { - android_ndk_sysroot = "$android_ndk_root/$mips_android_sysroot_subdir" - android_ndk_lib_dir = "usr/lib" - - tool_prefix = "$mips_android_toolchain_root/bin/mipsel-linux-android-" - toolchain_cpu = "mipsel" -} - android_gcc_toolchains_helper("x64") { android_ndk_sysroot = "$android_ndk_root/$x86_64_android_sysroot_subdir" android_ndk_lib_dir = "usr/lib64" @@ -155,11 +147,3 @@ android_gcc_toolchains_helper("arm64") { tool_prefix = 
"$arm64_android_toolchain_root/bin/aarch64-linux-android-" toolchain_cpu = "aarch64" } - -android_gcc_toolchains_helper("mips64el") { - android_ndk_sysroot = "$android_ndk_root/$mips64_android_sysroot_subdir" - android_ndk_lib_dir = "usr/lib64" - - tool_prefix = "$mips64_android_toolchain_root/bin/mipsel-linux-android-" - toolchain_cpu = "mipsel64el" -} diff --git a/build/toolchain/linux/BUILD.gn b/build/toolchain/linux/BUILD.gn index 79313f34b25..7ad72ac34db 100644 --- a/build/toolchain/linux/BUILD.gn +++ b/build/toolchain/linux/BUILD.gn @@ -126,17 +126,3 @@ gcc_toolchain("x64") { toolchain_os = "linux" is_clang = false } - -gcc_toolchain("mipsel") { - cc = "${compiler_prefix}${toolchain_prefix}gcc" - cxx = "${compiler_prefix}${toolchain_prefix}g++" - ar = "${toolchain_prefix}ar" - ld = cxx - readelf = "${toolchain_prefix}readelf" - nm = "${toolchain_prefix}nm" - strip = "${toolchain_prefix}strip" - - toolchain_cpu = "${target_cpu}" - toolchain_os = "linux" - is_clang = is_clang -} diff --git a/pkg/pkg.status b/pkg/pkg.status index 73033458fb7..860edb25b1d 100644 --- a/pkg/pkg.status +++ b/pkg/pkg.status @@ -189,7 +189,7 @@ dart_messages/test/dart_messages_test: Skip # Uses dart:io. [ $browser || $jscl ] kernel/test/*: SkipByDesign # Uses dart:io and bigints. -[ $runtime == vm && ($arch == simarm64 || $arch == simarm || $arch == simarmv6 || $arch == simarmv5te || $arch == simmips || $arch == armv6 || $arch == armv5te) ] +[ $runtime == vm && ($arch == simarm64 || $arch == simarm || $arch == simarmv6 || $arch == simarmv5te || $arch == armv6 || $arch == armv5te) ] # Timeout. These are not unit tests. They do not run efficiently on our # simulator or low-end devices. *: Skip diff --git a/runtime/BUILD.gn b/runtime/BUILD.gn index 2d4e7dda076..c74e0c4895b 100644 --- a/runtime/BUILD.gn +++ b/runtime/BUILD.gn @@ -26,7 +26,7 @@ declare_args() { # Explicitly set the target architecture in case of precompilation. Leaving # this unspecified results in automatic target architecture detection. - # Available options are: arm, arm64, mips, x64 and ia32 + # Available options are: arm, arm64, x64, and ia32 dart_target_arch = "" # The optimization level to use for debug builds. @@ -105,8 +105,6 @@ config("dart_config") { defines += [ "TARGET_ARCH_ARM_5TE" ] } else if (dart_target_arch == "arm64" || dart_target_arch == "simarm64") { defines += [ "TARGET_ARCH_ARM64" ] - } else if (dart_target_arch == "mips" || dart_target_arch == "simmips") { - defines += [ "TARGET_ARCH_MIPS" ] } else if (dart_target_arch == "x64") { defines += [ "TARGET_ARCH_X64" ] } else if (dart_target_arch == "ia32") { diff --git a/runtime/bin/platform.h b/runtime/bin/platform.h index 674d94f16df..dfe9cde9a06 100644 --- a/runtime/bin/platform.h +++ b/runtime/bin/platform.h @@ -25,7 +25,7 @@ class Platform { static const char* OperatingSystem(); // Returns the architecture name of the processor the VM is running on - // (ia32, x64, arm, arm64, or mips). + // (ia32, x64, arm, or arm64). 
static const char* HostArchitecture() {
#if defined(HOST_ARCH_ARM)
    return "arm";
@@ -33,8 +33,6 @@ class Platform {
    return "arm64";
#elif defined(HOST_ARCH_IA32)
    return "ia32";
-#elif defined(HOST_ARCH_MIPS)
-    return "mips";
#elif defined(HOST_ARCH_X64)
    return "x64";
#else
diff --git a/runtime/observatory/tests/service/service.status b/runtime/observatory/tests/service/service.status
index 4f0c14e6922..a479212ca67 100644
--- a/runtime/observatory/tests/service/service.status
+++ b/runtime/observatory/tests/service/service.status
@@ -43,9 +43,9 @@ debugger_location_second_test: Pass, Slow
debugger_location_test: Pass, Slow
# These tests are slow on simulators.
-[ $arch == simarm || $arch == simmips || $arch == simarm64 ]
+[ $arch == simarm || $arch == simarm64 ]
*: Pass, Slow
-[ $mode == debug && ($arch == simarm || $arch == simmips || $arch == simarm64) ]
+[ $mode == debug && ($arch == simarm || $arch == simarm64) ]
*: SkipSlow
# All tests use dart:io
diff --git a/runtime/platform/globals.h b/runtime/platform/globals.h
index b0c031d8399..06d1086db77 100644
--- a/runtime/platform/globals.h
+++ b/runtime/platform/globals.h
@@ -212,13 +212,8 @@ typedef simd128_value_t fpu_register_t;
#elif defined(_M_IX86) || defined(__i386__)
#define HOST_ARCH_IA32 1
#define ARCH_IS_32_BIT 1
-#if defined(TARGET_ARCH_MIPS)
-#define kFpuRegisterSize 8
-typedef double fpu_register_t;
-#else
#define kFpuRegisterSize 16
typedef simd128_value_t fpu_register_t;
-#endif
#elif defined(__ARMEL__)
#define HOST_ARCH_ARM 1
#define ARCH_IS_32_BIT 1
@@ -241,14 +236,6 @@ typedef simd_value_t fpu_register_t;
    reinterpret_cast<simd_value_t*>(addr)->data_[3] = value.data_[3];          \
  } while (0)
-#elif defined(__MIPSEL__)
-#define HOST_ARCH_MIPS 1
-#define ARCH_IS_32_BIT 1
-#define kFpuRegisterSize 8
-typedef double fpu_register_t;
-#elif defined(__MIPSEB__)
-#error Big-endian MIPS is not supported by Dart. Try passing -EL to your \
-  compiler.
#elif defined(__aarch64__)
#define HOST_ARCH_ARM64 1
#define ARCH_IS_64_BIT 1
@@ -316,13 +303,11 @@ typedef simd128_value_t fpu_register_t;
#error Automatic compiler detection failed.
#endif
-#if !defined(TARGET_ARCH_MIPS) && !defined(TARGET_ARCH_ARM) && \
-    !defined(TARGET_ARCH_X64) && !defined(TARGET_ARCH_IA32) && \
-    !defined(TARGET_ARCH_ARM64) && !defined(TARGET_ARCH_DBC)
+#if !defined(TARGET_ARCH_ARM) && !defined(TARGET_ARCH_X64) && \
+    !defined(TARGET_ARCH_IA32) && !defined(TARGET_ARCH_ARM64) && \
+    !defined(TARGET_ARCH_DBC)
// No target architecture specified pick the one matching the host architecture.
-#if defined(HOST_ARCH_MIPS)
-#define TARGET_ARCH_MIPS 1
-#elif defined(HOST_ARCH_ARM)
+#if defined(HOST_ARCH_ARM)
#define TARGET_ARCH_ARM 1
#elif defined(HOST_ARCH_X64)
#define TARGET_ARCH_X64 1
@@ -341,8 +326,7 @@ typedef simd128_value_t fpu_register_t;
#if !defined(ARCH_IS_64_BIT)
#error Mismatched Host/Target architectures.
#endif
-#elif defined(TARGET_ARCH_IA32) || defined(TARGET_ARCH_ARM) || \
-    defined(TARGET_ARCH_MIPS)
+#elif defined(TARGET_ARCH_IA32) || defined(TARGET_ARCH_ARM)
#if !defined(ARCH_IS_32_BIT)
#error Mismatched Host/Target architectures.
#endif
@@ -363,11 +347,6 @@ typedef simd128_value_t fpu_register_t;
#define USING_SIMULATOR 1
#endif
-#elif defined(TARGET_ARCH_MIPS)
-#if !defined(HOST_ARCH_MIPS)
-#define USING_SIMULATOR 1
-#endif
-
#elif defined(TARGET_ARCH_DBC)
#define USING_SIMULATOR 1
@@ -663,8 +642,7 @@ inline D bit_copy(const S& source) {
}
-#if defined(HOST_ARCH_ARM) || defined(HOST_ARCH_MIPS) || \
-    defined(HOST_ARCH_ARM64)
+#if defined(HOST_ARCH_ARM) || defined(HOST_ARCH_ARM64)
// Similar to bit_copy and bit_cast, but does take the type from the argument.
template <typename T>
static inline T ReadUnaligned(const T* ptr) {
@@ -681,7 +659,7 @@ static inline void StoreUnaligned(T* ptr, T value) {
  memcpy(reinterpret_cast<void*>(ptr), reinterpret_cast<const void*>(&value),
         sizeof(value));
}
-#else  // !(HOST_ARCH_ARM || HOST_ARCH_MIPS || HOST_ARCH_ARM64)
+#else  // !(HOST_ARCH_ARM || HOST_ARCH_ARM64)
// Similar to bit_copy and bit_cast, but does take the type from the argument.
template <typename T>
static inline T ReadUnaligned(const T* ptr) {
  return *ptr;
}
@@ -694,7 +672,7 @@ template <typename T>
static inline void StoreUnaligned(T* ptr, T value) {
  *ptr = value;
}
-#endif  // !(HOST_ARCH_ARM || HOST_ARCH_MIPS || HOST_ARCH_ARM64)
+#endif  // !(HOST_ARCH_ARM || HOST_ARCH_ARM64)
// On Windows the reentrent version of strtok is called
diff --git a/runtime/tools/benchmark.py b/runtime/tools/benchmark.py
index 694b87c7bb1..b3a587fb58b 100755
--- a/runtime/tools/benchmark.py
+++ b/runtime/tools/benchmark.py
@@ -54,7 +54,7 @@ def BuildOptions():
      default=False,
      action="store_true")
  result.add_option("--arch",
      help='Target architectures (comma-separated).',
-      metavar='[all,ia32,x64,simarm,simmips,arm,mips,dartc]',
+      metavar='[all,ia32,x64,simarm,arm,dartc]',
      default=utils.GuessArchitecture())
  result.add_option("--executable",
      help='Virtual machine to execute.',
@@ -68,7 +68,7 @@ def ProcessOptions(options):
  if options.arch == 'all':
-    options.arch = 'ia32,x64,simarm,simmips,dartc'
+    options.arch = 'ia32,x64,simarm,dartc'
  if options.mode == 'all':
    options.mode = 'debug,release'
  options.mode = options.mode.split(',')
@@ -78,7 +78,7 @@ def ProcessOptions(options):
      print "Unknown mode %s" % mode
      return False
  for arch in options.arch:
-    if not arch in ['ia32', 'x64', 'simarm', 'simmips', 'arm', 'mips', 'dartc']:
+    if not arch in ['ia32', 'x64', 'simarm', 'arm', 'dartc']:
      print "Unknown arch %s" % arch
      return False
  return True
diff --git a/runtime/tools/gyp/runtime-configurations.gypi b/runtime/tools/gyp/runtime-configurations.gypi
index 2de3bf9a0f0..04b7dd52f9c 100644
--- a/runtime/tools/gyp/runtime-configurations.gypi
+++ b/runtime/tools/gyp/runtime-configurations.gypi
@@ -93,13 +93,6 @@
      },
    },
-    'Dart_simmips_Base': {
-      'abstract': 1,
-      'xcode_settings': {
-        'ARCHS': [ 'i386' ],
-      },
-    },
-
    'Dart_Debug': {
      'abstract': 1,
      'defines': [
diff --git a/runtime/vm/assembler.cc b/runtime/vm/assembler.cc
index 77d61b0de0f..884326f397d 100644
--- a/runtime/vm/assembler.cc
+++ b/runtime/vm/assembler.cc
@@ -22,11 +22,8 @@ DEFINE_FLAG(bool,
            code_comments,
            false,
            "Include comments into code and disassembly");
-#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_MIPS)
-DEFINE_FLAG(bool,
-            use_far_branches,
-            false,
-            "Enable far branches for ARM and MIPS");
+#if defined(TARGET_ARCH_ARM)
+DEFINE_FLAG(bool, use_far_branches, false, "Enable far branches for ARM.");
#endif
static uword NewContents(intptr_t capacity) {
diff --git a/runtime/vm/assembler.h b/runtime/vm/assembler.h
index 87d3a1e7bff..768297a2818 100644
--- a/runtime/vm/assembler.h
+++ b/runtime/vm/assembler.h
@@ -14,8 +14,7 @@ namespace dart {
-#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64) || \
-    defined(TARGET_ARCH_MIPS)
+#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
DECLARE_FLAG(bool, use_far_branches);
#endif
@@ -335,8 +334,6 @@ enum RestorePP { kRestoreCallerPP, kKeepCalleePP };
#include "vm/assembler_arm.h"
#elif defined(TARGET_ARCH_ARM64)
#include "vm/assembler_arm64.h"
-#elif defined(TARGET_ARCH_MIPS)
-#include "vm/assembler_mips.h"
#elif defined(TARGET_ARCH_DBC)
#include "vm/assembler_dbc.h"
#else
diff --git a/runtime/vm/assembler_ia32.h b/runtime/vm/assembler_ia32.h
index 9f663c2967f..993da0aa0b0 100644
--- a/runtime/vm/assembler_ia32.h
+++ b/runtime/vm/assembler_ia32.h
@@ -301,7 +301,7 @@ class Assembler : public ValueObject {
        jit_cookie_(0),
        comments_(),
        code_(Code::ZoneHandle()) {
-    // This mode is only needed and implemented for MIPS and ARM.
+    // This mode is only needed and implemented for ARM.
    ASSERT(!use_far_branches);
  }
  ~Assembler() {}
diff --git a/runtime/vm/assembler_mips.cc b/runtime/vm/assembler_mips.cc
deleted file mode 100644
index 846a817f738..00000000000
--- a/runtime/vm/assembler_mips.cc
+++ /dev/null
@@ -1,1410 +0,0 @@
-// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
-// for details. All rights reserved. Use of this source code is governed by a
-// BSD-style license that can be found in the LICENSE file.
-
-#include "vm/globals.h"  // NOLINT
-#if defined(TARGET_ARCH_MIPS)
-
-#include "vm/assembler.h"
-#include "vm/longjump.h"
-#include "vm/runtime_entry.h"
-#include "vm/simulator.h"
-#include "vm/stack_frame.h"
-#include "vm/stub_code.h"
-
-namespace dart {
-
-DECLARE_FLAG(bool, check_code_pointer);
-DECLARE_FLAG(bool, inline_alloc);
-#if defined(USING_SIMULATOR)
-DECLARE_FLAG(int, trace_sim_after);
-#endif
-
-void Assembler::InitializeMemoryWithBreakpoints(uword data, intptr_t length) {
-  ASSERT(Utils::IsAligned(data, 4));
-  ASSERT(Utils::IsAligned(length, 4));
-  const uword end = data + length;
-  while (data < end) {
-    *reinterpret_cast<int32_t*>(data) = Instr::kBreakPointInstruction;
-    data += 4;
-  }
-}
-
-
-void Assembler::GetNextPC(Register dest, Register temp) {
-  if (temp != kNoRegister) {
-    mov(temp, RA);
-  }
-  EmitRegImmType(REGIMM, R0, BGEZAL, 1);
-  mov(dest, RA);
-  if (temp != kNoRegister) {
-    mov(RA, temp);
-  }
-}
-
-
-static bool CanEncodeBranchOffset(int32_t offset) {
-  ASSERT(Utils::IsAligned(offset, 4));
-  return Utils::IsInt(18, offset);
-}
-
-
-int32_t Assembler::EncodeBranchOffset(int32_t offset, int32_t instr) {
-  if (!CanEncodeBranchOffset(offset)) {
-    ASSERT(!use_far_branches());
-    Thread::Current()->long_jump_base()->Jump(1, Object::branch_offset_error());
-  }
-
-  // Properly preserve only the bits supported in the instruction.
-  offset >>= 2;
-  offset &= kBranchOffsetMask;
-  return (instr & ~kBranchOffsetMask) | offset;
-}
-
-
-static intptr_t DecodeBranchOffset(int32_t instr) {
-  // Sign-extend, left-shift by 2.
-  return (((instr & kBranchOffsetMask) << 16) >> 14);
-}
-
-
-static int32_t DecodeLoadImmediate(int32_t ori_instr, int32_t lui_instr) {
-  return (((lui_instr & kBranchOffsetMask) << 16) |
-          (ori_instr & kBranchOffsetMask));
-}
-
-
-static int32_t EncodeLoadImmediate(int32_t dest, int32_t instr) {
-  return ((instr & ~kBranchOffsetMask) | (dest & kBranchOffsetMask));
-}
-
-
-class PatchFarJump : public AssemblerFixup {
- public:
-  PatchFarJump() {}
-
-  void Process(const MemoryRegion& region, intptr_t position) {
-    const int32_t high = region.Load<int32_t>(position);
-    const int32_t low = region.Load<int32_t>(position + Instr::kInstrSize);
-    const int32_t offset = DecodeLoadImmediate(low, high);
-    const int32_t dest = region.start() + offset;
-
-    if ((Instr::At(reinterpret_cast<uword>(&high))->OpcodeField() == LUI) &&
-        (Instr::At(reinterpret_cast<uword>(&low))->OpcodeField() == ORI)) {
-      // Change the offset to the absolute value.
-      const int32_t encoded_low =
-          EncodeLoadImmediate(dest & kBranchOffsetMask, low);
-      const int32_t encoded_high = EncodeLoadImmediate(dest >> 16, high);
-
-      region.Store<int32_t>(position, encoded_high);
-      region.Store<int32_t>(position + Instr::kInstrSize, encoded_low);
-      return;
-    }
-    // If the offset loading instructions aren't there, we must have replaced
-    // the far branch with a near one, and so these instructions should be NOPs.
-    ASSERT((high == Instr::kNopInstruction) && (low == Instr::kNopInstruction));
-  }
-
-  virtual bool IsPointerOffset() const { return false; }
-};
-
-
-void Assembler::EmitFarJump(int32_t offset, bool link) {
-  ASSERT(!in_delay_slot_);
-  ASSERT(use_far_branches());
-  const uint16_t low = Utils::Low16Bits(offset);
-  const uint16_t high = Utils::High16Bits(offset);
-  buffer_.EmitFixup(new PatchFarJump());
-  lui(T9, Immediate(high));
-  ori(T9, T9, Immediate(low));
-  if (link) {
-    EmitRType(SPECIAL, T9, R0, RA, 0, JALR);
-  } else {
-    EmitRType(SPECIAL, T9, R0, R0, 0, JR);
-  }
-}
-
-
-static Opcode OppositeBranchOpcode(Opcode b) {
-  switch (b) {
-    case BEQ:
-      return BNE;
-    case BNE:
-      return BEQ;
-    case BGTZ:
-      return BLEZ;
-    case BLEZ:
-      return BGTZ;
-    case BEQL:
-      return BNEL;
-    case BNEL:
-      return BEQL;
-    case BGTZL:
-      return BLEZL;
-    case BLEZL:
-      return BGTZL;
-    default:
-      UNREACHABLE();
-      break;
-  }
-  return BNE;
-}
-
-
-void Assembler::EmitFarBranch(Opcode b,
-                              Register rs,
-                              Register rt,
-                              int32_t offset) {
-  ASSERT(!in_delay_slot_);
-  EmitIType(b, rs, rt, 4);
-  nop();
-  EmitFarJump(offset, false);
-}
-
-
-static RtRegImm OppositeBranchNoLink(RtRegImm b) {
-  switch (b) {
-    case BLTZ:
-      return BGEZ;
-    case BGEZ:
-      return BLTZ;
-    case BLTZAL:
-      return BGEZ;
-    case BGEZAL:
-      return BLTZ;
-    default:
-      UNREACHABLE();
-      break;
-  }
-  return BLTZ;
-}
-
-
-void Assembler::EmitFarRegImmBranch(RtRegImm b, Register rs, int32_t offset) {
-  ASSERT(!in_delay_slot_);
-  EmitRegImmType(REGIMM, rs, b, 4);
-  nop();
-  EmitFarJump(offset, (b == BLTZAL) || (b == BGEZAL));
-}
-
-
-void Assembler::EmitFarFpuBranch(bool kind, int32_t offset) {
-  ASSERT(!in_delay_slot_);
-  const uint32_t b16 = kind ? (1 << 16) : 0;
-  Emit(COP1 << kOpcodeShift | COP1_BC << kCop1SubShift | b16 | 4);
-  nop();
-  EmitFarJump(offset, false);
-}
-
-
-void Assembler::EmitBranch(Opcode b, Register rs, Register rt, Label* label) {
-  ASSERT(!in_delay_slot_);
-  if (label->IsBound()) {
-    // Relative destination from an instruction after the branch.
-    const int32_t dest =
-        label->Position() - (buffer_.Size() + Instr::kInstrSize);
-    if (use_far_branches() && !CanEncodeBranchOffset(dest)) {
-      EmitFarBranch(OppositeBranchOpcode(b), rs, rt, label->Position());
-    } else {
-      const uint16_t dest_off = EncodeBranchOffset(dest, 0);
-      EmitIType(b, rs, rt, dest_off);
-    }
-  } else {
-    const intptr_t position = buffer_.Size();
-    if (use_far_branches()) {
-      const uint32_t dest_off = label->position_;
-      EmitFarBranch(b, rs, rt, dest_off);
-    } else {
-      const uint16_t dest_off = EncodeBranchOffset(label->position_, 0);
-      EmitIType(b, rs, rt, dest_off);
-    }
-    label->LinkTo(position);
-  }
-}
-
-
-void Assembler::EmitRegImmBranch(RtRegImm b, Register rs, Label* label) {
-  ASSERT(!in_delay_slot_);
-  if (label->IsBound()) {
-    // Relative destination from an instruction after the branch.
-    const int32_t dest =
-        label->Position() - (buffer_.Size() + Instr::kInstrSize);
-    if (use_far_branches() && !CanEncodeBranchOffset(dest)) {
-      EmitFarRegImmBranch(OppositeBranchNoLink(b), rs, label->Position());
-    } else {
-      const uint16_t dest_off = EncodeBranchOffset(dest, 0);
-      EmitRegImmType(REGIMM, rs, b, dest_off);
-    }
-  } else {
-    const intptr_t position = buffer_.Size();
-    if (use_far_branches()) {
-      const uint32_t dest_off = label->position_;
-      EmitFarRegImmBranch(b, rs, dest_off);
-    } else {
-      const uint16_t dest_off = EncodeBranchOffset(label->position_, 0);
-      EmitRegImmType(REGIMM, rs, b, dest_off);
-    }
-    label->LinkTo(position);
-  }
-}
-
-
-void Assembler::EmitFpuBranch(bool kind, Label* label) {
-  ASSERT(!in_delay_slot_);
-  const int32_t b16 = kind ? (1 << 16) : 0;  // Bit 16 set for branch on true.
-  if (label->IsBound()) {
-    // Relative destination from an instruction after the branch.
-    const int32_t dest =
-        label->Position() - (buffer_.Size() + Instr::kInstrSize);
-    if (use_far_branches() && !CanEncodeBranchOffset(dest)) {
-      EmitFarFpuBranch(kind, label->Position());
-    } else {
-      const uint16_t dest_off = EncodeBranchOffset(dest, 0);
-      Emit(COP1 << kOpcodeShift | COP1_BC << kCop1SubShift | b16 | dest_off);
-    }
-  } else {
-    const intptr_t position = buffer_.Size();
-    if (use_far_branches()) {
-      const uint32_t dest_off = label->position_;
-      EmitFarFpuBranch(kind, dest_off);
-    } else {
-      const uint16_t dest_off = EncodeBranchOffset(label->position_, 0);
-      Emit(COP1 << kOpcodeShift | COP1_BC << kCop1SubShift | b16 | dest_off);
-    }
-    label->LinkTo(position);
-  }
-}
-
-
-static int32_t FlipBranchInstruction(int32_t instr) {
-  Instr* i = Instr::At(reinterpret_cast<uword>(&instr));
-  if (i->OpcodeField() == REGIMM) {
-    RtRegImm b = OppositeBranchNoLink(i->RegImmFnField());
-    i->SetRegImmFnField(b);
-    return i->InstructionBits();
-  } else if (i->OpcodeField() == COP1) {
-    return instr ^ (1 << 16);
-  }
-  Opcode b = OppositeBranchOpcode(i->OpcodeField());
-  i->SetOpcodeField(b);
-  return i->InstructionBits();
-}
-
-
-void Assembler::Bind(Label* label) {
-  ASSERT(!label->IsBound());
-  intptr_t bound_pc = buffer_.Size();
-
-  while (label->IsLinked()) {
-    int32_t position = label->Position();
-    int32_t dest = bound_pc - (position + Instr::kInstrSize);
-
-    if (use_far_branches() && !CanEncodeBranchOffset(dest)) {
-      // Far branches are enabled and we can't encode the branch offset.
-
-      // Grab the branch instruction. We'll need to flip it later.
-      const int32_t branch = buffer_.Load<int32_t>(position);
-
-      // Grab instructions that load the offset.
-      const int32_t high =
-          buffer_.Load<int32_t>(position + 2 * Instr::kInstrSize);
-      const int32_t low =
-          buffer_.Load<int32_t>(position + 3 * Instr::kInstrSize);
-
-      // Change from relative to the branch to relative to the assembler buffer.
-      dest = buffer_.Size();
-      const int32_t encoded_low =
-          EncodeLoadImmediate(dest & kBranchOffsetMask, low);
-      const int32_t encoded_high = EncodeLoadImmediate(dest >> 16, high);
-
-      // Skip the unconditional far jump if the test fails by flipping the
-      // sense of the branch instruction.
-      buffer_.Store<int32_t>(position, FlipBranchInstruction(branch));
-      buffer_.Store<int32_t>(position + 2 * Instr::kInstrSize, encoded_high);
-      buffer_.Store<int32_t>(position + 3 * Instr::kInstrSize, encoded_low);
-      label->position_ = DecodeLoadImmediate(low, high);
-    } else if (use_far_branches() && CanEncodeBranchOffset(dest)) {
-      // We assembled a far branch, but we don't need it. Replace with a near
-      // branch.
-
-      // Grab the link to the next branch.
-      const int32_t high =
-          buffer_.Load<int32_t>(position + 2 * Instr::kInstrSize);
-      const int32_t low =
-          buffer_.Load<int32_t>(position + 3 * Instr::kInstrSize);
-
-      // Grab the original branch instruction.
-      int32_t branch = buffer_.Load<int32_t>(position);
-
-      // Clear out the old (far) branch.
-      for (int i = 0; i < 5; i++) {
-        buffer_.Store<int32_t>(position + i * Instr::kInstrSize,
-                               Instr::kNopInstruction);
-      }
-
-      // Calculate the new offset.
-      dest = dest - 4 * Instr::kInstrSize;
-      const int32_t encoded = EncodeBranchOffset(dest, branch);
-      buffer_.Store<int32_t>(position + 4 * Instr::kInstrSize, encoded);
-      label->position_ = DecodeLoadImmediate(low, high);
-    } else {
-      const int32_t next = buffer_.Load<int32_t>(position);
-      const int32_t encoded = EncodeBranchOffset(dest, next);
-      buffer_.Store<int32_t>(position, encoded);
-      label->position_ = DecodeBranchOffset(next);
-    }
-  }
-  label->BindTo(bound_pc);
-  delay_slot_available_ = false;
-}
-
-
-void Assembler::LoadWordFromPoolOffset(Register rd,
-                                       int32_t offset,
-                                       Register pp) {
-  ASSERT((pp != PP) || constant_pool_allowed());
-  ASSERT(!in_delay_slot_);
-  ASSERT(rd != pp);
-  if (Address::CanHoldOffset(offset)) {
-    lw(rd, Address(pp, offset));
-  } else {
-    const int16_t offset_low = Utils::Low16Bits(offset);  // Signed.
-    offset -= offset_low;
-    const uint16_t offset_high = Utils::High16Bits(offset);  // Unsigned.
-    if (offset_high != 0) {
-      lui(rd, Immediate(offset_high));
-      addu(rd, rd, pp);
-      lw(rd, Address(rd, offset_low));
-    } else {
-      lw(rd, Address(pp, offset_low));
-    }
-  }
-}
-
-
-void Assembler::AdduDetectOverflow(Register rd,
-                                   Register rs,
-                                   Register rt,
-                                   Register ro,
-                                   Register scratch) {
-  ASSERT(!in_delay_slot_);
-  ASSERT(rd != ro);
-  ASSERT(rd != TMP);
-  ASSERT(ro != TMP);
-  ASSERT(ro != rs);
-  ASSERT(ro != rt);
-
-  if ((rs == rt) && (rd == rs)) {
-    ASSERT(scratch != kNoRegister);
-    ASSERT(scratch != TMP);
-    ASSERT(rd != scratch);
-    ASSERT(ro != scratch);
-    ASSERT(rs != scratch);
-    ASSERT(rt != scratch);
-    mov(scratch, rt);
-    rt = scratch;
-  }
-
-  if (rd == rs) {
-    mov(TMP, rs);  // Preserve rs.
-    addu(rd, rs, rt);  // rs is overwritten.
-    xor_(TMP, rd, TMP);  // Original rs.
-    xor_(ro, rd, rt);
-    and_(ro, ro, TMP);
-  } else if (rd == rt) {
-    mov(TMP, rt);  // Preserve rt.
-    addu(rd, rs, rt);  // rt is overwritten.
-    xor_(TMP, rd, TMP);  // Original rt.
- xor_(ro, rd, rs); - and_(ro, ro, TMP); - } else { - addu(rd, rs, rt); - xor_(ro, rd, rs); - xor_(TMP, rd, rt); - and_(ro, TMP, ro); - } -} - - -void Assembler::SubuDetectOverflow(Register rd, - Register rs, - Register rt, - Register ro) { - ASSERT(!in_delay_slot_); - ASSERT(rd != ro); - ASSERT(rd != TMP); - ASSERT(ro != TMP); - ASSERT(ro != rs); - ASSERT(ro != rt); - ASSERT(rs != TMP); - ASSERT(rt != TMP); - - // This happens with some crankshaft code. Since Subu works fine if - // left == right, let's not make that restriction here. - if (rs == rt) { - mov(rd, ZR); - mov(ro, ZR); - return; - } - - if (rd == rs) { - mov(TMP, rs); // Preserve left. - subu(rd, rs, rt); // Left is overwritten. - xor_(ro, rd, TMP); // scratch is original left. - xor_(TMP, TMP, rs); // scratch is original left. - and_(ro, TMP, ro); - } else if (rd == rt) { - mov(TMP, rt); // Preserve right. - subu(rd, rs, rt); // Right is overwritten. - xor_(ro, rd, rs); - xor_(TMP, rs, TMP); // Original right. - and_(ro, TMP, ro); - } else { - subu(rd, rs, rt); - xor_(ro, rd, rs); - xor_(TMP, rs, rt); - and_(ro, TMP, ro); - } -} - - -void Assembler::CheckCodePointer() { -#ifdef DEBUG - if (!FLAG_check_code_pointer) { - return; - } - Comment("CheckCodePointer"); - Label cid_ok, instructions_ok; - Push(CMPRES1); - Push(CMPRES2); - LoadClassId(CMPRES1, CODE_REG); - BranchEqual(CMPRES1, Immediate(kCodeCid), &cid_ok); - break_(0); - Bind(&cid_ok); - GetNextPC(CMPRES1, TMP); - const intptr_t entry_offset = CodeSize() - Instr::kInstrSize + - Instructions::HeaderSize() - kHeapObjectTag; - AddImmediate(CMPRES1, CMPRES1, -entry_offset); - lw(CMPRES2, FieldAddress(CODE_REG, Code::saved_instructions_offset())); - BranchEqual(CMPRES1, CMPRES2, &instructions_ok); - break_(1); - Bind(&instructions_ok); - Pop(CMPRES2); - Pop(CMPRES1); -#endif -} - - -void Assembler::RestoreCodePointer() { - lw(CODE_REG, Address(FP, kPcMarkerSlotFromFp * kWordSize)); - CheckCodePointer(); -} - - -void Assembler::Branch(const StubEntry& stub_entry, Register pp) { - ASSERT(!in_delay_slot_); - const Code& target_code = Code::ZoneHandle(stub_entry.code()); - const int32_t offset = ObjectPool::element_offset( - object_pool_wrapper_.AddObject(target_code, kPatchable)); - LoadWordFromPoolOffset(CODE_REG, offset - kHeapObjectTag, pp); - lw(TMP, FieldAddress(CODE_REG, Code::entry_point_offset())); - jr(TMP); -} - - -void Assembler::BranchLink(const ExternalLabel* label) { - ASSERT(!in_delay_slot_); - LoadImmediate(T9, label->address()); - jalr(T9); -} - - -void Assembler::BranchLink(const Code& target, Patchability patchable) { - ASSERT(!in_delay_slot_); - const int32_t offset = ObjectPool::element_offset( - object_pool_wrapper_.FindObject(target, patchable)); - LoadWordFromPoolOffset(CODE_REG, offset - kHeapObjectTag); - lw(T9, FieldAddress(CODE_REG, Code::entry_point_offset())); - jalr(T9); - if (patchable == kPatchable) { - delay_slot_available_ = false; // CodePatcher expects a nop. 
-  }
-}
-
-
-void Assembler::BranchLink(const StubEntry& stub_entry,
-                           Patchability patchable) {
-  BranchLink(Code::ZoneHandle(stub_entry.code()), patchable);
-}
-
-
-void Assembler::BranchLinkPatchable(const StubEntry& stub_entry) {
-  BranchLink(Code::ZoneHandle(stub_entry.code()), kPatchable);
-}
-
-
-void Assembler::BranchLinkToRuntime() {
-  lw(T9, Address(THR, Thread::call_to_runtime_entry_point_offset()));
-  jalr(T9);
-  delay_slot()->lw(CODE_REG,
-                   Address(THR, Thread::call_to_runtime_stub_offset()));
-}
-
-
-void Assembler::BranchLinkWithEquivalence(const StubEntry& stub_entry,
-                                          const Object& equivalence) {
-  const Code& target = Code::ZoneHandle(stub_entry.code());
-  ASSERT(!in_delay_slot_);
-  const int32_t offset = ObjectPool::element_offset(
-      object_pool_wrapper_.FindObject(target, equivalence));
-  LoadWordFromPoolOffset(CODE_REG, offset - kHeapObjectTag);
-  lw(T9, FieldAddress(CODE_REG, Code::entry_point_offset()));
-  jalr(T9);
-  delay_slot_available_ = false;  // CodePatcher expects a nop.
-}
-
-
-bool Assembler::CanLoadFromObjectPool(const Object& object) const {
-  ASSERT(!object.IsICData() || ICData::Cast(object).IsOriginal());
-  ASSERT(!object.IsField() || Field::Cast(object).IsOriginal());
-  ASSERT(!Thread::CanLoadFromThread(object));
-  if (!constant_pool_allowed()) {
-    return false;
-  }
-
-  ASSERT(object.IsNotTemporaryScopedHandle());
-  ASSERT(object.IsOld());
-  return true;
-}
-
-
-void Assembler::LoadObjectHelper(Register rd,
-                                 const Object& object,
-                                 bool is_unique) {
-  ASSERT(!object.IsICData() || ICData::Cast(object).IsOriginal());
-  ASSERT(!object.IsField() || Field::Cast(object).IsOriginal());
-  ASSERT(!in_delay_slot_);
-  if (Thread::CanLoadFromThread(object)) {
-    // Load common VM constants from the thread. This works also in places where
-    // no constant pool is set up (e.g. intrinsic code).
-    lw(rd, Address(THR, Thread::OffsetFromThread(object)));
-  } else if (object.IsSmi()) {
-    // Relocation doesn't apply to Smis.
-    LoadImmediate(rd, reinterpret_cast<int32_t>(object.raw()));
-  } else if (CanLoadFromObjectPool(object)) {
-    // Make sure that class CallPattern is able to decode this load from the
-    // object pool.
-    const int32_t offset = ObjectPool::element_offset(
-        is_unique ? object_pool_wrapper_.AddObject(object)
-                  : object_pool_wrapper_.FindObject(object));
-    LoadWordFromPoolOffset(rd, offset - kHeapObjectTag);
-  } else {
-    UNREACHABLE();
-  }
-}
-
-
-void Assembler::LoadObject(Register rd, const Object& object) {
-  LoadObjectHelper(rd, object, false);
-}
-
-
-void Assembler::LoadUniqueObject(Register rd, const Object& object) {
-  LoadObjectHelper(rd, object, true);
-}
-
-
-void Assembler::LoadFunctionFromCalleePool(Register dst,
-                                           const Function& function,
-                                           Register new_pp) {
-  const int32_t offset =
-      ObjectPool::element_offset(object_pool_wrapper_.FindObject(function));
-  LoadWordFromPoolOffset(dst, offset - kHeapObjectTag, new_pp);
-}
-
-
-void Assembler::LoadNativeEntry(Register rd,
-                                const ExternalLabel* label,
-                                Patchability patchable) {
-  const int32_t offset = ObjectPool::element_offset(
-      object_pool_wrapper_.FindNativeEntry(label, patchable));
-  LoadWordFromPoolOffset(rd, offset - kHeapObjectTag);
-}
-
-
-void Assembler::PushObject(const Object& object) {
-  ASSERT(!object.IsICData() || ICData::Cast(object).IsOriginal());
-  ASSERT(!object.IsField() || Field::Cast(object).IsOriginal());
-  ASSERT(!in_delay_slot_);
-  LoadObject(TMP, object);
-  Push(TMP);
-}
-
-
-// Preserves object and value registers.
-void Assembler::StoreIntoObjectFilterNoSmi(Register object, - Register value, - Label* no_update) { - ASSERT(!in_delay_slot_); - COMPILE_ASSERT((kNewObjectAlignmentOffset == kWordSize) && - (kOldObjectAlignmentOffset == 0)); - - // Write-barrier triggers if the value is in the new space (has bit set) and - // the object is in the old space (has bit cleared). - // To check that, we compute value & ~object and skip the write barrier - // if the bit is not set. We can't destroy the object. - nor(TMP, ZR, object); - and_(TMP, value, TMP); - andi(CMPRES1, TMP, Immediate(kNewObjectAlignmentOffset)); - beq(CMPRES1, ZR, no_update); -} - - -// Preserves object and value registers. -void Assembler::StoreIntoObjectFilter(Register object, - Register value, - Label* no_update) { - ASSERT(!in_delay_slot_); - // For the value we are only interested in the new/old bit and the tag bit. - // And the new bit with the tag bit. The resulting bit will be 0 for a Smi. - sll(TMP, value, kObjectAlignmentLog2 - 1); - and_(TMP, value, TMP); - // And the result with the negated space bit of the object. - nor(CMPRES1, ZR, object); - and_(TMP, TMP, CMPRES1); - andi(CMPRES1, TMP, Immediate(kNewObjectAlignmentOffset)); - beq(CMPRES1, ZR, no_update); -} - - -void Assembler::StoreIntoObject(Register object, - const Address& dest, - Register value, - bool can_value_be_smi) { - ASSERT(!in_delay_slot_); - ASSERT(object != value); - sw(value, dest); - Label done; - if (can_value_be_smi) { - StoreIntoObjectFilter(object, value, &done); - } else { - StoreIntoObjectFilterNoSmi(object, value, &done); - } - // A store buffer update is required. - if (value != T0) { - // Preserve T0. - addiu(SP, SP, Immediate(-2 * kWordSize)); - sw(T0, Address(SP, 1 * kWordSize)); - } else { - addiu(SP, SP, Immediate(-1 * kWordSize)); - } - sw(RA, Address(SP, 0 * kWordSize)); - if (object != T0) { - mov(T0, object); - } - lw(T9, Address(THR, Thread::update_store_buffer_entry_point_offset())); - jalr(T9); - delay_slot()->lw(CODE_REG, - Address(THR, Thread::update_store_buffer_code_offset())); - lw(RA, Address(SP, 0 * kWordSize)); - if (value != T0) { - // Restore T0. - lw(T0, Address(SP, 1 * kWordSize)); - addiu(SP, SP, Immediate(2 * kWordSize)); - } else { - addiu(SP, SP, Immediate(1 * kWordSize)); - } - Bind(&done); -} - - -void Assembler::StoreIntoObjectOffset(Register object, - int32_t offset, - Register value, - bool can_value_be_smi) { - if (Address::CanHoldOffset(offset - kHeapObjectTag)) { - StoreIntoObject(object, FieldAddress(object, offset), value, - can_value_be_smi); - } else { - AddImmediate(TMP, object, offset - kHeapObjectTag); - StoreIntoObject(object, Address(TMP), value, can_value_be_smi); - } -} - - -void Assembler::StoreIntoObjectNoBarrier(Register object, - const Address& dest, - Register value) { - ASSERT(!in_delay_slot_); - sw(value, dest); -#if defined(DEBUG) - Label done; - StoreIntoObjectFilter(object, value, &done); - Stop("Store buffer update is required"); - Bind(&done); -#endif // defined(DEBUG) - // No store buffer update. 
-} - - -void Assembler::StoreIntoObjectNoBarrierOffset(Register object, - int32_t offset, - Register value) { - if (Address::CanHoldOffset(offset - kHeapObjectTag)) { - StoreIntoObjectNoBarrier(object, FieldAddress(object, offset), value); - } else { - AddImmediate(TMP, object, offset - kHeapObjectTag); - StoreIntoObjectNoBarrier(object, Address(TMP), value); - } -} - - -void Assembler::StoreIntoObjectNoBarrier(Register object, - const Address& dest, - const Object& value) { - ASSERT(!value.IsICData() || ICData::Cast(value).IsOriginal()); - ASSERT(!value.IsField() || Field::Cast(value).IsOriginal()); - ASSERT(!in_delay_slot_); - ASSERT(value.IsSmi() || value.InVMHeap() || - (value.IsOld() && value.IsNotTemporaryScopedHandle())); - // No store buffer update. - LoadObject(TMP, value); - sw(TMP, dest); -} - - -void Assembler::StoreIntoObjectNoBarrierOffset(Register object, - int32_t offset, - const Object& value) { - if (Address::CanHoldOffset(offset - kHeapObjectTag)) { - StoreIntoObjectNoBarrier(object, FieldAddress(object, offset), value); - } else { - AddImmediate(TMP, object, offset - kHeapObjectTag); - StoreIntoObjectNoBarrier(object, Address(TMP), value); - } -} - - -void Assembler::LoadIsolate(Register result) { - lw(result, Address(THR, Thread::isolate_offset())); -} - - -void Assembler::LoadClassId(Register result, Register object) { - ASSERT(RawObject::kClassIdTagPos == 16); - ASSERT(RawObject::kClassIdTagSize == 16); - const intptr_t class_id_offset = - Object::tags_offset() + RawObject::kClassIdTagPos / kBitsPerByte; - lhu(result, FieldAddress(object, class_id_offset)); -} - - -void Assembler::LoadClassById(Register result, Register class_id) { - ASSERT(!in_delay_slot_); - ASSERT(result != class_id); - LoadIsolate(result); - const intptr_t offset = - Isolate::class_table_offset() + ClassTable::table_offset(); - lw(result, Address(result, offset)); - sll(TMP, class_id, 2); - addu(result, result, TMP); - lw(result, Address(result)); -} - - -void Assembler::LoadClass(Register result, Register object) { - ASSERT(!in_delay_slot_); - ASSERT(TMP != result); - LoadClassId(TMP, object); - LoadClassById(result, TMP); -} - - -void Assembler::LoadClassIdMayBeSmi(Register result, Register object) { - Label heap_object, done; - andi(CMPRES1, object, Immediate(kSmiTagMask)); - bne(CMPRES1, ZR, &heap_object); - LoadImmediate(result, kSmiCid); - b(&done); - Bind(&heap_object); - LoadClassId(result, object); - Bind(&done); -} - - -void Assembler::LoadTaggedClassIdMayBeSmi(Register result, Register object) { - LoadClassIdMayBeSmi(result, object); - SmiTag(result); -} - - -void Assembler::EnterFrame() { - ASSERT(!in_delay_slot_); - addiu(SP, SP, Immediate(-2 * kWordSize)); - sw(RA, Address(SP, 1 * kWordSize)); - sw(FP, Address(SP, 0 * kWordSize)); - mov(FP, SP); -} - - -void Assembler::LeaveFrameAndReturn() { - ASSERT(!in_delay_slot_); - mov(SP, FP); - lw(RA, Address(SP, 1 * kWordSize)); - lw(FP, Address(SP, 0 * kWordSize)); - Ret(); - delay_slot()->addiu(SP, SP, Immediate(2 * kWordSize)); -} - - -void Assembler::EnterStubFrame(intptr_t frame_size) { - EnterDartFrame(frame_size); -} - - -void Assembler::LeaveStubFrame() { - LeaveDartFrame(); -} - - -void Assembler::LeaveStubFrameAndReturn(Register ra) { - LeaveDartFrameAndReturn(ra); -} - - -// T0 receiver, S5 guarded cid as Smi -void Assembler::MonomorphicCheckedEntry() { - ASSERT(has_single_entry_point_); - has_single_entry_point_ = false; - bool saved_use_far_branches = use_far_branches(); - set_use_far_branches(false); - - Label have_cid, miss; 
- Bind(&miss); - lw(T9, Address(THR, Thread::monomorphic_miss_entry_offset())); - jr(T9); - - Comment("MonomorphicCheckedEntry"); - ASSERT(CodeSize() == Instructions::kCheckedEntryOffset); - SmiUntag(S5); - LoadClassIdMayBeSmi(S4, T0); - bne(S4, S5, &miss); - - // Fall through to unchecked entry. - ASSERT(CodeSize() == Instructions::kUncheckedEntryOffset); - - set_use_far_branches(saved_use_far_branches); -} - - -#ifndef PRODUCT -void Assembler::MaybeTraceAllocation(intptr_t cid, - Register temp_reg, - Label* trace) { - ASSERT(cid > 0); - ASSERT(!in_delay_slot_); - ASSERT(temp_reg != kNoRegister); - ASSERT(temp_reg != TMP); - intptr_t state_offset = ClassTable::StateOffsetFor(cid); - LoadIsolate(temp_reg); - intptr_t table_offset = - Isolate::class_table_offset() + ClassTable::TableOffsetFor(cid); - lw(temp_reg, Address(temp_reg, table_offset)); - AddImmediate(temp_reg, state_offset); - lw(temp_reg, Address(temp_reg, 0)); - andi(CMPRES1, temp_reg, Immediate(ClassHeapStats::TraceAllocationMask())); - bne(CMPRES1, ZR, trace); -} - - -void Assembler::UpdateAllocationStats(intptr_t cid, - Register temp_reg, - Heap::Space space) { - ASSERT(!in_delay_slot_); - ASSERT(temp_reg != kNoRegister); - ASSERT(temp_reg != TMP); - ASSERT(cid > 0); - intptr_t counter_offset = - ClassTable::CounterOffsetFor(cid, space == Heap::kNew); - LoadIsolate(temp_reg); - intptr_t table_offset = - Isolate::class_table_offset() + ClassTable::TableOffsetFor(cid); - lw(temp_reg, Address(temp_reg, table_offset)); - AddImmediate(temp_reg, counter_offset); - lw(TMP, Address(temp_reg, 0)); - AddImmediate(TMP, 1); - sw(TMP, Address(temp_reg, 0)); -} - - -void Assembler::UpdateAllocationStatsWithSize(intptr_t cid, - Register size_reg, - Register temp_reg, - Heap::Space space) { - ASSERT(!in_delay_slot_); - ASSERT(temp_reg != kNoRegister); - ASSERT(cid > 0); - ASSERT(temp_reg != TMP); - const uword class_offset = ClassTable::ClassOffsetFor(cid); - const uword count_field_offset = - (space == Heap::kNew) - ? ClassHeapStats::allocated_since_gc_new_space_offset() - : ClassHeapStats::allocated_since_gc_old_space_offset(); - const uword size_field_offset = - (space == Heap::kNew) - ? ClassHeapStats::allocated_size_since_gc_new_space_offset() - : ClassHeapStats::allocated_size_since_gc_old_space_offset(); - LoadIsolate(temp_reg); - intptr_t table_offset = - Isolate::class_table_offset() + ClassTable::TableOffsetFor(cid); - lw(temp_reg, Address(temp_reg, table_offset)); - AddImmediate(temp_reg, class_offset); - lw(TMP, Address(temp_reg, count_field_offset)); - AddImmediate(TMP, 1); - sw(TMP, Address(temp_reg, count_field_offset)); - lw(TMP, Address(temp_reg, size_field_offset)); - addu(TMP, TMP, size_reg); - sw(TMP, Address(temp_reg, size_field_offset)); -} -#endif // !PRODUCT - - -void Assembler::TryAllocate(const Class& cls, - Label* failure, - Register instance_reg, - Register temp_reg) { - ASSERT(!in_delay_slot_); - ASSERT(failure != NULL); - if (FLAG_inline_alloc) { - // If this allocation is traced, program will jump to failure path - // (i.e. the allocation stub) which will allocate the object and trace the - // allocation call site. - NOT_IN_PRODUCT(MaybeTraceAllocation(cls.id(), temp_reg, failure)); - const intptr_t instance_size = cls.instance_size(); - Heap::Space space = Heap::kNew; - lw(temp_reg, Address(THR, Thread::heap_offset())); - lw(instance_reg, Address(temp_reg, Heap::TopOffset(space))); - // TODO(koda): Protect against unsigned overflow here. 
- AddImmediate(instance_reg, instance_size); - - // instance_reg: potential next object start. - lw(TMP, Address(temp_reg, Heap::EndOffset(space))); - // Fail if heap end unsigned less than or equal to instance_reg. - BranchUnsignedLessEqual(TMP, instance_reg, failure); - - // Successfully allocated the object, now update top to point to - // next object start and store the class in the class field of object. - sw(instance_reg, Address(temp_reg, Heap::TopOffset(space))); - - ASSERT(instance_size >= kHeapObjectTag); - AddImmediate(instance_reg, -instance_size + kHeapObjectTag); - NOT_IN_PRODUCT(UpdateAllocationStats(cls.id(), temp_reg, space)); - uint32_t tags = 0; - tags = RawObject::SizeTag::update(instance_size, tags); - ASSERT(cls.id() != kIllegalCid); - tags = RawObject::ClassIdTag::update(cls.id(), tags); - LoadImmediate(TMP, tags); - sw(TMP, FieldAddress(instance_reg, Object::tags_offset())); - } else { - b(failure); - } -} - - -void Assembler::TryAllocateArray(intptr_t cid, - intptr_t instance_size, - Label* failure, - Register instance, - Register end_address, - Register temp1, - Register temp2) { - if (FLAG_inline_alloc) { - // If this allocation is traced, program will jump to failure path - // (i.e. the allocation stub) which will allocate the object and trace the - // allocation call site. - NOT_IN_PRODUCT(MaybeTraceAllocation(cid, temp1, failure)); - Isolate* isolate = Isolate::Current(); - Heap* heap = isolate->heap(); - Heap::Space space = Heap::kNew; - lw(temp1, Address(THR, Thread::heap_offset())); - // Potential new object start. - lw(instance, Address(temp1, heap->TopOffset(space))); - // Potential next object start. - AddImmediate(end_address, instance, instance_size); - // Branch on unsigned overflow. - BranchUnsignedLess(end_address, instance, failure); - - // Check if the allocation fits into the remaining space. - // instance: potential new object start, /* inline_isolate = */ false. - // end_address: potential next object start. - lw(temp2, Address(temp1, Heap::EndOffset(space))); - BranchUnsignedGreaterEqual(end_address, temp2, failure); - - // Successfully allocated the object(s), now update top to point to - // next object start and initialize the object. - sw(end_address, Address(temp1, Heap::TopOffset(space))); - addiu(instance, instance, Immediate(kHeapObjectTag)); - LoadImmediate(temp1, instance_size); - NOT_IN_PRODUCT(UpdateAllocationStatsWithSize(cid, temp1, temp2, space)); - - // Initialize the tags. - // instance: new object start as a tagged pointer. - uint32_t tags = 0; - tags = RawObject::ClassIdTag::update(cid, tags); - tags = RawObject::SizeTag::update(instance_size, tags); - LoadImmediate(temp1, tags); - sw(temp1, FieldAddress(instance, Array::tags_offset())); // Store tags. - } else { - b(failure); - } -} - - -void Assembler::CallRuntime(const RuntimeEntry& entry, - intptr_t argument_count) { - entry.Call(this, argument_count); -} - - -void Assembler::EnterDartFrame(intptr_t frame_size) { - ASSERT(!in_delay_slot_); - - SetPrologueOffset(); - - addiu(SP, SP, Immediate(-4 * kWordSize)); - sw(RA, Address(SP, 3 * kWordSize)); - sw(FP, Address(SP, 2 * kWordSize)); - sw(CODE_REG, Address(SP, 1 * kWordSize)); - sw(PP, Address(SP, 0 * kWordSize)); - - // Set FP to the saved previous FP. - addiu(FP, SP, Immediate(2 * kWordSize)); - - LoadPoolPointer(); - - // Reserve space for locals. 
- AddImmediate(SP, -frame_size); -} - - -// On entry to a function compiled for OSR, the caller's frame pointer, the -// stack locals, and any copied parameters are already in place. The frame -// pointer is already set up. The PC marker is not correct for the -// optimized function and there may be extra space for spill slots to -// allocate. We must also set up the pool pointer for the function. -void Assembler::EnterOsrFrame(intptr_t extra_size) { - ASSERT(!in_delay_slot_); - Comment("EnterOsrFrame"); - - // Restore return address. - lw(RA, Address(FP, 1 * kWordSize)); - - // Load the pool pointer. offset has already been subtracted from temp. - RestoreCodePointer(); - LoadPoolPointer(); - - // Reserve space for locals. - AddImmediate(SP, -extra_size); -} - - -void Assembler::LeaveDartFrame(RestorePP restore_pp) { - ASSERT(!in_delay_slot_); - addiu(SP, FP, Immediate(-2 * kWordSize)); - - lw(RA, Address(SP, 3 * kWordSize)); - lw(FP, Address(SP, 2 * kWordSize)); - if (restore_pp == kRestoreCallerPP) { - lw(PP, Address(SP, 0 * kWordSize)); - } - - // Adjust SP for PC, RA, FP, PP pushed in EnterDartFrame. - addiu(SP, SP, Immediate(4 * kWordSize)); -} - - -void Assembler::LeaveDartFrameAndReturn(Register ra) { - ASSERT(!in_delay_slot_); - addiu(SP, FP, Immediate(-2 * kWordSize)); - - lw(RA, Address(SP, 3 * kWordSize)); - lw(FP, Address(SP, 2 * kWordSize)); - lw(PP, Address(SP, 0 * kWordSize)); - - // Adjust SP for PC, RA, FP, PP pushed in EnterDartFrame, and return. - jr(ra); - delay_slot()->addiu(SP, SP, Immediate(4 * kWordSize)); -} - - -void Assembler::ReserveAlignedFrameSpace(intptr_t frame_space) { - ASSERT(!in_delay_slot_); - // Reserve space for arguments and align frame before entering - // the C++ world. - AddImmediate(SP, -frame_space); - if (OS::ActivationFrameAlignment() > 1) { - LoadImmediate(TMP, ~(OS::ActivationFrameAlignment() - 1)); - and_(SP, SP, TMP); - } -} - - -void Assembler::EnterCallRuntimeFrame(intptr_t frame_space) { - ASSERT(!in_delay_slot_); - const intptr_t kPushedRegistersSize = kDartVolatileCpuRegCount * kWordSize + - 3 * kWordSize + // PP, FP and RA. - kDartVolatileFpuRegCount * kWordSize; - - SetPrologueOffset(); - - Comment("EnterCallRuntimeFrame"); - - // Save volatile CPU and FPU registers on the stack: - // ------------- - // FPU Registers - // CPU Registers - // RA - // FP - // ------------- - // TODO(zra): It may be a problem for walking the stack that FP is below - // the saved registers. If it turns out to be a problem in the - // future, try pushing RA and FP before the volatile registers. - addiu(SP, SP, Immediate(-kPushedRegistersSize)); - for (int i = kDartFirstVolatileFpuReg; i <= kDartLastVolatileFpuReg; i++) { - // These go above the volatile CPU registers. - const int slot = - (i - kDartFirstVolatileFpuReg) + kDartVolatileCpuRegCount + 3; - FRegister reg = static_cast(i); - swc1(reg, Address(SP, slot * kWordSize)); - } - for (int i = kDartFirstVolatileCpuReg; i <= kDartLastVolatileCpuReg; i++) { - // + 2 because FP goes in slot 0. - const int slot = (i - kDartFirstVolatileCpuReg) + 3; - Register reg = static_cast(i); - sw(reg, Address(SP, slot * kWordSize)); - } - sw(RA, Address(SP, 2 * kWordSize)); - sw(FP, Address(SP, 1 * kWordSize)); - sw(PP, Address(SP, 0 * kWordSize)); - LoadPoolPointer(); - - mov(FP, SP); - - ReserveAlignedFrameSpace(frame_space); -} - - -void Assembler::LeaveCallRuntimeFrame() { - ASSERT(!in_delay_slot_); - const intptr_t kPushedRegistersSize = kDartVolatileCpuRegCount * kWordSize + - 3 * kWordSize + // FP and RA. 
- kDartVolatileFpuRegCount * kWordSize; - - Comment("LeaveCallRuntimeFrame"); - - // SP might have been modified to reserve space for arguments - // and ensure proper alignment of the stack frame. - // We need to restore it before restoring registers. - mov(SP, FP); - - // Restore volatile CPU and FPU registers from the stack. - lw(PP, Address(SP, 0 * kWordSize)); - lw(FP, Address(SP, 1 * kWordSize)); - lw(RA, Address(SP, 2 * kWordSize)); - for (int i = kDartFirstVolatileCpuReg; i <= kDartLastVolatileCpuReg; i++) { - // + 2 because FP goes in slot 0. - const int slot = (i - kDartFirstVolatileCpuReg) + 3; - Register reg = static_cast(i); - lw(reg, Address(SP, slot * kWordSize)); - } - for (int i = kDartFirstVolatileFpuReg; i <= kDartLastVolatileFpuReg; i++) { - // These go above the volatile CPU registers. - const int slot = - (i - kDartFirstVolatileFpuReg) + kDartVolatileCpuRegCount + 3; - FRegister reg = static_cast(i); - lwc1(reg, Address(SP, slot * kWordSize)); - } - addiu(SP, SP, Immediate(kPushedRegistersSize)); -} - - -Address Assembler::ElementAddressForIntIndex(bool is_external, - intptr_t cid, - intptr_t index_scale, - Register array, - intptr_t index) const { - const int64_t offset = - index * index_scale + - (is_external ? 0 : (Instance::DataOffsetFor(cid) - kHeapObjectTag)); - ASSERT(Utils::IsInt(32, offset)); - ASSERT(Address::CanHoldOffset(offset)); - return Address(array, static_cast(offset)); -} - - -void Assembler::LoadElementAddressForIntIndex(Register address, - bool is_external, - intptr_t cid, - intptr_t index_scale, - Register array, - intptr_t index) { - const int64_t offset = - index * index_scale + - (is_external ? 0 : (Instance::DataOffsetFor(cid) - kHeapObjectTag)); - AddImmediate(address, array, offset); -} - - -Address Assembler::ElementAddressForRegIndex(bool is_load, - bool is_external, - intptr_t cid, - intptr_t index_scale, - Register array, - Register index) { - // Note that index is expected smi-tagged, (i.e, LSL 1) for all arrays. - const intptr_t shift = Utils::ShiftForPowerOfTwo(index_scale) - kSmiTagShift; - const int32_t offset = - is_external ? 0 : (Instance::DataOffsetFor(cid) - kHeapObjectTag); - ASSERT(array != TMP); - ASSERT(index != TMP); - const Register base = is_load ? TMP : index; - if (shift < 0) { - ASSERT(shift == -1); - sra(TMP, index, 1); - addu(base, array, TMP); - } else if (shift == 0) { - addu(base, array, index); - } else { - sll(TMP, index, shift); - addu(base, array, TMP); - } - ASSERT(Address::CanHoldOffset(offset)); - return Address(base, offset); -} - - -void Assembler::LoadElementAddressForRegIndex(Register address, - bool is_load, - bool is_external, - intptr_t cid, - intptr_t index_scale, - Register array, - Register index) { - // Note that index is expected smi-tagged, (i.e, LSL 1) for all arrays. - const intptr_t shift = Utils::ShiftForPowerOfTwo(index_scale) - kSmiTagShift; - const int32_t offset = - is_external ? 
0 : (Instance::DataOffsetFor(cid) - kHeapObjectTag); - if (shift < 0) { - ASSERT(shift == -1); - sra(address, index, 1); - addu(address, array, address); - } else if (shift == 0) { - addu(address, array, index); - } else { - sll(address, index, shift); - addu(address, array, address); - } - if (offset != 0) { - AddImmediate(address, offset); - } -} - - -void Assembler::LoadHalfWordUnaligned(Register dst, - Register addr, - Register tmp) { - ASSERT(dst != addr); - lbu(dst, Address(addr, 0)); - lb(tmp, Address(addr, 1)); - sll(tmp, tmp, 8); - or_(dst, dst, tmp); -} - - -void Assembler::LoadHalfWordUnsignedUnaligned(Register dst, - Register addr, - Register tmp) { - ASSERT(dst != addr); - lbu(dst, Address(addr, 0)); - lbu(tmp, Address(addr, 1)); - sll(tmp, tmp, 8); - or_(dst, dst, tmp); -} - - -void Assembler::StoreHalfWordUnaligned(Register src, - Register addr, - Register tmp) { - sb(src, Address(addr, 0)); - srl(tmp, src, 8); - sb(tmp, Address(addr, 1)); -} - - -void Assembler::LoadWordUnaligned(Register dst, Register addr, Register tmp) { - // TODO(rmacnak): LWL + LWR - ASSERT(dst != addr); - lbu(dst, Address(addr, 0)); - lbu(tmp, Address(addr, 1)); - sll(tmp, tmp, 8); - or_(dst, dst, tmp); - lbu(tmp, Address(addr, 2)); - sll(tmp, tmp, 16); - or_(dst, dst, tmp); - lbu(tmp, Address(addr, 3)); - sll(tmp, tmp, 24); - or_(dst, dst, tmp); -} - - -void Assembler::StoreWordUnaligned(Register src, Register addr, Register tmp) { - // TODO(rmacnak): SWL + SWR - sb(src, Address(addr, 0)); - srl(tmp, src, 8); - sb(tmp, Address(addr, 1)); - srl(tmp, src, 16); - sb(tmp, Address(addr, 2)); - srl(tmp, src, 24); - sb(tmp, Address(addr, 3)); -} - - -static const char* cpu_reg_names[kNumberOfCpuRegisters] = { - "zr", "tmp", "v0", "v1", "a0", "a1", "a2", "a3", "t0", "t1", "t2", - "t3", "t4", "t5", "t6", "t7", "s0", "s1", "s2", "s3", "s4", "s5", - "s6", "s7", "t8", "t9", "k0", "k1", "gp", "sp", "fp", "ra", -}; - - -const char* Assembler::RegisterName(Register reg) { - ASSERT((0 <= reg) && (reg < kNumberOfCpuRegisters)); - return cpu_reg_names[reg]; -} - - -static const char* fpu_reg_names[kNumberOfFpuRegisters] = { - "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", - "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15", -}; - - -const char* Assembler::FpuRegisterName(FpuRegister reg) { - ASSERT((0 <= reg) && (reg < kNumberOfFpuRegisters)); - return fpu_reg_names[reg]; -} - - -void Assembler::Stop(const char* message) { - if (FLAG_print_stop_message) { - UNIMPLEMENTED(); - } - Label stop; - b(&stop); - Emit(reinterpret_cast(message)); - Bind(&stop); - break_(Instr::kStopMessageCode); -} - -} // namespace dart - -#endif // defined TARGET_ARCH_MIPS diff --git a/runtime/vm/assembler_mips.h b/runtime/vm/assembler_mips.h deleted file mode 100644 index 7073e307883..00000000000 --- a/runtime/vm/assembler_mips.h +++ /dev/null @@ -1,1722 +0,0 @@ -// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file -// for details. All rights reserved. Use of this source code is governed by a -// BSD-style license that can be found in the LICENSE file. - -#ifndef RUNTIME_VM_ASSEMBLER_MIPS_H_ -#define RUNTIME_VM_ASSEMBLER_MIPS_H_ - -#ifndef RUNTIME_VM_ASSEMBLER_H_ -#error Do not include assembler_mips.h directly; use assembler.h instead. 
-#endif - -#include "platform/assert.h" -#include "platform/utils.h" -#include "vm/constants_mips.h" -#include "vm/hash_map.h" -#include "vm/object.h" -#include "vm/simulator.h" - -// References to documentation in this file refer to: -// "MIPS® Architecture For Programmers Volume I-A: -// Introduction to the MIPS32® Architecture" in short "VolI-A" -// and -// "MIPS® Architecture For Programmers Volume II-A: -// The MIPS32® Instruction Set" in short "VolII-A" -namespace dart { - -// Forward declarations. -class RuntimeEntry; -class StubEntry; - -class Immediate : public ValueObject { - public: - explicit Immediate(int32_t value) : value_(value) {} - - Immediate(const Immediate& other) : ValueObject(), value_(other.value_) {} - Immediate& operator=(const Immediate& other) { - value_ = other.value_; - return *this; - } - - private: - int32_t value_; - - int32_t value() const { return value_; } - - friend class Assembler; -}; - - -class Address : public ValueObject { - public: - explicit Address(Register base, int32_t offset = 0) - : ValueObject(), base_(base), offset_(offset) {} - - // This addressing mode does not exist. - Address(Register base, Register offset); - - Address(const Address& other) - : ValueObject(), base_(other.base_), offset_(other.offset_) {} - Address& operator=(const Address& other) { - base_ = other.base_; - offset_ = other.offset_; - return *this; - } - - uint32_t encoding() const { - ASSERT(Utils::IsInt(kImmBits, offset_)); - uint16_t imm_value = static_cast(offset_); - return (base_ << kRsShift) | imm_value; - } - - static bool CanHoldOffset(int32_t offset) { - return Utils::IsInt(kImmBits, offset); - } - - Register base() const { return base_; } - int32_t offset() const { return offset_; } - - private: - Register base_; - int32_t offset_; -}; - - -class FieldAddress : public Address { - public: - FieldAddress(Register base, int32_t disp) - : Address(base, disp - kHeapObjectTag) {} - - FieldAddress(const FieldAddress& other) : Address(other) {} - - FieldAddress& operator=(const FieldAddress& other) { - Address::operator=(other); - return *this; - } -}; - - -class Label : public ValueObject { - public: - Label() : position_(0) {} - - ~Label() { - // Assert if label is being destroyed with unresolved branches pending. - ASSERT(!IsLinked()); - } - - // Returns the position for bound and linked labels. Cannot be used - // for unused labels. - intptr_t Position() const { - ASSERT(!IsUnused()); - return IsBound() ? -position_ - kWordSize : position_ - kWordSize; - } - - bool IsBound() const { return position_ < 0; } - bool IsUnused() const { return position_ == 0; } - bool IsLinked() const { return position_ > 0; } - - private: - intptr_t position_; - - void Reinitialize() { position_ = 0; } - - void BindTo(intptr_t position) { - ASSERT(!IsBound()); - position_ = -position - kWordSize; - ASSERT(IsBound()); - } - - void LinkTo(intptr_t position) { - ASSERT(!IsBound()); - position_ = position + kWordSize; - ASSERT(IsLinked()); - } - - friend class Assembler; - DISALLOW_COPY_AND_ASSIGN(Label); -}; - - -// There is no dedicated status register on MIPS, but Condition values are used -// and passed around by the intermediate language, so we need a Condition type. -// We delay code generation of a comparison that would result in a traditional -// condition code in the status register by keeping both register operands and -// the relational operator between them as the Condition. 
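Because the relational operator and both operands are carried around instead of being evaluated eagerly, a whole deferred comparison has to fit in one word; the Condition class below packs it as 6 + 6 + 4 + 16 bits. A minimal sketch of that packing with plain shifts (the real class uses BitField helpers):

    #include <cassert>
    #include <cstdint>

    // left: bits 0..5, right: 6..11, rel_op: 12..15, imm: 16..31,
    // matching the field sizes declared in Condition::Bits below.
    constexpr uint32_t Encode(uint32_t left, uint32_t right, uint32_t rel_op,
                              uint16_t imm) {
      return (left & 0x3F) | ((right & 0x3F) << 6) | ((rel_op & 0xF) << 12) |
             (static_cast<uint32_t>(imm) << 16);
    }
    constexpr uint32_t LeftOf(uint32_t bits) { return bits & 0x3F; }
    constexpr uint32_t RelOpOf(uint32_t bits) { return (bits >> 12) & 0xF; }

    int main() {
      const uint32_t c = Encode(4, 5, 2, 42);
      assert(LeftOf(c) == 4 && RelOpOf(c) == 2);
      return 0;
    }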
-class Condition : public ValueObject {
- public:
- enum Bits {
- kLeftPos = 0,
- kLeftSize = 6,
- kRightPos = kLeftPos + kLeftSize,
- kRightSize = 6,
- kRelOpPos = kRightPos + kRightSize,
- kRelOpSize = 4,
- kImmPos = kRelOpPos + kRelOpSize,
- kImmSize = 16,
- };
-
- class LeftBits : public BitField<uword, Register, kLeftPos, kLeftSize> {};
- class RightBits : public BitField<uword, Register, kRightPos, kRightSize> {};
- class RelOpBits
- : public BitField<uword, RelationOperator, kRelOpPos, kRelOpSize> {};
- class ImmBits : public BitField<uword, uint16_t, kImmPos, kImmSize> {};
-
- Register left() const {
- ASSERT(IsValid());
- return LeftBits::decode(bits_);
- }
-
- Register right() const {
- ASSERT(IsValid());
- return RightBits::decode(bits_);
- }
- RelationOperator rel_op() const { return RelOpBits::decode(bits_); }
- int16_t imm() const {
- ASSERT(IsValid());
- return static_cast<int16_t>(ImmBits::decode(bits_));
- }
-
- static bool IsValidImm(int32_t value) {
- // We want both value and value + 1 to fit in an int16_t.
- return (-0x08000 <= value) && (value < 0x7fff);
- }
-
- void set_rel_op(RelationOperator value) {
- ASSERT(IsValidRelOp(value));
- bits_ = RelOpBits::update(value, bits_);
- }
-
- bool IsValid() const { return rel_op() != INVALID_RELATION; }
-
- // Uninitialized condition.
- Condition() : ValueObject(), bits_(RelOpBits::update(INVALID_RELATION, 0)) {}
-
- // Copy constructor.
- Condition(const Condition& other) : ValueObject(), bits_(other.bits_) {}
-
- // Copy assignment operator.
- Condition& operator=(const Condition& other) {
- bits_ = other.bits_;
- return *this;
- }
-
- Condition(Register left,
- Register right,
- RelationOperator rel_op,
- int16_t imm = 0) {
- // At most one constant, ZR or immediate.
- ASSERT(!(((left == ZR) || (left == IMM)) &&
- ((right == ZR) || (right == IMM))));
- // Non-zero immediate value is only allowed for IMM.
- ASSERT((imm != 0) == ((left == IMM) || (right == IMM)));
- set_left(left);
- set_right(right);
- if (rel_op == INVALID_RELATION) {
- SetToInvalidState();
- } else {
- set_rel_op(rel_op);
- }
- set_imm(imm);
- }
-
- private:
- void SetToInvalidState() {
- bits_ = RelOpBits::update(INVALID_RELATION, bits_);
- }
-
- static bool IsValidRelOp(RelationOperator value) {
- return (AL <= value) && (value <= ULE);
- }
-
- static bool IsValidRegister(Register value) {
- return (ZR <= value) && (value <= IMM) && (value != AT);
- }
-
- void set_left(Register value) {
- ASSERT(IsValidRegister(value));
- bits_ = LeftBits::update(value, bits_);
- }
-
- void set_right(Register value) {
- ASSERT(IsValidRegister(value));
- bits_ = RightBits::update(value, bits_);
- }
-
- void set_imm(int16_t value) {
- ASSERT(IsValidImm(value));
- bits_ = ImmBits::update(static_cast<uint16_t>(value), bits_);
- }
-
- uword bits_;
-};
-
-
-class Assembler : public ValueObject {
- public:
- explicit Assembler(bool use_far_branches = false)
- : buffer_(),
- prologue_offset_(-1),
- has_single_entry_point_(true),
- use_far_branches_(use_far_branches),
- delay_slot_available_(false),
- in_delay_slot_(false),
- comments_(),
- constant_pool_allowed_(true) {}
- ~Assembler() {}
-
- void PopRegister(Register r) { Pop(r); }
-
- void Bind(Label* label);
- void Jump(Label* label) { b(label); }
-
- // Misc. functionality
- intptr_t CodeSize() const { return buffer_.Size(); }
- intptr_t prologue_offset() const { return prologue_offset_; }
- bool has_single_entry_point() const { return has_single_entry_point_; }
-
- // Count the fixups that produce a pointer offset, without processing
- // the fixups.
- intptr_t CountPointerOffsets() const { return buffer_.CountPointerOffsets(); } - - const ZoneGrowableArray& GetPointerOffsets() const { - return buffer_.pointer_offsets(); - } - - ObjectPoolWrapper& object_pool_wrapper() { return object_pool_wrapper_; } - - RawObjectPool* MakeObjectPool() { - return object_pool_wrapper_.MakeObjectPool(); - } - - void FinalizeInstructions(const MemoryRegion& region) { - buffer_.FinalizeInstructions(region); - } - - bool use_far_branches() const { - return FLAG_use_far_branches || use_far_branches_; - } - - void set_use_far_branches(bool b) { use_far_branches_ = b; } - - void EnterFrame(); - void LeaveFrameAndReturn(); - - // Set up a stub frame so that the stack traversal code can easily identify - // a stub frame. - void EnterStubFrame(intptr_t frame_size = 0); - void LeaveStubFrame(); - // A separate macro for when a Ret immediately follows, so that we can use - // the branch delay slot. - void LeaveStubFrameAndReturn(Register ra = RA); - - void MonomorphicCheckedEntry(); - - void UpdateAllocationStats(intptr_t cid, - Register temp_reg, - Heap::Space space); - - void UpdateAllocationStatsWithSize(intptr_t cid, - Register size_reg, - Register temp_reg, - Heap::Space space); - - - void MaybeTraceAllocation(intptr_t cid, Register temp_reg, Label* trace); - - // Inlined allocation of an instance of class 'cls', code has no runtime - // calls. Jump to 'failure' if the instance cannot be allocated here. - // Allocated instance is returned in 'instance_reg'. - // Only the tags field of the object is initialized. - void TryAllocate(const Class& cls, - Label* failure, - Register instance_reg, - Register temp_reg); - - void TryAllocateArray(intptr_t cid, - intptr_t instance_size, - Label* failure, - Register instance, - Register end_address, - Register temp1, - Register temp2); - - // Debugging and bringup support. - void Stop(const char* message); - void Unimplemented(const char* message); - void Untested(const char* message); - void Unreachable(const char* message); - - static void InitializeMemoryWithBreakpoints(uword data, intptr_t length); - - void Comment(const char* format, ...) PRINTF_ATTRIBUTE(2, 3); - static bool EmittingComments(); - - const Code::Comments& GetCodeComments() const; - - static const char* RegisterName(Register reg); - - static const char* FpuRegisterName(FpuRegister reg); - - void SetPrologueOffset() { - if (prologue_offset_ == -1) { - prologue_offset_ = CodeSize(); - } - } - - // A utility to be able to assemble an instruction into the delay slot. - Assembler* delay_slot() { - ASSERT(delay_slot_available_); - ASSERT(buffer_.Load(buffer_.GetPosition() - sizeof(int32_t)) == - Instr::kNopInstruction); - buffer_.Remit(); - delay_slot_available_ = false; - in_delay_slot_ = true; - return this; - } - - // CPU instructions in alphabetical order. - void addd(DRegister dd, DRegister ds, DRegister dt) { - // DRegisters start at the even FRegisters. 
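As the comment above says, each 64-bit DRegister is really an adjacent even/odd pair of 32-bit FRegisters, which is why the FPU emitters below all scale the D index by two. The mapping in a trivial sketch (on this little-endian target the even half holds the low word):

    constexpr int LowFRegOf(int d) { return 2 * d; }       // e.g. D7 -> F14
    constexpr int HighFRegOf(int d) { return 2 * d + 1; }   // e.g. D7 -> F15
    static_assert(LowFRegOf(7) == 14 && HighFRegOf(7) == 15, "D7 = (F14, F15)");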
- FRegister fd = static_cast(dd * 2); - FRegister fs = static_cast(ds * 2); - FRegister ft = static_cast(dt * 2); - EmitFpuRType(COP1, FMT_D, ft, fs, fd, COP1_ADD); - } - - void addiu(Register rt, Register rs, const Immediate& imm) { - ASSERT(Utils::IsInt(kImmBits, imm.value())); - const uint16_t imm_value = static_cast(imm.value()); - EmitIType(ADDIU, rs, rt, imm_value); - } - - void addu(Register rd, Register rs, Register rt) { - EmitRType(SPECIAL, rs, rt, rd, 0, ADDU); - } - - void and_(Register rd, Register rs, Register rt) { - EmitRType(SPECIAL, rs, rt, rd, 0, AND); - } - - void andi(Register rt, Register rs, const Immediate& imm) { - ASSERT(Utils::IsUint(kImmBits, imm.value())); - const uint16_t imm_value = static_cast(imm.value()); - EmitIType(ANDI, rs, rt, imm_value); - } - - // Unconditional branch. - void b(Label* l) { beq(R0, R0, l); } - - void bal(Label* l) { - ASSERT(!in_delay_slot_); - EmitRegImmBranch(BGEZAL, R0, l); - EmitBranchDelayNop(); - } - - // Branch on floating point false. - void bc1f(Label* l) { - EmitFpuBranch(false, l); - EmitBranchDelayNop(); - } - - // Branch on floating point true. - void bc1t(Label* l) { - EmitFpuBranch(true, l); - EmitBranchDelayNop(); - } - - // Branch if equal. - void beq(Register rs, Register rt, Label* l) { - ASSERT(!in_delay_slot_); - EmitBranch(BEQ, rs, rt, l); - EmitBranchDelayNop(); - } - - // Branch if equal, likely taken. - // Delay slot executed only when branch taken. - void beql(Register rs, Register rt, Label* l) { - ASSERT(!in_delay_slot_); - EmitBranch(BEQL, rs, rt, l); - EmitBranchDelayNop(); - } - - // Branch if rs >= 0. - void bgez(Register rs, Label* l) { - ASSERT(!in_delay_slot_); - EmitRegImmBranch(BGEZ, rs, l); - EmitBranchDelayNop(); - } - - // Branch if rs >= 0, likely taken. - // Delay slot executed only when branch taken. - void bgezl(Register rs, Label* l) { - ASSERT(!in_delay_slot_); - EmitRegImmBranch(BGEZL, rs, l); - EmitBranchDelayNop(); - } - - // Branch if rs > 0. - void bgtz(Register rs, Label* l) { - ASSERT(!in_delay_slot_); - EmitBranch(BGTZ, rs, R0, l); - EmitBranchDelayNop(); - } - - // Branch if rs > 0, likely taken. - // Delay slot executed only when branch taken. - void bgtzl(Register rs, Label* l) { - ASSERT(!in_delay_slot_); - EmitBranch(BGTZL, rs, R0, l); - EmitBranchDelayNop(); - } - - // Branch if rs <= 0. - void blez(Register rs, Label* l) { - ASSERT(!in_delay_slot_); - EmitBranch(BLEZ, rs, R0, l); - EmitBranchDelayNop(); - } - - // Branch if rs <= 0, likely taken. - // Delay slot executed only when branch taken. - void blezl(Register rs, Label* l) { - ASSERT(!in_delay_slot_); - EmitBranch(BLEZL, rs, R0, l); - EmitBranchDelayNop(); - } - - // Branch if rs < 0. - void bltz(Register rs, Label* l) { - ASSERT(!in_delay_slot_); - EmitRegImmBranch(BLTZ, rs, l); - EmitBranchDelayNop(); - } - - // Branch if rs < 0, likely taken. - // Delay slot executed only when branch taken. - void bltzl(Register rs, Label* l) { - ASSERT(!in_delay_slot_); - EmitRegImmBranch(BLTZL, rs, l); - EmitBranchDelayNop(); - } - - // Branch if not equal. - void bne(Register rs, Register rt, Label* l) { - ASSERT(!in_delay_slot_); // Jump within a delay slot is not supported. - EmitBranch(BNE, rs, rt, l); - EmitBranchDelayNop(); - } - - // Branch if not equal, likely taken. - // Delay slot executed only when branch taken. - void bnel(Register rs, Register rt, Label* l) { - ASSERT(!in_delay_slot_); // Jump within a delay slot is not supported. 
- EmitBranch(BNEL, rs, rt, l); - EmitBranchDelayNop(); - } - - static int32_t BreakEncoding(int32_t code) { - ASSERT(Utils::IsUint(20, code)); - return SPECIAL << kOpcodeShift | code << kBreakCodeShift | - BREAK << kFunctionShift; - } - - - void break_(int32_t code) { Emit(BreakEncoding(code)); } - - static uword GetBreakInstructionFiller() { return BreakEncoding(0); } - - // FPU compare, always false. - void cfd(DRegister ds, DRegister dt) { - FRegister fs = static_cast(ds * 2); - FRegister ft = static_cast(dt * 2); - EmitFpuRType(COP1, FMT_D, ft, fs, F0, COP1_C_F); - } - - // FPU compare, true if unordered, i.e. one is NaN. - void cund(DRegister ds, DRegister dt) { - FRegister fs = static_cast(ds * 2); - FRegister ft = static_cast(dt * 2); - EmitFpuRType(COP1, FMT_D, ft, fs, F0, COP1_C_UN); - } - - // FPU compare, true if equal. - void ceqd(DRegister ds, DRegister dt) { - FRegister fs = static_cast(ds * 2); - FRegister ft = static_cast(dt * 2); - EmitFpuRType(COP1, FMT_D, ft, fs, F0, COP1_C_EQ); - } - - // FPU compare, true if unordered or equal. - void cueqd(DRegister ds, DRegister dt) { - FRegister fs = static_cast(ds * 2); - FRegister ft = static_cast(dt * 2); - EmitFpuRType(COP1, FMT_D, ft, fs, F0, COP1_C_UEQ); - } - - // FPU compare, true if less than. - void coltd(DRegister ds, DRegister dt) { - FRegister fs = static_cast(ds * 2); - FRegister ft = static_cast(dt * 2); - EmitFpuRType(COP1, FMT_D, ft, fs, F0, COP1_C_OLT); - } - - // FPU compare, true if unordered or less than. - void cultd(DRegister ds, DRegister dt) { - FRegister fs = static_cast(ds * 2); - FRegister ft = static_cast(dt * 2); - EmitFpuRType(COP1, FMT_D, ft, fs, F0, COP1_C_ULT); - } - - // FPU compare, true if less or equal. - void coled(DRegister ds, DRegister dt) { - FRegister fs = static_cast(ds * 2); - FRegister ft = static_cast(dt * 2); - EmitFpuRType(COP1, FMT_D, ft, fs, F0, COP1_C_OLE); - } - - // FPU compare, true if unordered or less or equal. - void culed(DRegister ds, DRegister dt) { - FRegister fs = static_cast(ds * 2); - FRegister ft = static_cast(dt * 2); - EmitFpuRType(COP1, FMT_D, ft, fs, F0, COP1_C_ULE); - } - - void clo(Register rd, Register rs) { - EmitRType(SPECIAL2, rs, rd, rd, 0, CLO); - } - - void clz(Register rd, Register rs) { - EmitRType(SPECIAL2, rs, rd, rd, 0, CLZ); - } - - // Convert a double in ds to a 32-bit signed int in fd rounding towards 0. - void truncwd(FRegister fd, DRegister ds) { - FRegister fs = static_cast(ds * 2); - EmitFpuRType(COP1, FMT_D, F0, fs, fd, COP1_TRUNC_W); - } - - // Convert a 32-bit float in fs to a 64-bit double in dd. - void cvtds(DRegister dd, FRegister fs) { - FRegister fd = static_cast(dd * 2); - EmitFpuRType(COP1, FMT_S, F0, fs, fd, COP1_CVT_D); - } - - // Converts a 32-bit signed int in fs to a double in fd. - void cvtdw(DRegister dd, FRegister fs) { - FRegister fd = static_cast(dd * 2); - EmitFpuRType(COP1, FMT_W, F0, fs, fd, COP1_CVT_D); - } - - // Convert a 64-bit double in ds to a 32-bit float in fd. 
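The c*.d compare family above splits into ordered and unordered predicates: a comparison involving NaN is "unordered", so coltd yields false for a NaN operand while its cultd counterpart yields true. The same split is visible in portable C++ (a host-side illustration, not the exact FPU semantics):

    #include <cassert>
    #include <cmath>

    int main() {
      const double nan = std::nan("");
      assert(!(nan < 1.0));  // Ordered less-than (c.olt.d): false on NaN.
      const bool ult = std::isnan(nan) || (nan < 1.0);
      assert(ult);           // Unordered-or-less-than (c.ult.d): true on NaN.
      return 0;
    }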
- void cvtsd(FRegister fd, DRegister ds) { - FRegister fs = static_cast(ds * 2); - EmitFpuRType(COP1, FMT_D, F0, fs, fd, COP1_CVT_S); - } - - void div(Register rs, Register rt) { EmitRType(SPECIAL, rs, rt, R0, 0, DIV); } - - void divd(DRegister dd, DRegister ds, DRegister dt) { - FRegister fd = static_cast(dd * 2); - FRegister fs = static_cast(ds * 2); - FRegister ft = static_cast(dt * 2); - EmitFpuRType(COP1, FMT_D, ft, fs, fd, COP1_DIV); - } - - void divu(Register rs, Register rt) { - EmitRType(SPECIAL, rs, rt, R0, 0, DIVU); - } - - void jalr(Register rs, Register rd = RA) { - ASSERT(rs != rd); - ASSERT(!in_delay_slot_); // Jump within a delay slot is not supported. - EmitRType(SPECIAL, rs, R0, rd, 0, JALR); - EmitBranchDelayNop(); - } - - void jr(Register rs) { - ASSERT(!in_delay_slot_); // Jump within a delay slot is not supported. - EmitRType(SPECIAL, rs, R0, R0, 0, JR); - EmitBranchDelayNop(); - } - - void lb(Register rt, const Address& addr) { EmitLoadStore(LB, rt, addr); } - - void lbu(Register rt, const Address& addr) { EmitLoadStore(LBU, rt, addr); } - - void ldc1(DRegister dt, const Address& addr) { - FRegister ft = static_cast(dt * 2); - EmitFpuLoadStore(LDC1, ft, addr); - } - - void lh(Register rt, const Address& addr) { EmitLoadStore(LH, rt, addr); } - - void lhu(Register rt, const Address& addr) { EmitLoadStore(LHU, rt, addr); } - - void ll(Register rt, const Address& addr) { EmitLoadStore(LL, rt, addr); } - - void lui(Register rt, const Immediate& imm) { - ASSERT(Utils::IsUint(kImmBits, imm.value())); - const uint16_t imm_value = static_cast(imm.value()); - EmitIType(LUI, R0, rt, imm_value); - } - - void lw(Register rt, const Address& addr) { EmitLoadStore(LW, rt, addr); } - - void lwc1(FRegister ft, const Address& addr) { - EmitFpuLoadStore(LWC1, ft, addr); - } - - void madd(Register rs, Register rt) { - EmitRType(SPECIAL2, rs, rt, R0, 0, MADD); - } - - void maddu(Register rs, Register rt) { - EmitRType(SPECIAL2, rs, rt, R0, 0, MADDU); - } - - void mfc1(Register rt, FRegister fs) { - Emit(COP1 << kOpcodeShift | COP1_MF << kCop1SubShift | rt << kRtShift | - fs << kFsShift); - } - - void mfhi(Register rd) { EmitRType(SPECIAL, R0, R0, rd, 0, MFHI); } - - void mflo(Register rd) { EmitRType(SPECIAL, R0, R0, rd, 0, MFLO); } - - void mov(Register rd, Register rs) { or_(rd, rs, ZR); } - - void movd(DRegister dd, DRegister ds) { - FRegister fd = static_cast(dd * 2); - FRegister fs = static_cast(ds * 2); - EmitFpuRType(COP1, FMT_D, F0, fs, fd, COP1_MOV); - } - - // Move if floating point false. - void movf(Register rd, Register rs) { - EmitRType(SPECIAL, rs, R0, rd, 0, MOVCI); - } - - void movn(Register rd, Register rs, Register rt) { - EmitRType(SPECIAL, rs, rt, rd, 0, MOVN); - } - - // Move if floating point true. - void movt(Register rd, Register rs) { - EmitRType(SPECIAL, rs, R1, rd, 0, MOVCI); - } - - // rd <- (rt == 0) ? 
rs : rd; - void movz(Register rd, Register rs, Register rt) { - EmitRType(SPECIAL, rs, rt, rd, 0, MOVZ); - } - - void movs(FRegister fd, FRegister fs) { - EmitFpuRType(COP1, FMT_S, F0, fs, fd, COP1_MOV); - } - - void mtc1(Register rt, FRegister fs) { - Emit(COP1 << kOpcodeShift | COP1_MT << kCop1SubShift | rt << kRtShift | - fs << kFsShift); - } - - void mthi(Register rs) { EmitRType(SPECIAL, rs, R0, R0, 0, MTHI); } - - void mtlo(Register rs) { EmitRType(SPECIAL, rs, R0, R0, 0, MTLO); } - - void muld(DRegister dd, DRegister ds, DRegister dt) { - FRegister fd = static_cast(dd * 2); - FRegister fs = static_cast(ds * 2); - FRegister ft = static_cast(dt * 2); - EmitFpuRType(COP1, FMT_D, ft, fs, fd, COP1_MUL); - } - - void mult(Register rs, Register rt) { - EmitRType(SPECIAL, rs, rt, R0, 0, MULT); - } - - void multu(Register rs, Register rt) { - EmitRType(SPECIAL, rs, rt, R0, 0, MULTU); - } - - void negd(DRegister dd, DRegister ds) { - FRegister fd = static_cast(dd * 2); - FRegister fs = static_cast(ds * 2); - EmitFpuRType(COP1, FMT_D, F0, fs, fd, COP1_NEG); - } - - void nop() { Emit(Instr::kNopInstruction); } - - void nor(Register rd, Register rs, Register rt) { - EmitRType(SPECIAL, rs, rt, rd, 0, NOR); - } - - void or_(Register rd, Register rs, Register rt) { - EmitRType(SPECIAL, rs, rt, rd, 0, OR); - } - - void ori(Register rt, Register rs, const Immediate& imm) { - ASSERT(Utils::IsUint(kImmBits, imm.value())); - const uint16_t imm_value = static_cast(imm.value()); - EmitIType(ORI, rs, rt, imm_value); - } - - void sb(Register rt, const Address& addr) { EmitLoadStore(SB, rt, addr); } - - // rt = 1 on success, 0 on failure. - void sc(Register rt, const Address& addr) { EmitLoadStore(SC, rt, addr); } - - void sdc1(DRegister dt, const Address& addr) { - FRegister ft = static_cast(dt * 2); - EmitFpuLoadStore(SDC1, ft, addr); - } - - void sh(Register rt, const Address& addr) { EmitLoadStore(SH, rt, addr); } - - void sll(Register rd, Register rt, int sa) { - EmitRType(SPECIAL, R0, rt, rd, sa, SLL); - } - - void sllv(Register rd, Register rt, Register rs) { - EmitRType(SPECIAL, rs, rt, rd, 0, SLLV); - } - - void slt(Register rd, Register rs, Register rt) { - EmitRType(SPECIAL, rs, rt, rd, 0, SLT); - } - - void slti(Register rt, Register rs, const Immediate& imm) { - ASSERT(Utils::IsInt(kImmBits, imm.value())); - const uint16_t imm_value = static_cast(imm.value()); - EmitIType(SLTI, rs, rt, imm_value); - } - - // Although imm argument is int32_t, it is interpreted as an uint32_t. - // For example, -1 stands for 0xffffffffUL: it is encoded as 0xffff in the - // instruction imm field and is then sign extended back to 0xffffffffUL. 
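That round-trip is easy to verify on the host: the 16-bit field is sign-extended first and only then treated as unsigned, so an immediate of -1 behaves as the largest 32-bit value. For example:

    #include <cassert>
    #include <cstdint>

    int main() {
      const int16_t field = static_cast<int16_t>(0xFFFF);  // Encoded imm field.
      const uint32_t extended =
          static_cast<uint32_t>(static_cast<int32_t>(field));  // Sign-extend.
      assert(extended == 0xFFFFFFFFu);  // -1 is compared as 0xffffffffUL.
      return 0;
    }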
- void sltiu(Register rt, Register rs, const Immediate& imm) { - ASSERT(Utils::IsInt(kImmBits, imm.value())); - const uint16_t imm_value = static_cast(imm.value()); - EmitIType(SLTIU, rs, rt, imm_value); - } - - void sltu(Register rd, Register rs, Register rt) { - EmitRType(SPECIAL, rs, rt, rd, 0, SLTU); - } - - void sqrtd(DRegister dd, DRegister ds) { - FRegister fd = static_cast(dd * 2); - FRegister fs = static_cast(ds * 2); - EmitFpuRType(COP1, FMT_D, F0, fs, fd, COP1_SQRT); - } - - void sra(Register rd, Register rt, int sa) { - EmitRType(SPECIAL, R0, rt, rd, sa, SRA); - } - - void srav(Register rd, Register rt, Register rs) { - EmitRType(SPECIAL, rs, rt, rd, 0, SRAV); - } - - void srl(Register rd, Register rt, int sa) { - EmitRType(SPECIAL, R0, rt, rd, sa, SRL); - } - - void srlv(Register rd, Register rt, Register rs) { - EmitRType(SPECIAL, rs, rt, rd, 0, SRLV); - } - - void subd(DRegister dd, DRegister ds, DRegister dt) { - FRegister fd = static_cast(dd * 2); - FRegister fs = static_cast(ds * 2); - FRegister ft = static_cast(dt * 2); - EmitFpuRType(COP1, FMT_D, ft, fs, fd, COP1_SUB); - } - - void subu(Register rd, Register rs, Register rt) { - EmitRType(SPECIAL, rs, rt, rd, 0, SUBU); - } - - void sw(Register rt, const Address& addr) { EmitLoadStore(SW, rt, addr); } - - void swc1(FRegister ft, const Address& addr) { - EmitFpuLoadStore(SWC1, ft, addr); - } - - void xori(Register rt, Register rs, const Immediate& imm) { - ASSERT(Utils::IsUint(kImmBits, imm.value())); - const uint16_t imm_value = static_cast(imm.value()); - EmitIType(XORI, rs, rt, imm_value); - } - - void xor_(Register rd, Register rs, Register rt) { - EmitRType(SPECIAL, rs, rt, rd, 0, XOR); - } - - // Macros in alphabetical order. - - // Addition of rs and rt with the result placed in rd. - // After, ro < 0 if there was signed overflow, ro >= 0 otherwise. - // rd and ro must not be TMP. - // ro must be different from all the other registers. - // If rd, rs, and rt are the same register, then a scratch register different - // from the other registers is needed. - void AdduDetectOverflow(Register rd, - Register rs, - Register rt, - Register ro, - Register scratch = kNoRegister); - - // ro must be different from rd and rs. - // rd and ro must not be TMP. - // If rd and rs are the same, a scratch register different from the other - // registers is needed. - void AddImmediateDetectOverflow(Register rd, - Register rs, - int32_t imm, - Register ro, - Register scratch = kNoRegister) { - ASSERT(!in_delay_slot_); - LoadImmediate(rd, imm); - AdduDetectOverflow(rd, rs, rd, ro, scratch); - } - - // Subtraction of rt from rs (rs - rt) with the result placed in rd. - // After, ro < 0 if there was signed overflow, ro >= 0 otherwise. - // None of rd, rs, rt, or ro may be TMP. - // ro must be different from the other registers. - void SubuDetectOverflow(Register rd, Register rs, Register rt, Register ro); - - // ro must be different from rd and rs. - // None of rd, rs, rt, or ro may be TMP. - void SubImmediateDetectOverflow(Register rd, - Register rs, - int32_t imm, - Register ro) { - ASSERT(!in_delay_slot_); - LoadImmediate(rd, imm); - SubuDetectOverflow(rd, rs, rd, ro); - } - - void Branch(const StubEntry& stub_entry, Register pp = PP); - - void BranchLink(const StubEntry& stub_entry, - Patchability patchable = kNotPatchable); - - void BranchLinkPatchable(const StubEntry& stub_entry); - void BranchLinkToRuntime(); - - // Emit a call that shares its object pool entries with other calls - // that have the same equivalence marker. 
- void BranchLinkWithEquivalence(const StubEntry& stub_entry, - const Object& equivalence); - - void Drop(intptr_t stack_elements) { - ASSERT(stack_elements >= 0); - if (stack_elements > 0) { - addiu(SP, SP, Immediate(stack_elements * kWordSize)); - } - } - - void LoadPoolPointer(Register reg = PP) { - ASSERT(!in_delay_slot_); - CheckCodePointer(); - lw(reg, FieldAddress(CODE_REG, Code::object_pool_offset())); - set_constant_pool_allowed(reg == PP); - } - - void CheckCodePointer(); - - void RestoreCodePointer(); - - void LoadImmediate(Register rd, int32_t value) { - ASSERT(!in_delay_slot_); - if (Utils::IsInt(kImmBits, value)) { - addiu(rd, ZR, Immediate(value)); - } else { - const uint16_t low = Utils::Low16Bits(value); - const uint16_t high = Utils::High16Bits(value); - lui(rd, Immediate(high)); - if (low != 0) { - ori(rd, rd, Immediate(low)); - } - } - } - - void LoadImmediate(DRegister rd, double value) { - ASSERT(!in_delay_slot_); - FRegister frd = static_cast(rd * 2); - const int64_t ival = bit_cast(value); - const int32_t low = Utils::Low32Bits(ival); - const int32_t high = Utils::High32Bits(ival); - if (low != 0) { - LoadImmediate(TMP, low); - mtc1(TMP, frd); - } else { - mtc1(ZR, frd); - } - - if (high != 0) { - LoadImmediate(TMP, high); - mtc1(TMP, static_cast(frd + 1)); - } else { - mtc1(ZR, static_cast(frd + 1)); - } - } - - void LoadImmediate(FRegister rd, float value) { - ASSERT(!in_delay_slot_); - const int32_t ival = bit_cast(value); - if (ival == 0) { - mtc1(ZR, rd); - } else { - LoadImmediate(TMP, ival); - mtc1(TMP, rd); - } - } - - void AddImmediate(Register rd, Register rs, int32_t value) { - ASSERT(!in_delay_slot_); - if ((value == 0) && (rd == rs)) return; - // If value is 0, we still want to move rs to rd if they aren't the same. - if (Utils::IsInt(kImmBits, value)) { - addiu(rd, rs, Immediate(value)); - } else { - LoadImmediate(TMP, value); - addu(rd, rs, TMP); - } - } - - void AddImmediate(Register rd, int32_t value) { - ASSERT(!in_delay_slot_); - AddImmediate(rd, rd, value); - } - - void AndImmediate(Register rd, Register rs, int32_t imm) { - ASSERT(!in_delay_slot_); - if (imm == 0) { - mov(rd, ZR); - return; - } - - if (Utils::IsUint(kImmBits, imm)) { - andi(rd, rs, Immediate(imm)); - } else { - LoadImmediate(TMP, imm); - and_(rd, rs, TMP); - } - } - - void OrImmediate(Register rd, Register rs, int32_t imm) { - ASSERT(!in_delay_slot_); - if (imm == 0) { - mov(rd, rs); - return; - } - - if (Utils::IsUint(kImmBits, imm)) { - ori(rd, rs, Immediate(imm)); - } else { - LoadImmediate(TMP, imm); - or_(rd, rs, TMP); - } - } - - void XorImmediate(Register rd, Register rs, int32_t imm) { - ASSERT(!in_delay_slot_); - if (imm == 0) { - mov(rd, rs); - return; - } - - if (Utils::IsUint(kImmBits, imm)) { - xori(rd, rs, Immediate(imm)); - } else { - LoadImmediate(TMP, imm); - xor_(rd, rs, TMP); - } - } - - Register LoadConditionOperand(Register rd, - const Object& operand, - int16_t* imm) { - if (operand.IsSmi()) { - const int32_t val = reinterpret_cast(operand.raw()); - if (val == 0) { - return ZR; - } else if (Condition::IsValidImm(val)) { - ASSERT(*imm == 0); - *imm = val; - return IMM; - } - } - LoadObject(rd, operand); - return rd; - } - - // Branch to label if condition is true. - void BranchOnCondition(Condition cond, Label* l) { - ASSERT(!in_delay_slot_); - Register left = cond.left(); - Register right = cond.right(); - RelationOperator rel_op = cond.rel_op(); - switch (rel_op) { - case NV: - return; - case AL: - b(l); - return; - case EQ: // fall through. 
- case NE: { - if (left == IMM) { - addiu(AT, ZR, Immediate(cond.imm())); - left = AT; - } else if (right == IMM) { - addiu(AT, ZR, Immediate(cond.imm())); - right = AT; - } - if (rel_op == EQ) { - beq(left, right, l); - } else { - bne(left, right, l); - } - break; - } - case GT: { - if (left == ZR) { - bltz(right, l); - } else if (right == ZR) { - bgtz(left, l); - } else if (left == IMM) { - slti(AT, right, Immediate(cond.imm())); - bne(AT, ZR, l); - } else if (right == IMM) { - slti(AT, left, Immediate(cond.imm() + 1)); - beq(AT, ZR, l); - } else { - slt(AT, right, left); - bne(AT, ZR, l); - } - break; - } - case GE: { - if (left == ZR) { - blez(right, l); - } else if (right == ZR) { - bgez(left, l); - } else if (left == IMM) { - slti(AT, right, Immediate(cond.imm() + 1)); - bne(AT, ZR, l); - } else if (right == IMM) { - slti(AT, left, Immediate(cond.imm())); - beq(AT, ZR, l); - } else { - slt(AT, left, right); - beq(AT, ZR, l); - } - break; - } - case LT: { - if (left == ZR) { - bgtz(right, l); - } else if (right == ZR) { - bltz(left, l); - } else if (left == IMM) { - slti(AT, right, Immediate(cond.imm() + 1)); - beq(AT, ZR, l); - } else if (right == IMM) { - slti(AT, left, Immediate(cond.imm())); - bne(AT, ZR, l); - } else { - slt(AT, left, right); - bne(AT, ZR, l); - } - break; - } - case LE: { - if (left == ZR) { - bgez(right, l); - } else if (right == ZR) { - blez(left, l); - } else if (left == IMM) { - slti(AT, right, Immediate(cond.imm())); - beq(AT, ZR, l); - } else if (right == IMM) { - slti(AT, left, Immediate(cond.imm() + 1)); - bne(AT, ZR, l); - } else { - slt(AT, right, left); - beq(AT, ZR, l); - } - break; - } - case UGT: { - ASSERT((left != IMM) && (right != IMM)); // No unsigned constants used. - if (left == ZR) { - // NV: Never branch. Fall through. - } else if (right == ZR) { - bne(left, ZR, l); - } else { - sltu(AT, right, left); - bne(AT, ZR, l); - } - break; - } - case UGE: { - ASSERT((left != IMM) && (right != IMM)); // No unsigned constants used. - if (left == ZR) { - beq(right, ZR, l); - } else if (right == ZR) { - // AL: Always branch to l. - beq(ZR, ZR, l); - } else { - sltu(AT, left, right); - beq(AT, ZR, l); - } - break; - } - case ULT: { - ASSERT((left != IMM) && (right != IMM)); // No unsigned constants used. - if (left == ZR) { - bne(right, ZR, l); - } else if (right == ZR) { - // NV: Never branch. Fall through. - } else { - sltu(AT, left, right); - bne(AT, ZR, l); - } - break; - } - case ULE: { - ASSERT((left != IMM) && (right != IMM)); // No unsigned constants used. - if (left == ZR) { - // AL: Always branch to l. 
- beq(ZR, ZR, l); - } else if (right == ZR) { - beq(left, ZR, l); - } else { - sltu(AT, right, left); - beq(AT, ZR, l); - } - break; - } - default: - UNREACHABLE(); - } - } - - void BranchEqual(Register rd, Register rn, Label* l) { beq(rd, rn, l); } - - void BranchEqual(Register rd, const Immediate& imm, Label* l) { - ASSERT(!in_delay_slot_); - if (imm.value() == 0) { - beq(rd, ZR, l); - } else { - ASSERT(rd != CMPRES2); - LoadImmediate(CMPRES2, imm.value()); - beq(rd, CMPRES2, l); - } - } - - void BranchEqual(Register rd, const Object& object, Label* l) { - ASSERT(!in_delay_slot_); - ASSERT(rd != CMPRES2); - LoadObject(CMPRES2, object); - beq(rd, CMPRES2, l); - } - - void BranchNotEqual(Register rd, Register rn, Label* l) { bne(rd, rn, l); } - - void BranchNotEqual(Register rd, const Immediate& imm, Label* l) { - ASSERT(!in_delay_slot_); - if (imm.value() == 0) { - bne(rd, ZR, l); - } else { - ASSERT(rd != CMPRES2); - LoadImmediate(CMPRES2, imm.value()); - bne(rd, CMPRES2, l); - } - } - - void BranchNotEqual(Register rd, const Object& object, Label* l) { - ASSERT(!in_delay_slot_); - ASSERT(rd != CMPRES2); - LoadObject(CMPRES2, object); - bne(rd, CMPRES2, l); - } - - void BranchSignedGreater(Register rd, Register rs, Label* l) { - ASSERT(!in_delay_slot_); - slt(CMPRES2, rs, rd); // CMPRES2 = rd > rs ? 1 : 0. - bne(CMPRES2, ZR, l); - } - - void BranchSignedGreater(Register rd, const Immediate& imm, Label* l) { - ASSERT(!in_delay_slot_); - if (imm.value() == 0) { - bgtz(rd, l); - } else { - if (Utils::IsInt(kImmBits, imm.value() + 1)) { - slti(CMPRES2, rd, Immediate(imm.value() + 1)); - beq(CMPRES2, ZR, l); - } else { - ASSERT(rd != CMPRES2); - LoadImmediate(CMPRES2, imm.value()); - BranchSignedGreater(rd, CMPRES2, l); - } - } - } - - void BranchUnsignedGreater(Register rd, Register rs, Label* l) { - ASSERT(!in_delay_slot_); - sltu(CMPRES2, rs, rd); - bne(CMPRES2, ZR, l); - } - - void BranchUnsignedGreater(Register rd, const Immediate& imm, Label* l) { - ASSERT(!in_delay_slot_); - if (imm.value() == 0) { - BranchNotEqual(rd, Immediate(0), l); - } else { - if ((imm.value() != -1) && Utils::IsInt(kImmBits, imm.value() + 1)) { - sltiu(CMPRES2, rd, Immediate(imm.value() + 1)); - beq(CMPRES2, ZR, l); - } else { - ASSERT(rd != CMPRES2); - LoadImmediate(CMPRES2, imm.value()); - BranchUnsignedGreater(rd, CMPRES2, l); - } - } - } - - void BranchSignedGreaterEqual(Register rd, Register rs, Label* l) { - ASSERT(!in_delay_slot_); - slt(CMPRES2, rd, rs); // CMPRES2 = rd < rs ? 1 : 0. - beq(CMPRES2, ZR, l); // If CMPRES2 = 0, then rd >= rs. - } - - void BranchSignedGreaterEqual(Register rd, const Immediate& imm, Label* l) { - ASSERT(!in_delay_slot_); - if (imm.value() == 0) { - bgez(rd, l); - } else { - if (Utils::IsInt(kImmBits, imm.value())) { - slti(CMPRES2, rd, imm); - beq(CMPRES2, ZR, l); - } else { - ASSERT(rd != CMPRES2); - LoadImmediate(CMPRES2, imm.value()); - BranchSignedGreaterEqual(rd, CMPRES2, l); - } - } - } - - void BranchUnsignedGreaterEqual(Register rd, Register rs, Label* l) { - ASSERT(!in_delay_slot_); - sltu(CMPRES2, rd, rs); // CMPRES2 = rd < rs ? 1 : 0. 
- beq(CMPRES2, ZR, l); - } - - void BranchUnsignedGreaterEqual(Register rd, const Immediate& imm, Label* l) { - ASSERT(!in_delay_slot_); - if (imm.value() == 0) { - b(l); - } else { - if (Utils::IsInt(kImmBits, imm.value())) { - sltiu(CMPRES2, rd, imm); - beq(CMPRES2, ZR, l); - } else { - ASSERT(rd != CMPRES2); - LoadImmediate(CMPRES2, imm.value()); - BranchUnsignedGreaterEqual(rd, CMPRES2, l); - } - } - } - - void BranchSignedLess(Register rd, Register rs, Label* l) { - ASSERT(!in_delay_slot_); - BranchSignedGreater(rs, rd, l); - } - - void BranchSignedLess(Register rd, const Immediate& imm, Label* l) { - ASSERT(!in_delay_slot_); - if (imm.value() == 0) { - bltz(rd, l); - } else { - if (Utils::IsInt(kImmBits, imm.value())) { - slti(CMPRES2, rd, imm); - bne(CMPRES2, ZR, l); - } else { - ASSERT(rd != CMPRES2); - LoadImmediate(CMPRES2, imm.value()); - BranchSignedGreater(CMPRES2, rd, l); - } - } - } - - void BranchUnsignedLess(Register rd, Register rs, Label* l) { - ASSERT(!in_delay_slot_); - BranchUnsignedGreater(rs, rd, l); - } - - void BranchUnsignedLess(Register rd, const Immediate& imm, Label* l) { - ASSERT(!in_delay_slot_); - if (imm.value() == 0) { - // Never branch. Fall through. - } else { - if (Utils::IsInt(kImmBits, imm.value())) { - sltiu(CMPRES2, rd, imm); - bne(CMPRES2, ZR, l); - } else { - ASSERT(rd != CMPRES2); - LoadImmediate(CMPRES2, imm.value()); - BranchUnsignedGreater(CMPRES2, rd, l); - } - } - } - - void BranchSignedLessEqual(Register rd, Register rs, Label* l) { - ASSERT(!in_delay_slot_); - BranchSignedGreaterEqual(rs, rd, l); - } - - void BranchSignedLessEqual(Register rd, const Immediate& imm, Label* l) { - ASSERT(!in_delay_slot_); - if (imm.value() == 0) { - blez(rd, l); - } else { - if (Utils::IsInt(kImmBits, imm.value() + 1)) { - slti(CMPRES2, rd, Immediate(imm.value() + 1)); - bne(CMPRES2, ZR, l); - } else { - ASSERT(rd != CMPRES2); - LoadImmediate(CMPRES2, imm.value()); - BranchSignedGreaterEqual(CMPRES2, rd, l); - } - } - } - - void BranchUnsignedLessEqual(Register rd, Register rs, Label* l) { - ASSERT(!in_delay_slot_); - BranchUnsignedGreaterEqual(rs, rd, l); - } - - void BranchUnsignedLessEqual(Register rd, const Immediate& imm, Label* l) { - ASSERT(!in_delay_slot_); - if (imm.value() == 0) { - beq(rd, ZR, l); - } else { - if ((imm.value() != -1) && Utils::IsInt(kImmBits, imm.value() + 1)) { - sltiu(CMPRES2, rd, Immediate(imm.value() + 1)); - bne(CMPRES2, ZR, l); - } else { - ASSERT(rd != CMPRES2); - LoadImmediate(CMPRES2, imm.value()); - BranchUnsignedGreaterEqual(CMPRES2, rd, l); - } - } - } - - void Push(Register rt) { - ASSERT(!in_delay_slot_); - addiu(SP, SP, Immediate(-kWordSize)); - sw(rt, Address(SP)); - } - - void Pop(Register rt) { - ASSERT(!in_delay_slot_); - lw(rt, Address(SP)); - addiu(SP, SP, Immediate(kWordSize)); - } - - void Ret() { jr(RA); } - - void SmiTag(Register reg) { sll(reg, reg, kSmiTagSize); } - - void SmiTag(Register dst, Register src) { sll(dst, src, kSmiTagSize); } - - void SmiUntag(Register reg) { sra(reg, reg, kSmiTagSize); } - - void SmiUntag(Register dst, Register src) { sra(dst, src, kSmiTagSize); } - - void BranchIfNotSmi(Register reg, Label* label) { - andi(CMPRES1, reg, Immediate(kSmiTagMask)); - bne(CMPRES1, ZR, label); - } - - void BranchIfSmi(Register reg, Label* label) { - andi(CMPRES1, reg, Immediate(kSmiTagMask)); - beq(CMPRES1, ZR, label); - } - - void LoadFromOffset(Register reg, Register base, int32_t offset) { - ASSERT(!in_delay_slot_); - if (Utils::IsInt(kImmBits, offset)) { - lw(reg, Address(base, offset)); - 
} else { - LoadImmediate(TMP, offset); - addu(TMP, base, TMP); - lw(reg, Address(TMP, 0)); - } - } - - void LoadFieldFromOffset(Register reg, Register base, int32_t offset) { - LoadFromOffset(reg, base, offset - kHeapObjectTag); - } - - void StoreToOffset(Register reg, Register base, int32_t offset) { - ASSERT(!in_delay_slot_); - if (Utils::IsInt(kImmBits, offset)) { - sw(reg, Address(base, offset)); - } else { - LoadImmediate(TMP, offset); - addu(TMP, base, TMP); - sw(reg, Address(TMP, 0)); - } - } - - void StoreFieldToOffset(Register reg, Register base, int32_t offset) { - StoreToOffset(reg, base, offset - kHeapObjectTag); - } - - - void StoreDToOffset(DRegister reg, Register base, int32_t offset) { - ASSERT(!in_delay_slot_); - FRegister lo = static_cast(reg * 2); - FRegister hi = static_cast(reg * 2 + 1); - swc1(lo, Address(base, offset)); - swc1(hi, Address(base, offset + kWordSize)); - } - - void LoadDFromOffset(DRegister reg, Register base, int32_t offset) { - ASSERT(!in_delay_slot_); - FRegister lo = static_cast(reg * 2); - FRegister hi = static_cast(reg * 2 + 1); - lwc1(lo, Address(base, offset)); - lwc1(hi, Address(base, offset + kWordSize)); - } - - // dest gets the address of the following instruction. If temp is given, - // RA is preserved using it as a temporary. - void GetNextPC(Register dest, Register temp = kNoRegister); - - void ReserveAlignedFrameSpace(intptr_t frame_space); - - // Create a frame for calling into runtime that preserves all volatile - // registers. Frame's SP is guaranteed to be correctly aligned and - // frame_space bytes are reserved under it. - void EnterCallRuntimeFrame(intptr_t frame_space); - void LeaveCallRuntimeFrame(); - - void LoadObject(Register rd, const Object& object); - void LoadUniqueObject(Register rd, const Object& object); - void LoadFunctionFromCalleePool(Register dst, - const Function& function, - Register new_pp); - void LoadNativeEntry(Register rd, - const ExternalLabel* label, - Patchability patchable); - void PushObject(const Object& object); - - void LoadIsolate(Register result); - - void LoadClassId(Register result, Register object); - void LoadClassById(Register result, Register class_id); - void LoadClass(Register result, Register object); - void LoadClassIdMayBeSmi(Register result, Register object); - void LoadTaggedClassIdMayBeSmi(Register result, Register object); - - void StoreIntoObject(Register object, // Object we are storing into. - const Address& dest, // Where we are storing into. - Register value, // Value we are storing. - bool can_value_be_smi = true); - void StoreIntoObjectOffset(Register object, - int32_t offset, - Register value, - bool can_value_be_smi = true); - - void StoreIntoObjectNoBarrier(Register object, - const Address& dest, - Register value); - void StoreIntoObjectNoBarrierOffset(Register object, - int32_t offset, - Register value); - void StoreIntoObjectNoBarrier(Register object, - const Address& dest, - const Object& value); - void StoreIntoObjectNoBarrierOffset(Register object, - int32_t offset, - const Object& value); - - void CallRuntime(const RuntimeEntry& entry, intptr_t argument_count); - - // Set up a Dart frame on entry with a frame pointer and PC information to - // enable easy access to the RawInstruction object of code corresponding - // to this frame. - void EnterDartFrame(intptr_t frame_size); - void LeaveDartFrame(RestorePP restore_pp = kRestoreCallerPP); - void LeaveDartFrameAndReturn(Register ra = RA); - - // Set up a Dart frame for a function compiled for on-stack replacement. 
- // The frame layout is a normal Dart frame, but the frame is partially set - // up on entry (it is the frame of the unoptimized code). - void EnterOsrFrame(intptr_t extra_size); - - Address ElementAddressForIntIndex(bool is_external, - intptr_t cid, - intptr_t index_scale, - Register array, - intptr_t index) const; - void LoadElementAddressForIntIndex(Register address, - bool is_external, - intptr_t cid, - intptr_t index_scale, - Register array, - intptr_t index); - Address ElementAddressForRegIndex(bool is_load, - bool is_external, - intptr_t cid, - intptr_t index_scale, - Register array, - Register index); - void LoadElementAddressForRegIndex(Register address, - bool is_load, - bool is_external, - intptr_t cid, - intptr_t index_scale, - Register array, - Register index); - - void LoadHalfWordUnaligned(Register dst, Register addr, Register tmp); - void LoadHalfWordUnsignedUnaligned(Register dst, Register addr, Register tmp); - void StoreHalfWordUnaligned(Register src, Register addr, Register tmp); - void LoadWordUnaligned(Register dst, Register addr, Register tmp); - void StoreWordUnaligned(Register src, Register addr, Register tmp); - - static Address VMTagAddress() { - return Address(THR, Thread::vm_tag_offset()); - } - - // On some other platforms, we draw a distinction between safe and unsafe - // smis. - static bool IsSafe(const Object& object) { return true; } - static bool IsSafeSmi(const Object& object) { return object.IsSmi(); } - - bool constant_pool_allowed() const { return constant_pool_allowed_; } - void set_constant_pool_allowed(bool b) { constant_pool_allowed_ = b; } - - private: - AssemblerBuffer buffer_; - ObjectPoolWrapper object_pool_wrapper_; - - intptr_t prologue_offset_; - bool has_single_entry_point_; - bool use_far_branches_; - bool delay_slot_available_; - bool in_delay_slot_; - - class CodeComment : public ZoneAllocated { - public: - CodeComment(intptr_t pc_offset, const String& comment) - : pc_offset_(pc_offset), comment_(comment) {} - - intptr_t pc_offset() const { return pc_offset_; } - const String& comment() const { return comment_; } - - private: - intptr_t pc_offset_; - const String& comment_; - - DISALLOW_COPY_AND_ASSIGN(CodeComment); - }; - - GrowableArray comments_; - - bool constant_pool_allowed_; - - void BranchLink(const ExternalLabel* label); - void BranchLink(const Code& code, Patchability patchable); - - bool CanLoadFromObjectPool(const Object& object) const; - - void LoadWordFromPoolOffset(Register rd, int32_t offset, Register pp = PP); - void LoadObjectHelper(Register rd, const Object& object, bool is_unique); - - void Emit(int32_t value) { - // Emitting an instruction clears the delay slot state. - in_delay_slot_ = false; - delay_slot_available_ = false; - AssemblerBuffer::EnsureCapacity ensured(&buffer_); - buffer_.Emit(value); - } - - // Encode CPU instructions according to the types specified in - // Figures 4-1, 4-2 and 4-3 in VolI-A. 
- void EmitIType(Opcode opcode, Register rs, Register rt, uint16_t imm) { - Emit(opcode << kOpcodeShift | rs << kRsShift | rt << kRtShift | imm); - } - - void EmitLoadStore(Opcode opcode, Register rt, const Address& addr) { - Emit(opcode << kOpcodeShift | rt << kRtShift | addr.encoding()); - } - - void EmitFpuLoadStore(Opcode opcode, FRegister ft, const Address& addr) { - Emit(opcode << kOpcodeShift | ft << kFtShift | addr.encoding()); - } - - void EmitRegImmType(Opcode opcode, Register rs, RtRegImm code, uint16_t imm) { - Emit(opcode << kOpcodeShift | rs << kRsShift | code << kRtShift | imm); - } - - void EmitJType(Opcode opcode, uint32_t destination) { UNIMPLEMENTED(); } - - void EmitRType(Opcode opcode, - Register rs, - Register rt, - Register rd, - int sa, - SpecialFunction func) { - ASSERT(Utils::IsUint(5, sa)); - Emit(opcode << kOpcodeShift | rs << kRsShift | rt << kRtShift | - rd << kRdShift | sa << kSaShift | func << kFunctionShift); - } - - void EmitFpuRType(Opcode opcode, - Format fmt, - FRegister ft, - FRegister fs, - FRegister fd, - Cop1Function func) { - Emit(opcode << kOpcodeShift | fmt << kFmtShift | ft << kFtShift | - fs << kFsShift | fd << kFdShift | func << kCop1FnShift); - } - - int32_t EncodeBranchOffset(int32_t offset, int32_t instr); - - void EmitFarJump(int32_t offset, bool link); - void EmitFarBranch(Opcode b, Register rs, Register rt, int32_t offset); - void EmitFarRegImmBranch(RtRegImm b, Register rs, int32_t offset); - void EmitFarFpuBranch(bool kind, int32_t offset); - void EmitBranch(Opcode b, Register rs, Register rt, Label* label); - void EmitRegImmBranch(RtRegImm b, Register rs, Label* label); - void EmitFpuBranch(bool kind, Label* label); - - void EmitBranchDelayNop() { - Emit(Instr::kNopInstruction); // Branch delay NOP. - delay_slot_available_ = true; - } - - void StoreIntoObjectFilter(Register object, Register value, Label* no_update); - - // Shorter filtering sequence that assumes that value is not a smi. - void StoreIntoObjectFilterNoSmi(Register object, - Register value, - Label* no_update); - - DISALLOW_ALLOCATION(); - DISALLOW_COPY_AND_ASSIGN(Assembler); -}; - -} // namespace dart - -#endif // RUNTIME_VM_ASSEMBLER_MIPS_H_ diff --git a/runtime/vm/assembler_mips_test.cc b/runtime/vm/assembler_mips_test.cc deleted file mode 100644 index 44c633f85c4..00000000000 --- a/runtime/vm/assembler_mips_test.cc +++ /dev/null @@ -1,2323 +0,0 @@ -// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file -// for details. All rights reserved. Use of this source code is governed by a -// BSD-style license that can be found in the LICENSE file. - -#include "vm/globals.h" -#if defined(TARGET_ARCH_MIPS) - -#include "vm/assembler.h" -#include "vm/cpu.h" -#include "vm/os.h" -#include "vm/unit_test.h" -#include "vm/virtual_memory.h" - -namespace dart { - -#define __ assembler-> - -ASSEMBLER_TEST_GENERATE(Simple, assembler) { - __ LoadImmediate(V0, 42); - __ jr(RA); -} - - -ASSEMBLER_TEST_RUN(Simple, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(Addiu, assembler) { - __ addiu(V0, ZR, Immediate(42)); - __ jr(RA); -} - - -ASSEMBLER_TEST_RUN(Addiu, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(Addiu_overflow, assembler) { - __ LoadImmediate(V0, 0x7fffffff); - __ addiu(V0, V0, Immediate(1)); // V0 is modified on overflow. 
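The point of this test is that addiu, unlike the trapping addi, raises no overflow exception: the result simply wraps in two's complement, which is why V0 is expected to change. The wrap modelled in portable C++ (unsigned arithmetic avoids signed-overflow undefined behavior):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t v0 = 0x7FFFFFFFu;                        // INT32_MAX.
      const int32_t wrapped = static_cast<int32_t>(v0 + 1u);  // addiu wraps.
      assert(wrapped == INT32_MIN);                           // 0x80000000.
      return 0;
    }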
-  __ jr(RA);
-}
-
-
-ASSEMBLER_TEST_RUN(Addiu_overflow, test) {
-  typedef int (*SimpleCode)() DART_UNUSED;
-  EXPECT_EQ(static_cast<int32_t>(0x80000000),
-            EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry()));
-}
-
-
-ASSEMBLER_TEST_GENERATE(Addu, assembler) {
-  __ addiu(T2, ZR, Immediate(21));
-  __ addiu(T3, ZR, Immediate(21));
-  __ addu(V0, T2, T3);
-  __ jr(RA);
-}
-
-
-ASSEMBLER_TEST_RUN(Addu, test) {
-  typedef int (*SimpleCode)() DART_UNUSED;
-  EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry()));
-}
-
-
-ASSEMBLER_TEST_GENERATE(Addu_overflow, assembler) {
-  __ LoadImmediate(T2, 0x7fffffff);
-  __ addiu(T3, R0, Immediate(1));
-  __ addu(V0, T2, T3);
-  __ jr(RA);
-}
-
-
-ASSEMBLER_TEST_RUN(Addu_overflow, test) {
-  typedef int (*SimpleCode)() DART_UNUSED;
-  EXPECT_EQ(static_cast<int32_t>(0x80000000),
-            EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry()));
-}
-
-
-ASSEMBLER_TEST_GENERATE(And, assembler) {
-  __ addiu(T2, ZR, Immediate(42));
-  __ addiu(T3, ZR, Immediate(2));
-  __ and_(V0, T2, T3);
-  __ jr(RA);
-}
-
-
-ASSEMBLER_TEST_RUN(And, test) {
-  typedef int (*SimpleCode)() DART_UNUSED;
-  EXPECT_EQ(2, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry()));
-}
-
-
-ASSEMBLER_TEST_GENERATE(Andi, assembler) {
-  __ addiu(T1, ZR, Immediate(42));
-  __ andi(V0, T1, Immediate(2));
-  __ jr(RA);
-}
-
-
-ASSEMBLER_TEST_RUN(Andi, test) {
-  typedef int (*SimpleCode)() DART_UNUSED;
-  EXPECT_EQ(2, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry()));
-}
-
-
-ASSEMBLER_TEST_GENERATE(Clo, assembler) {
-  __ addiu(T1, ZR, Immediate(-1));
-  __ clo(V0, T1);
-  __ jr(RA);
-}
-
-
-ASSEMBLER_TEST_RUN(Clo, test) {
-  typedef int (*SimpleCode)() DART_UNUSED;
-  EXPECT_EQ(32, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry()));
-}
-
-
-ASSEMBLER_TEST_GENERATE(Clz, assembler) {
-  __ addiu(T1, ZR, Immediate(0x7fff));
-  __ clz(V0, T1);
-  __ jr(RA);
-}
-
-
-ASSEMBLER_TEST_RUN(Clz, test) {
-  typedef int (*SimpleCode)() DART_UNUSED;
-  EXPECT_EQ(17, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry()));
-}
-
-
-ASSEMBLER_TEST_GENERATE(MtloMflo, assembler) {
-  __ LoadImmediate(T0, 42);
-  __ mtlo(T0);
-  __ mflo(V0);
-  __ jr(RA);
-}
-
-
-ASSEMBLER_TEST_RUN(MtloMflo, test) {
-  typedef int (*SimpleCode)() DART_UNUSED;
-  EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry()));
-}
-
-
-ASSEMBLER_TEST_GENERATE(MthiMfhi, assembler) {
-  __ LoadImmediate(T0, 42);
-  __ mthi(T0);
-  __ mfhi(V0);
-  __ jr(RA);
-}
-
-
-ASSEMBLER_TEST_RUN(MthiMfhi, test) {
-  typedef int (*SimpleCode)() DART_UNUSED;
-  EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry()));
-}
-
-
-ASSEMBLER_TEST_GENERATE(Divu, assembler) {
-  __ addiu(T1, ZR, Immediate(27));
-  __ addiu(T2, ZR, Immediate(9));
-  __ divu(T1, T2);
-  __ mflo(V0);
-  __ jr(RA);
-}
-
-
-ASSEMBLER_TEST_RUN(Divu, test) {
-  typedef int (*SimpleCode)() DART_UNUSED;
-  EXPECT_EQ(3, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry()));
-}
-
-
-ASSEMBLER_TEST_GENERATE(Div, assembler) {
-  __ addiu(T1, ZR, Immediate(27));
-  __ addiu(T2, ZR, Immediate(9));
-  __ div(T1, T2);
-  __ mflo(V0);
-  __ jr(RA);
-}
-
-
-ASSEMBLER_TEST_RUN(Div, test) {
-  typedef int (*SimpleCode)() DART_UNUSED;
-  EXPECT_EQ(3, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry()));
-}
-
-
-ASSEMBLER_TEST_GENERATE(Divu_corner, assembler) {
-  __ LoadImmediate(T1, 0x80000000);
-  __ LoadImmediate(T2, 0xffffffff);
-  __ divu(T1, T2);
-  __ mflo(V0);
-  __ jr(RA);
-}
-
-
-ASSEMBLER_TEST_RUN(Divu_corner, test) {
-  typedef int (*SimpleCode)() DART_UNUSED;
-  EXPECT_EQ(0, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry()));
-}
-
-
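The two corner-case tests on either side of this point both divide 0x80000000
by 0xffffffff, once unsigned and once signed. A host-side sketch of the
arithmetic being pinned down (plain C++, independent of the deleted assembler;
the signed expectation is stated in a comment because INT_MIN / -1 is
undefined behavior in C++):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t a = 0x80000000u;  // 2^31
      const uint32_t b = 0xffffffffu;  // 2^32 - 1
      assert(a / b == 0u);  // Divu_corner: unsigned quotient truncates to 0.
      // Div_corner: reinterpreted as signed, this is INT_MIN / -1. The
      // quotient 2^31 is unrepresentable in 32 bits; the test expects the
      // MIPS div instruction to leave 0x80000000 in LO.
      return 0;
    }
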
-ASSEMBLER_TEST_GENERATE(Div_corner, assembler) {
-  __ LoadImmediate(T1, 0x80000000);
-  __ LoadImmediate(T2, 0xffffffff);
-  __ div(T1, T2);
-  __ mflo(V0);
-  __ jr(RA);
-}
-
-
-ASSEMBLER_TEST_RUN(Div_corner, test) {
-  typedef int (*SimpleCode)() DART_UNUSED;
-  EXPECT_EQ(static_cast<int32_t>(0x80000000),
-            EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry()));
-}
-
-
-ASSEMBLER_TEST_GENERATE(Lb, assembler) {
-  __ addiu(SP, SP, Immediate(-kWordSize * 30));
-  __ LoadImmediate(T1, 0xff);
-  __ sb(T1, Address(SP));
-  __ lb(V0, Address(SP));
-  __ addiu(SP, SP, Immediate(kWordSize * 30));
-  __ jr(RA);
-}
-
-
-ASSEMBLER_TEST_RUN(Lb, test) {
-  typedef int (*SimpleCode)() DART_UNUSED;
-  EXPECT_EQ(-1, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry()));
-}
-
-
-ASSEMBLER_TEST_GENERATE(Lb_offset, assembler) {
-  __ addiu(SP, SP, Immediate(-kWordSize * 30));
-  __ LoadImmediate(T1, 0xff);
-  __ sb(T1, Address(SP, 1));
-  __ lb(V0, Address(SP, 1));
-  __ addiu(SP, SP, Immediate(kWordSize * 30));
-  __ jr(RA);
-}
-
-
-ASSEMBLER_TEST_RUN(Lb_offset, test) {
-  typedef int (*SimpleCode)() DART_UNUSED;
-  EXPECT_EQ(-1, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry()));
-}
-
-
-ASSEMBLER_TEST_GENERATE(Lbu, assembler) {
-  __ addiu(SP, SP, Immediate(-kWordSize * 30));
-  __ LoadImmediate(T1, 0xff);
-  __ sb(T1, Address(SP));
-  __ lbu(V0, Address(SP));
-  __ addiu(SP, SP, Immediate(kWordSize * 30));
-  __ jr(RA);
-}
-
-
-ASSEMBLER_TEST_RUN(Lbu, test) {
-  typedef int (*SimpleCode)() DART_UNUSED;
-  EXPECT_EQ(255, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry()));
-}
-
-
-ASSEMBLER_TEST_GENERATE(Lh, assembler) {
-  __ addiu(SP, SP, Immediate(-kWordSize * 30));
-  __ LoadImmediate(T1, 0xffff);
-  __ sh(T1, Address(SP));
-  __ lh(V0, Address(SP));
-  __ addiu(SP, SP, Immediate(kWordSize * 30));
-  __ jr(RA);
-}
-
-
-ASSEMBLER_TEST_RUN(Lh, test) {
-  typedef int (*SimpleCode)() DART_UNUSED;
-  EXPECT_EQ(-1, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry()));
-}
-
-
-ASSEMBLER_TEST_GENERATE(Lhu, assembler) {
-  __ addiu(SP, SP, Immediate(-kWordSize * 30));
-  __ LoadImmediate(T1, 0xffff);
-  __ sh(T1, Address(SP));
-  __ lhu(V0, Address(SP));
-  __ addiu(SP, SP, Immediate(kWordSize * 30));
-  __ jr(RA);
-}
-
-
-ASSEMBLER_TEST_RUN(Lhu, test) {
-  typedef int (*SimpleCode)() DART_UNUSED;
-  EXPECT_EQ(65535, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry()));
-}
-
-
-ASSEMBLER_TEST_GENERATE(Lw, assembler) {
-  __ addiu(SP, SP, Immediate(-kWordSize * 30));
-  __ LoadImmediate(T1, -1);
-  __ sw(T1, Address(SP));
-  __ lw(V0, Address(SP));
-  __ addiu(SP, SP, Immediate(kWordSize * 30));
-  __ jr(RA);
-}
-
-
-ASSEMBLER_TEST_RUN(Lw, test) {
-  typedef int (*SimpleCode)() DART_UNUSED;
-  EXPECT_EQ(-1, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry()));
-}
-
-
-ASSEMBLER_TEST_GENERATE(LoadHalfWordUnaligned, assembler) {
-  __ LoadHalfWordUnaligned(V0, A0, TMP);
-  __ jr(RA);
-}
-
-
-ASSEMBLER_TEST_RUN(LoadHalfWordUnaligned, test) {
-  EXPECT(test != NULL);
-  typedef intptr_t (*LoadHalfWordUnaligned)(intptr_t) DART_UNUSED;
-  uint8_t buffer[4] = {
-      0x89, 0xAB, 0xCD, 0xEF,
-  };
-
-  EXPECT_EQ(
-      static_cast<int16_t>(static_cast<uint16_t>(0xAB89)),
-      EXECUTE_TEST_CODE_INTPTR_INTPTR(LoadHalfWordUnaligned, test->entry(),
-                                      reinterpret_cast<intptr_t>(&buffer[0])));
-  EXPECT_EQ(
-      static_cast<int16_t>(static_cast<uint16_t>(0xCDAB)),
-      EXECUTE_TEST_CODE_INTPTR_INTPTR(LoadHalfWordUnaligned, test->entry(),
-                                      reinterpret_cast<intptr_t>(&buffer[1])));
-}
-
-
-ASSEMBLER_TEST_GENERATE(LoadHalfWordUnsignedUnaligned, assembler) {
-  __ LoadHalfWordUnsignedUnaligned(V0, A0, TMP);
-  __ jr(RA);
-}
-
-
-ASSEMBLER_TEST_RUN(LoadHalfWordUnsignedUnaligned, test) {
-  EXPECT(test != NULL);
-  typedef intptr_t (*LoadHalfWordUnsignedUnaligned)(intptr_t) DART_UNUSED;
-  uint8_t buffer[4] = {
-      0x89, 0xAB, 0xCD, 0xEF,
-  };
-
-  EXPECT_EQ(0xAB89, EXECUTE_TEST_CODE_INTPTR_INTPTR(
-                        LoadHalfWordUnsignedUnaligned, test->entry(),
-                        reinterpret_cast<intptr_t>(&buffer[0])));
-  EXPECT_EQ(0xCDAB, EXECUTE_TEST_CODE_INTPTR_INTPTR(
-                        LoadHalfWordUnsignedUnaligned, test->entry(),
-                        reinterpret_cast<intptr_t>(&buffer[1])));
-}
-
-
-ASSEMBLER_TEST_GENERATE(StoreHalfWordUnaligned, assembler) {
-  __ LoadImmediate(A1, 0xABCD);
-  __ StoreHalfWordUnaligned(A1, A0, TMP);
-  __ mov(V0, A1);
-  __ jr(RA);
-}
-
-
-ASSEMBLER_TEST_RUN(StoreHalfWordUnaligned, test) {
-  EXPECT(test != NULL);
-  typedef intptr_t (*StoreHalfWordUnaligned)(intptr_t) DART_UNUSED;
-  uint8_t buffer[4] = {
-      0, 0, 0, 0,
-  };
-
-  EXPECT_EQ(0xABCD, EXECUTE_TEST_CODE_INTPTR_INTPTR(
-                        StoreHalfWordUnaligned, test->entry(),
-                        reinterpret_cast<intptr_t>(&buffer[0])));
-  EXPECT_EQ(0xCD, buffer[0]);
-  EXPECT_EQ(0xAB, buffer[1]);
-  EXPECT_EQ(0, buffer[2]);
-
-  EXPECT_EQ(0xABCD, EXECUTE_TEST_CODE_INTPTR_INTPTR(
-                        StoreHalfWordUnaligned, test->entry(),
-                        reinterpret_cast<intptr_t>(&buffer[1])));
-  EXPECT_EQ(0xCD, buffer[1]);
-  EXPECT_EQ(0xAB, buffer[2]);
-  EXPECT_EQ(0, buffer[3]);
-}
-
-
-ASSEMBLER_TEST_GENERATE(LoadWordUnaligned, assembler) {
-  __ LoadWordUnaligned(V0, A0, TMP);
-  __ jr(RA);
-}
-
-
-ASSEMBLER_TEST_RUN(LoadWordUnaligned, test) {
-  EXPECT(test != NULL);
-  typedef intptr_t (*LoadWordUnaligned)(intptr_t) DART_UNUSED;
-  uint8_t buffer[8] = {0x12, 0x34, 0x56, 0x78, 0x9A, 0xBC, 0xDE, 0xF0};
-
-  EXPECT_EQ(
-      static_cast<intptr_t>(0x78563412),
-      EXECUTE_TEST_CODE_INTPTR_INTPTR(LoadWordUnaligned, test->entry(),
-                                      reinterpret_cast<intptr_t>(&buffer[0])));
-  EXPECT_EQ(
-      static_cast<intptr_t>(0x9A785634),
-      EXECUTE_TEST_CODE_INTPTR_INTPTR(LoadWordUnaligned, test->entry(),
-                                      reinterpret_cast<intptr_t>(&buffer[1])));
-  EXPECT_EQ(
-      static_cast<intptr_t>(0xBC9A7856),
-      EXECUTE_TEST_CODE_INTPTR_INTPTR(LoadWordUnaligned, test->entry(),
-                                      reinterpret_cast<intptr_t>(&buffer[2])));
-  EXPECT_EQ(
-      static_cast<intptr_t>(0xDEBC9A78),
-      EXECUTE_TEST_CODE_INTPTR_INTPTR(LoadWordUnaligned, test->entry(),
-                                      reinterpret_cast<intptr_t>(&buffer[3])));
-}
-
-
-ASSEMBLER_TEST_GENERATE(StoreWordUnaligned, assembler) {
-  __ LoadImmediate(A1, 0x12345678);
-  __ StoreWordUnaligned(A1, A0, TMP);
-  __ mov(V0, A1);
-  __ jr(RA);
-}
-
-
-ASSEMBLER_TEST_RUN(StoreWordUnaligned, test) {
-  EXPECT(test != NULL);
-  typedef intptr_t (*StoreWordUnaligned)(intptr_t) DART_UNUSED;
-  uint8_t buffer[8] = {0, 0, 0, 0, 0, 0, 0, 0};
-
-  EXPECT_EQ(0x12345678, EXECUTE_TEST_CODE_INTPTR_INTPTR(
-                            StoreWordUnaligned, test->entry(),
-                            reinterpret_cast<intptr_t>(&buffer[0])));
-  EXPECT_EQ(0x78, buffer[0]);
-  EXPECT_EQ(0x56, buffer[1]);
-  EXPECT_EQ(0x34, buffer[2]);
-  EXPECT_EQ(0x12, buffer[3]);
-
-  EXPECT_EQ(0x12345678, EXECUTE_TEST_CODE_INTPTR_INTPTR(
-                            StoreWordUnaligned, test->entry(),
-                            reinterpret_cast<intptr_t>(&buffer[1])));
-  EXPECT_EQ(0x78, buffer[1]);
-  EXPECT_EQ(0x56, buffer[2]);
-  EXPECT_EQ(0x34, buffer[3]);
-  EXPECT_EQ(0x12, buffer[4]);
-
-  EXPECT_EQ(0x12345678, EXECUTE_TEST_CODE_INTPTR_INTPTR(
-                            StoreWordUnaligned, test->entry(),
-                            reinterpret_cast<intptr_t>(&buffer[2])));
-  EXPECT_EQ(0x78, buffer[2]);
-  EXPECT_EQ(0x56, buffer[3]);
-  EXPECT_EQ(0x34, buffer[4]);
-  EXPECT_EQ(0x12, buffer[5]);
-
-  EXPECT_EQ(0x12345678, EXECUTE_TEST_CODE_INTPTR_INTPTR(
-                            StoreWordUnaligned, test->entry(),
-                            reinterpret_cast<intptr_t>(&buffer[3])));
-  EXPECT_EQ(0x78, buffer[3]);
-  EXPECT_EQ(0x56, buffer[4]);
-  EXPECT_EQ(0x34, buffer[5]);
-  EXPECT_EQ(0x12, buffer[6]);
-}
-
-
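Every unaligned-access test above asserts the same little-endian
decomposition: a 32-bit store of 0x12345678 must put 0x78 in the first byte
regardless of alignment. A minimal portable sketch of that invariant (the
helper name is ours; the deleted assembler built the access from byte and
halfword instructions plus a temp register):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    // Write a 32-bit value one byte at a time, little-endian, so the
    // destination pointer needs no alignment.
    static void StoreWordByBytes(uint8_t* p, uint32_t value) {
      for (size_t i = 0; i < 4; i++) {
        p[i] = static_cast<uint8_t>(value >> (8 * i));
      }
    }

    int main() {
      uint8_t buffer[8] = {0};
      StoreWordByBytes(&buffer[1], 0x12345678u);  // Deliberately misaligned.
      assert(buffer[1] == 0x78 && buffer[2] == 0x56);
      assert(buffer[3] == 0x34 && buffer[4] == 0x12);
      return 0;
    }
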
-ASSEMBLER_TEST_GENERATE(Lui, assembler) { - __ lui(V0, Immediate(42)); - __ jr(RA); -} - - -ASSEMBLER_TEST_RUN(Lui, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(42 << 16, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(Sll, assembler) { - __ LoadImmediate(T1, 21); - __ sll(V0, T1, 1); - __ jr(RA); -} - - -ASSEMBLER_TEST_RUN(Sll, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(Srl, assembler) { - __ LoadImmediate(T1, 84); - __ srl(V0, T1, 1); - __ jr(RA); -} - - -ASSEMBLER_TEST_RUN(Srl, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(LShifting, assembler) { - __ LoadImmediate(T1, 1); - __ sll(T1, T1, 31); - __ srl(V0, T1, 31); - __ jr(RA); -} - - -ASSEMBLER_TEST_RUN(LShifting, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(1, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(RShifting, assembler) { - __ LoadImmediate(T1, 1); - __ sll(T1, T1, 31); - __ sra(V0, T1, 31); - __ jr(RA); -} - - -ASSEMBLER_TEST_RUN(RShifting, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(-1, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(Sllv, assembler) { - __ LoadImmediate(T1, 21); - __ LoadImmediate(T2, 1); - __ sllv(V0, T1, T2); - __ jr(RA); -} - - -ASSEMBLER_TEST_RUN(Sllv, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(Srlv, assembler) { - __ LoadImmediate(T1, 84); - __ LoadImmediate(T2, 1); - __ srlv(V0, T1, T2); - __ jr(RA); -} - - -ASSEMBLER_TEST_RUN(Srlv, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(LShiftingV, assembler) { - __ LoadImmediate(T1, 1); - __ LoadImmediate(T2, 31); - __ sllv(T1, T1, T2); - __ srlv(V0, T1, T2); - __ jr(RA); -} - - -ASSEMBLER_TEST_RUN(LShiftingV, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(1, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(RShiftingV, assembler) { - __ LoadImmediate(T1, 1); - __ LoadImmediate(T2, 31); - __ sllv(T1, T1, T2); - __ srav(V0, T1, T2); - __ jr(RA); -} - - -ASSEMBLER_TEST_RUN(RShiftingV, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(-1, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(Mult_pos, assembler) { - __ LoadImmediate(T1, 6); - __ LoadImmediate(T2, 7); - __ mult(T1, T2); - __ mflo(V0); - __ jr(RA); -} - - -ASSEMBLER_TEST_RUN(Mult_pos, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(Mult_neg, assembler) { - __ LoadImmediate(T1, -6); - __ LoadImmediate(T2, 7); - __ mult(T1, T2); - __ mflo(V0); - __ jr(RA); -} - - -ASSEMBLER_TEST_RUN(Mult_neg, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(-42, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(Mult_neg_hi, assembler) { - __ LoadImmediate(T1, -6); - __ LoadImmediate(T2, 7); - __ mult(T1, T2); - __ mfhi(V0); - __ jr(RA); -} - - -ASSEMBLER_TEST_RUN(Mult_neg_hi, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(-1, EXECUTE_TEST_CODE_INT32(SimpleCode, 
test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(Multu_lo, assembler) { - __ LoadImmediate(T1, 6); - __ LoadImmediate(T2, 7); - __ multu(T1, T2); - __ mflo(V0); - __ jr(RA); -} - - -ASSEMBLER_TEST_RUN(Multu_lo, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(Multu_hi, assembler) { - __ LoadImmediate(T1, -1); - __ LoadImmediate(T2, -1); - __ multu(T1, T2); - __ mfhi(V0); - __ jr(RA); -} - - -ASSEMBLER_TEST_RUN(Multu_hi, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(-2, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(Madd_neg, assembler) { - __ LoadImmediate(T1, -6); - __ LoadImmediate(T2, 7); - __ mult(T1, T2); - __ madd(T1, T2); - __ mflo(V0); - __ mfhi(V1); - __ jr(RA); -} - - -ASSEMBLER_TEST_RUN(Madd_neg, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(-84, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(Subu, assembler) { - __ LoadImmediate(T1, 737); - __ LoadImmediate(T2, 695); - __ subu(V0, T1, T2); - __ jr(RA); -} - - -ASSEMBLER_TEST_RUN(Subu, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(Or, assembler) { - __ LoadImmediate(T1, 34); - __ LoadImmediate(T2, 8); - __ or_(V0, T1, T2); - __ jr(RA); -} - - -ASSEMBLER_TEST_RUN(Or, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(Nor, assembler) { - __ LoadImmediate(T1, -47); - __ LoadImmediate(T2, -60); - __ nor(V0, T1, T2); - __ jr(RA); -} - - -ASSEMBLER_TEST_RUN(Nor, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(Xor, assembler) { - __ LoadImmediate(T1, 51); - __ LoadImmediate(T2, 25); - __ xor_(V0, T1, T2); - __ jr(RA); -} - - -ASSEMBLER_TEST_RUN(Xor, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(Xori, assembler) { - __ LoadImmediate(T0, 51); - __ xori(V0, T0, Immediate(25)); - __ jr(RA); -} - - -ASSEMBLER_TEST_RUN(Xori, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(Slt, assembler) { - __ LoadImmediate(T1, -1); - __ LoadImmediate(T2, 0); - __ slt(V0, T1, T2); - __ jr(RA); -} - - -ASSEMBLER_TEST_RUN(Slt, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(1, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(Sltu, assembler) { - __ LoadImmediate(T1, -1); - __ LoadImmediate(T2, 0); - __ sltu(V0, T1, T2); // 0xffffffffUL < 0 -> 0. - __ jr(RA); -} - - -ASSEMBLER_TEST_RUN(Sltu, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(0, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(Slti, assembler) { - __ LoadImmediate(T1, -2); - __ slti(A0, T1, Immediate(-1)); // -2 < -1 -> 1. - __ slti(A1, T1, Immediate(0)); // -2 < 0 -> 1. 
- __ and_(V0, A0, A1); - __ jr(RA); -} - - -ASSEMBLER_TEST_RUN(Slti, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(1, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(Sltiu, assembler) { - __ LoadImmediate(T1, -1); - __ LoadImmediate(T2, 0x10000); - __ sltiu(A0, T1, Immediate(-2)); // 0xffffffffUL < 0xfffffffeUL -> 0. - __ sltiu(A1, T1, Immediate(0)); // 0xffffffffUL < 0 -> 0. - __ sltiu(A2, T2, Immediate(-2)); // 0x10000UL < 0xfffffffeUL -> 1. - __ addiu(A2, A2, Immediate(-1)); - __ or_(V0, A0, A1); - __ or_(V0, V0, A2); - __ jr(RA); -} - - -ASSEMBLER_TEST_RUN(Sltiu, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(0, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(Movz, assembler) { - __ LoadImmediate(T1, 42); - __ LoadImmediate(T2, 23); - __ slt(T3, T1, T2); - __ movz(V0, T1, T3); - __ jr(RA); -} - - -ASSEMBLER_TEST_RUN(Movz, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(Movn, assembler) { - __ LoadImmediate(T1, 42); - __ LoadImmediate(T2, 23); - __ slt(T3, T2, T1); - __ movn(V0, T1, T3); - __ jr(RA); -} - - -ASSEMBLER_TEST_RUN(Movn, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(Jr_delay, assembler) { - __ jr(RA); - __ delay_slot()->ori(V0, ZR, Immediate(42)); -} - - -ASSEMBLER_TEST_RUN(Jr_delay, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(Beq_backward, assembler) { - Label l; - - __ LoadImmediate(T1, 0); - __ LoadImmediate(T2, 1); - __ Bind(&l); - __ addiu(T1, T1, Immediate(1)); - __ beq(T1, T2, &l); - __ ori(V0, T1, Immediate(0)); - __ jr(RA); -} - - -ASSEMBLER_TEST_RUN(Beq_backward, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(2, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(Beq_backward_far, assembler) { - Label l; - - __ set_use_far_branches(true); - - __ LoadImmediate(T1, 0); - __ LoadImmediate(T2, 1); - __ Bind(&l); - __ addiu(T1, T1, Immediate(1)); - __ beq(T1, T2, &l); - __ ori(V0, T1, Immediate(0)); - __ jr(RA); -} - - -ASSEMBLER_TEST_RUN(Beq_backward_far, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(2, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(Beq_backward_delay, assembler) { - Label l; - - __ LoadImmediate(T1, 0); - __ LoadImmediate(T2, 1); - __ Bind(&l); - __ addiu(T1, T1, Immediate(1)); - __ beq(T1, T2, &l); - __ delay_slot()->addiu(T1, T1, Immediate(1)); - __ ori(V0, T1, Immediate(0)); - __ jr(RA); -} - - -ASSEMBLER_TEST_RUN(Beq_backward_delay, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(4, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(Beq_forward_taken, assembler) { - Label l; - - __ LoadImmediate(T5, 1); - __ LoadImmediate(T6, 1); - - __ LoadImmediate(V0, 42); - __ beq(T5, T6, &l); - __ LoadImmediate(V0, 0); - __ Bind(&l); - __ jr(RA); -} - - -ASSEMBLER_TEST_RUN(Beq_forward_taken, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(Beq_forward_taken_far, assembler) { - Label l; - - __ set_use_far_branches(true); - - __ LoadImmediate(T5, 1); - __ LoadImmediate(T6, 1); - - __ 
LoadImmediate(V0, 42); - __ beq(T5, T6, &l); - __ LoadImmediate(V0, 0); - __ Bind(&l); - __ jr(RA); -} - - -ASSEMBLER_TEST_RUN(Beq_forward_taken_far, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(Beq_forward_not_taken, assembler) { - Label l; - - __ LoadImmediate(T5, 0); - __ LoadImmediate(T6, 1); - - __ LoadImmediate(V0, 42); - __ beq(T5, T6, &l); - __ nop(); - __ LoadImmediate(V0, 0); - __ Bind(&l); - __ jr(RA); -} - - -ASSEMBLER_TEST_RUN(Beq_forward_not_taken, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(0, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(Beq_forward_not_taken_far, assembler) { - Label l; - - __ set_use_far_branches(true); - - __ LoadImmediate(T5, 0); - __ LoadImmediate(T6, 1); - - __ LoadImmediate(V0, 42); - __ beq(T5, T6, &l); - __ nop(); - __ LoadImmediate(V0, 0); - __ Bind(&l); - __ jr(RA); -} - - -ASSEMBLER_TEST_RUN(Beq_forward_not_taken_far, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(0, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(Beq_forward_not_taken_far2, assembler) { - Label l; - - __ set_use_far_branches(true); - - __ LoadImmediate(T5, 0); - __ LoadImmediate(T6, 1); - - __ LoadImmediate(V0, 42); - __ beq(T5, T6, &l); - __ nop(); - for (int i = 0; i < (1 << 15); i++) { - __ nop(); - } - __ LoadImmediate(V0, 0); - __ Bind(&l); - __ jr(RA); -} - - -ASSEMBLER_TEST_RUN(Beq_forward_not_taken_far2, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(0, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(Beq_forward_taken2, assembler) { - Label l; - - __ LoadImmediate(T5, 1); - __ LoadImmediate(T6, 1); - - __ LoadImmediate(V0, 42); - __ beq(T5, T6, &l); - __ nop(); - __ nop(); - __ LoadImmediate(V0, 0); - __ Bind(&l); - __ jr(RA); -} - - -ASSEMBLER_TEST_RUN(Beq_forward_taken2, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(Beq_forward_taken_far2, assembler) { - Label l; - - __ set_use_far_branches(true); - - __ LoadImmediate(T5, 1); - __ LoadImmediate(T6, 1); - - __ LoadImmediate(V0, 42); - __ beq(T5, T6, &l); - __ nop(); - __ nop(); - __ LoadImmediate(V0, 0); - __ Bind(&l); - __ jr(RA); -} - - -ASSEMBLER_TEST_RUN(Beq_forward_taken_far2, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(Beq_forward_taken_far3, assembler) { - Label l; - - __ set_use_far_branches(true); - - __ LoadImmediate(T5, 1); - __ LoadImmediate(T6, 1); - - __ LoadImmediate(V0, 42); - __ beq(T5, T6, &l); - __ nop(); - for (int i = 0; i < (1 << 15); i++) { - __ nop(); - } - __ nop(); - __ LoadImmediate(V0, 0); - __ Bind(&l); - __ jr(RA); -} - - -ASSEMBLER_TEST_RUN(Beq_forward_taken_far3, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(Beq_forward_taken_delay, assembler) { - Label l; - - __ LoadImmediate(T5, 1); - __ LoadImmediate(T6, 1); - - __ LoadImmediate(V0, 42); - __ beq(T5, T6, &l); - __ delay_slot()->ori(V0, V0, Immediate(1)); - __ LoadImmediate(V0, 0); - __ Bind(&l); - __ jr(RA); -} - - -ASSEMBLER_TEST_RUN(Beq_forward_taken_delay, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(43, 
EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(Beq_forward_not_taken_delay, assembler) { - Label l; - - __ LoadImmediate(T5, 0); - __ LoadImmediate(T6, 1); - - __ LoadImmediate(V0, 42); - __ beq(T5, T6, &l); - __ delay_slot()->ori(V0, V0, Immediate(1)); - __ addiu(V0, V0, Immediate(1)); - __ Bind(&l); - __ jr(RA); -} - - -ASSEMBLER_TEST_RUN(Beq_forward_not_taken_delay, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(44, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(Beql_backward_delay, assembler) { - Label l; - - __ LoadImmediate(T5, 0); - __ LoadImmediate(T6, 1); - __ Bind(&l); - __ addiu(T5, T5, Immediate(1)); - __ beql(T5, T6, &l); - __ delay_slot()->addiu(T5, T5, Immediate(1)); - __ ori(V0, T5, Immediate(0)); - __ jr(RA); -} - - -ASSEMBLER_TEST_RUN(Beql_backward_delay, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(3, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(Bgez, assembler) { - Label l; - - __ LoadImmediate(T5, 3); - __ Bind(&l); - __ bgez(T5, &l); - __ delay_slot()->addiu(T5, T5, Immediate(-1)); - __ ori(V0, T5, Immediate(0)); - __ jr(RA); -} - - -ASSEMBLER_TEST_RUN(Bgez, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(-2, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(Bgez_far, assembler) { - Label l; - - __ set_use_far_branches(true); - - __ LoadImmediate(T5, 3); - __ Bind(&l); - __ bgez(T5, &l); - __ delay_slot()->addiu(T5, T5, Immediate(-1)); - __ ori(V0, T5, Immediate(0)); - __ jr(RA); -} - - -ASSEMBLER_TEST_RUN(Bgez_far, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(-2, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(Bgez_far2, assembler) { - Label l; - - __ set_use_far_branches(true); - - __ LoadImmediate(T5, 3); - __ Bind(&l); - for (int i = 0; i < (1 << 15); i++) { - __ nop(); - } - __ bgez(T5, &l); - __ delay_slot()->addiu(T5, T5, Immediate(-1)); - __ ori(V0, T5, Immediate(0)); - __ jr(RA); -} - - -ASSEMBLER_TEST_RUN(Bgez_far2, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(-2, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(Bgez_taken_forward_far, assembler) { - Label l; - - __ set_use_far_branches(true); - - __ LoadImmediate(T5, 1); - - __ LoadImmediate(V0, 42); - __ bgez(T5, &l); - __ nop(); - __ nop(); - __ LoadImmediate(V0, 0); - __ Bind(&l); - __ jr(RA); -} - - -ASSEMBLER_TEST_RUN(Bgez_taken_forward_far, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(Bgez_taken_forward_far2, assembler) { - Label l; - - __ set_use_far_branches(true); - - __ LoadImmediate(T5, 1); - - __ LoadImmediate(V0, 42); - __ bgez(T5, &l); - __ nop(); - for (int i = 0; i < (1 << 15); i++) { - __ nop(); - } - __ LoadImmediate(V0, 0); - __ Bind(&l); - __ jr(RA); -} - - -ASSEMBLER_TEST_RUN(Bgez_taken_forward_far2, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(Bgez_not_taken_forward_far, assembler) { - Label l; - - __ set_use_far_branches(true); - - __ LoadImmediate(T5, -1); - - __ LoadImmediate(V0, 42); - __ bgez(T5, &l); - __ nop(); - __ nop(); - __ LoadImmediate(V0, 0); - __ Bind(&l); - __ jr(RA); -} - - -ASSEMBLER_TEST_RUN(Bgez_not_taken_forward_far, test) { - typedef int 
(*SimpleCode)() DART_UNUSED; - EXPECT_EQ(0, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(Bgez_not_taken_forward_far2, assembler) { - Label l; - - __ set_use_far_branches(true); - - __ LoadImmediate(T5, -1); - - __ LoadImmediate(V0, 42); - __ bgez(T5, &l); - __ nop(); - for (int i = 0; i < (1 << 15); i++) { - __ nop(); - } - __ LoadImmediate(V0, 0); - __ Bind(&l); - __ jr(RA); -} - - -ASSEMBLER_TEST_RUN(Bgez_not_taken_forward_far2, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(0, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(Bgezl, assembler) { - Label l; - - __ LoadImmediate(T5, 3); - __ Bind(&l); - __ bgezl(T5, &l); - __ delay_slot()->addiu(T5, T5, Immediate(-1)); - __ ori(V0, T5, Immediate(0)); - __ jr(RA); -} - - -ASSEMBLER_TEST_RUN(Bgezl, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(-1, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(Blez, assembler) { - Label l; - - __ LoadImmediate(T5, -3); - __ Bind(&l); - __ blez(T5, &l); - __ delay_slot()->addiu(T5, T5, Immediate(1)); - __ ori(V0, T5, Immediate(0)); - __ jr(RA); -} - - -ASSEMBLER_TEST_RUN(Blez, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(2, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(Blez_far, assembler) { - Label l; - - __ set_use_far_branches(true); - - __ LoadImmediate(T5, -3); - __ Bind(&l); - __ blez(T5, &l); - __ delay_slot()->addiu(T5, T5, Immediate(1)); - __ ori(V0, T5, Immediate(0)); - __ jr(RA); -} - - -ASSEMBLER_TEST_RUN(Blez_far, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(2, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(Blez_far2, assembler) { - Label l; - - __ set_use_far_branches(true); - - __ LoadImmediate(T5, -3); - __ Bind(&l); - for (int i = 0; i < (1 << 15); i++) { - __ nop(); - } - __ blez(T5, &l); - __ delay_slot()->addiu(T5, T5, Immediate(1)); - __ ori(V0, T5, Immediate(0)); - __ jr(RA); -} - - -ASSEMBLER_TEST_RUN(Blez_far2, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(2, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(Blez_taken_forward_far, assembler) { - Label l; - - __ set_use_far_branches(true); - - __ LoadImmediate(T5, -1); - - __ LoadImmediate(V0, 42); - __ blez(T5, &l); - __ nop(); - __ nop(); - __ LoadImmediate(V0, 0); - __ Bind(&l); - __ jr(RA); -} - - -ASSEMBLER_TEST_RUN(Blez_taken_forward_far, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(Blez_not_taken_forward_far, assembler) { - Label l; - - __ set_use_far_branches(true); - - __ LoadImmediate(T5, 1); - - __ LoadImmediate(V0, 42); - __ blez(T5, &l); - __ nop(); - __ nop(); - __ LoadImmediate(V0, 0); - __ Bind(&l); - __ jr(RA); -} - - -ASSEMBLER_TEST_RUN(Blez_not_taken_forward_far, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(0, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(Blezl, assembler) { - Label l; - - __ LoadImmediate(T5, -3); - __ Bind(&l); - __ blezl(T5, &l); - __ delay_slot()->addiu(T5, T5, Immediate(1)); - __ ori(V0, T5, Immediate(0)); - __ jr(RA); -} - - -ASSEMBLER_TEST_RUN(Blezl, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(1, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(Bgtz, assembler) { - Label 
l; - - __ LoadImmediate(T5, 3); - __ Bind(&l); - __ bgtz(T5, &l); - __ delay_slot()->addiu(T5, T5, Immediate(-1)); - __ ori(V0, T5, Immediate(0)); - __ jr(RA); -} - - -ASSEMBLER_TEST_RUN(Bgtz, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(-1, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(Bgtzl, assembler) { - Label l; - - __ LoadImmediate(T5, 3); - __ Bind(&l); - __ bgtzl(T5, &l); - __ delay_slot()->addiu(T5, T5, Immediate(-1)); - __ ori(V0, T5, Immediate(0)); - __ jr(RA); -} - - -ASSEMBLER_TEST_RUN(Bgtzl, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(0, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(Bltz, assembler) { - Label l; - - __ LoadImmediate(T5, -3); - __ Bind(&l); - __ bltz(T5, &l); - __ delay_slot()->addiu(T5, T5, Immediate(1)); - __ ori(V0, T5, Immediate(0)); - __ jr(RA); -} - - -ASSEMBLER_TEST_RUN(Bltz, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(1, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(Bltzl, assembler) { - Label l; - - __ LoadImmediate(T5, -3); - __ Bind(&l); - __ bltzl(T5, &l); - __ delay_slot()->addiu(T5, T5, Immediate(1)); - __ ori(V0, T5, Immediate(0)); - __ jr(RA); -} - - -ASSEMBLER_TEST_RUN(Bltzl, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(0, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(Bne, assembler) { - Label l; - - __ LoadImmediate(T5, 3); - __ Bind(&l); - __ bne(T5, R0, &l); - __ delay_slot()->addiu(T5, T5, Immediate(-1)); - __ ori(V0, T5, Immediate(0)); - __ jr(RA); -} - - -ASSEMBLER_TEST_RUN(Bne, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(-1, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(Bnel, assembler) { - Label l; - - __ LoadImmediate(T5, 3); - __ Bind(&l); - __ bnel(T5, R0, &l); - __ delay_slot()->addiu(T5, T5, Immediate(-1)); - __ ori(V0, T5, Immediate(0)); - __ jr(RA); -} - - -ASSEMBLER_TEST_RUN(Bnel, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(0, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(Label_link1, assembler) { - Label l; - - __ bgez(ZR, &l); - __ bgez(ZR, &l); - __ bgez(ZR, &l); - - __ LoadImmediate(V0, 1); - __ Bind(&l); - __ mov(V0, ZR); - __ jr(RA); -} - - -ASSEMBLER_TEST_RUN(Label_link1, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(0, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(Label_link2, assembler) { - Label l; - - __ beq(ZR, ZR, &l); - __ beq(ZR, ZR, &l); - __ beq(ZR, ZR, &l); - - __ LoadImmediate(V0, 1); - __ Bind(&l); - __ mov(V0, ZR); - __ jr(RA); -} - - -ASSEMBLER_TEST_RUN(Label_link2, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(0, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(Jalr_delay, assembler) { - __ mov(T2, RA); - __ jalr(T2, RA); - __ delay_slot()->ori(V0, ZR, Immediate(42)); -} - - -ASSEMBLER_TEST_RUN(Jalr_delay, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(AddOverflow_detect, assembler) { - Register left = T0; - Register right = T1; - Register result = T2; - Register overflow = T3; - Register scratch = T4; - Label error, done; - - __ LoadImmediate(V0, 1); // Success value. 
-
-  __ LoadImmediate(left, 0x7fffffff);
-  __ LoadImmediate(right, 1);
-  __ AdduDetectOverflow(result, left, right, overflow);
-  __ bgez(overflow, &error);  // INT_MAX + 1 overflows.
-
-  __ LoadImmediate(left, 0x7fffffff);
-  __ AdduDetectOverflow(result, left, left, overflow);
-  __ bgez(overflow, &error);  // INT_MAX + INT_MAX overflows.
-
-  __ LoadImmediate(left, 0x7fffffff);
-  __ LoadImmediate(right, -1);
-  __ AdduDetectOverflow(result, left, right, overflow);
-  __ bltz(overflow, &error);  // INT_MAX - 1 does not overflow.
-
-  __ LoadImmediate(left, -1);
-  __ LoadImmediate(right, 1);
-  __ AdduDetectOverflow(result, left, right, overflow);
-  __ bltz(overflow, &error);  // -1 + 1 does not overflow.
-
-  __ LoadImmediate(left, 123456);
-  __ LoadImmediate(right, 654321);
-  __ AdduDetectOverflow(result, left, right, overflow);
-  __ bltz(overflow, &error);  // 123456 + 654321 does not overflow.
-
-  __ LoadImmediate(left, 0x80000000);
-  __ LoadImmediate(right, -1);
-  __ AdduDetectOverflow(result, left, right, overflow);
-  __ bgez(overflow, &error);  // INT_MIN - 1 overflows.
-
-  // result has 0x7fffffff.
-  __ AdduDetectOverflow(result, result, result, overflow, scratch);
-  __ bgez(overflow, &error);  // INT_MAX + INT_MAX overflows.
-
-  __ LoadImmediate(left, 0x80000000);
-  __ LoadImmediate(right, 0x80000000);
-  __ AdduDetectOverflow(result, left, right, overflow);
-  __ bgez(overflow, &error);  // INT_MIN + INT_MIN overflows.
-
-  __ LoadImmediate(left, -123456);
-  __ LoadImmediate(right, -654321);
-  __ AdduDetectOverflow(result, left, right, overflow);
-  __ bltz(overflow, &error);  // -123456 + -654321 does not overflow.
-
-  __ b(&done);
-  __ Bind(&error);
-  __ mov(V0, ZR);
-  __ Bind(&done);
-  __ Ret();
-}
-
-
-ASSEMBLER_TEST_RUN(AddOverflow_detect, test) {
-  typedef int (*SimpleCode)() DART_UNUSED;
-  EXPECT_EQ(1, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry()));
-}
-
-
-ASSEMBLER_TEST_GENERATE(SubOverflow_detect, assembler) {
-  Register left = T0;
-  Register right = T1;
-  Register result = T2;
-  Register overflow = T3;
-  Label error, done;
-
-  __ LoadImmediate(V0, 1);  // Success value.
-
-  __ LoadImmediate(left, 0x80000000);
-  __ LoadImmediate(right, 1);
-  __ SubuDetectOverflow(result, left, right, overflow);
-  __ bgez(overflow, &error);  // INT_MIN - 1 overflows.
-
-  __ LoadImmediate(left, 0x7fffffff);
-  __ LoadImmediate(right, 0x8000000);
-  __ SubuDetectOverflow(result, left, left, overflow);
-  __ bltz(overflow, &error);  // INT_MAX - INT_MAX does not overflow.
-
-  __ LoadImmediate(left, 0x80000000);
-  __ LoadImmediate(right, 0x80000000);
-  __ SubuDetectOverflow(result, left, right, overflow);
-  __ bltz(overflow, &error);  // INT_MIN - INT_MIN does not overflow.
-
-  __ LoadImmediate(left, 0x7fffffff);
-  __ LoadImmediate(right, 0x80000000);
-  __ SubuDetectOverflow(result, left, right, overflow);
-  __ bgez(overflow, &error);  // INT_MAX - INT_MIN overflows.
-
-  __ LoadImmediate(left, 1);
-  __ LoadImmediate(right, -1);
-  __ SubuDetectOverflow(result, left, right, overflow);
-  __ bltz(overflow, &error);  // 1 - -1 does not overflow.
-
-  __ b(&done);
-  __ Bind(&error);
-  __ mov(V0, ZR);
-  __ Bind(&done);
-  __ Ret();
-}
-
-
-ASSEMBLER_TEST_RUN(SubOverflow_detect, test) {
-  typedef int (*SimpleCode)() DART_UNUSED;
-  EXPECT_EQ(1, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry()));
-}
-
-
-ASSEMBLER_TEST_GENERATE(Mtc1Mfc1, assembler) {
-  __ mtc1(ZR, F0);
-  __ mtc1(ZR, F1);
-  __ mfc1(V0, F0);
-  __ mfc1(V1, F1);
-  __ Ret();
-}
-
-
-ASSEMBLER_TEST_RUN(Mtc1Mfc1, test) {
-  typedef int (*SimpleCode)() DART_UNUSED;
-  EXPECT(test != NULL);
-  EXPECT_EQ(0, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry()));
-}
-
-
-ASSEMBLER_TEST_GENERATE(Addd, assembler) {
-  __ LoadImmediate(D0, 1.0);
-  __ LoadImmediate(D1, 2.0);
-  __ addd(D0, D0, D1);
-  __ Ret();
-}
-
-
-ASSEMBLER_TEST_RUN(Addd, test) {
-  typedef double (*SimpleCode)() DART_UNUSED;
-  EXPECT(test != NULL);
-  double res = EXECUTE_TEST_CODE_DOUBLE(SimpleCode, test->entry());
-  EXPECT_FLOAT_EQ(3.0, res, 0.001);
-}
-
-
-ASSEMBLER_TEST_GENERATE(Movd, assembler) {
-  __ LoadImmediate(D1, 1.0);
-  __ movd(D0, D1);
-  __ Ret();
-}
-
-
-ASSEMBLER_TEST_RUN(Movd, test) {
-  typedef double (*SimpleCode)() DART_UNUSED;
-  EXPECT(test != NULL);
-  double res = EXECUTE_TEST_CODE_DOUBLE(SimpleCode, test->entry());
-  EXPECT_FLOAT_EQ(1.0, res, 0.001);
-}
-
-
-ASSEMBLER_TEST_GENERATE(Negd, assembler) {
-  __ LoadImmediate(D1, 1.0);
-  __ negd(D0, D1);
-  __ Ret();
-}
-
-
-ASSEMBLER_TEST_RUN(Negd, test) {
-  typedef double (*SimpleCode)() DART_UNUSED;
-  EXPECT(test != NULL);
-  double res = EXECUTE_TEST_CODE_DOUBLE(SimpleCode, test->entry());
-  EXPECT_FLOAT_EQ(-1.0, res, 0.001);
-}
-
-
-ASSEMBLER_TEST_GENERATE(Sdc1Ldc1, assembler) {
-  __ mov(T0, SP);
-  __ AddImmediate(SP, -3 * kWordSize);
-  __ AndImmediate(SP, SP, ~(8 - 1));  // Align SP by 8 bytes.
-  __ LoadImmediate(D1, 1.0);
-  __ sdc1(D1, Address(SP));
-  __ ldc1(D0, Address(SP));
-  __ mov(SP, T0);
-  __ Ret();
-}
-
-
-ASSEMBLER_TEST_RUN(Sdc1Ldc1, test) {
-  typedef double (*SimpleCode)() DART_UNUSED;
-  EXPECT(test != NULL);
-  double res = EXECUTE_TEST_CODE_DOUBLE(SimpleCode, test->entry());
-  EXPECT_FLOAT_EQ(1.0, res, 0.001);
-}
-
-
-ASSEMBLER_TEST_GENERATE(Addd_NaN, assembler) {
-  __ LoadImmediate(D0, 1.0);
-  // Double non-signaling NaN is 0x7FF8000000000000.
-  __ LoadImmediate(T0, 0x7FF80000);
-  __ mtc1(ZR, F2);  // Load lower bits of NaN.
-  __ mtc1(T0, F3);  // Load upper bits of NaN.
- __ addd(D0, D0, D1); - __ Ret(); -} - - -ASSEMBLER_TEST_RUN(Addd_NaN, test) { - typedef double (*SimpleCode)() DART_UNUSED; - EXPECT(test != NULL); - double res = EXECUTE_TEST_CODE_DOUBLE(SimpleCode, test->entry()); - EXPECT_EQ(isnan(res), true); -} - - -ASSEMBLER_TEST_GENERATE(Addd_Inf, assembler) { - __ LoadImmediate(D0, 1.0); - __ LoadImmediate(T0, 0x7FF00000); // +inf - __ mtc1(ZR, F2); - __ mtc1(T0, F3); - __ addd(D0, D0, D1); - __ Ret(); -} - - -ASSEMBLER_TEST_RUN(Addd_Inf, test) { - typedef double (*SimpleCode)() DART_UNUSED; - EXPECT(test != NULL); - double res = EXECUTE_TEST_CODE_DOUBLE(SimpleCode, test->entry()); - EXPECT_EQ(isfinite(res), false); -} - - -ASSEMBLER_TEST_GENERATE(Subd, assembler) { - __ LoadImmediate(D0, 2.5); - __ LoadImmediate(D1, 1.5); - __ subd(D0, D0, D1); - __ Ret(); -} - - -ASSEMBLER_TEST_RUN(Subd, test) { - typedef double (*SimpleCode)() DART_UNUSED; - EXPECT(test != NULL); - double res = EXECUTE_TEST_CODE_DOUBLE(SimpleCode, test->entry()); - EXPECT_FLOAT_EQ(1.0, res, 0.001); -} - - -ASSEMBLER_TEST_GENERATE(Muld, assembler) { - __ LoadImmediate(D0, 6.0); - __ LoadImmediate(D1, 7.0); - __ muld(D0, D0, D1); - __ Ret(); -} - - -ASSEMBLER_TEST_RUN(Muld, test) { - typedef double (*SimpleCode)() DART_UNUSED; - EXPECT(test != NULL); - double res = EXECUTE_TEST_CODE_DOUBLE(SimpleCode, test->entry()); - EXPECT_FLOAT_EQ(42.0, res, 0.001); -} - - -ASSEMBLER_TEST_GENERATE(Divd, assembler) { - __ LoadImmediate(D0, 42.0); - __ LoadImmediate(D1, 7.0); - __ divd(D0, D0, D1); - __ Ret(); -} - - -ASSEMBLER_TEST_RUN(Divd, test) { - typedef double (*SimpleCode)() DART_UNUSED; - EXPECT(test != NULL); - double res = EXECUTE_TEST_CODE_DOUBLE(SimpleCode, test->entry()); - EXPECT_FLOAT_EQ(6.0, res, 0.001); -} - - -ASSEMBLER_TEST_GENERATE(Sqrtd, assembler) { - __ LoadImmediate(D1, 36.0); - __ sqrtd(D0, D1); - __ Ret(); -} - - -ASSEMBLER_TEST_RUN(Sqrtd, test) { - typedef double (*SimpleCode)() DART_UNUSED; - EXPECT(test != NULL); - double res = EXECUTE_TEST_CODE_DOUBLE(SimpleCode, test->entry()); - EXPECT_FLOAT_EQ(6.0, res, 0.001); -} - - -ASSEMBLER_TEST_GENERATE(Cop1CUN, assembler) { - Label is_true; - - __ LoadImmediate(D0, 42.0); - __ LoadImmediate(T0, 0x7FF80000); - __ mtc1(ZR, F2); - __ mtc1(T0, F3); - __ LoadImmediate(V0, 42); - __ cund(D0, D1); - __ bc1t(&is_true); - __ mov(V0, ZR); - __ Bind(&is_true); - __ Ret(); -} - - -ASSEMBLER_TEST_RUN(Cop1CUN, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(Cop1CUN_not_taken, assembler) { - Label is_true; - - __ LoadImmediate(D0, 42.0); - __ LoadImmediate(D1, 42.0); - __ LoadImmediate(V0, 42); - __ cund(D0, D1); - __ bc1t(&is_true); - __ mov(V0, ZR); - __ Bind(&is_true); - __ Ret(); -} - - -ASSEMBLER_TEST_RUN(Cop1CUN_not_taken, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(0, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(Cop1CEq, assembler) { - Label is_true; - - __ LoadImmediate(D0, 42.5); - __ LoadImmediate(D1, 42.5); - __ LoadImmediate(V0, 42); - __ ceqd(D0, D1); - __ bc1t(&is_true); - __ mov(V0, ZR); - __ Bind(&is_true); - __ Ret(); -} - - -ASSEMBLER_TEST_RUN(Cop1CEq, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(Cop1CEq_not_taken, assembler) { - Label is_true; - - __ LoadImmediate(D0, 42.0); - __ LoadImmediate(D1, 42.5); - __ LoadImmediate(V0, 42); - __ ceqd(D0, D1); 
- __ bc1t(&is_true); - __ mov(V0, ZR); - __ Bind(&is_true); - __ Ret(); -} - - -ASSEMBLER_TEST_RUN(Cop1CEq_not_taken, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(0, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(Cop1CEq_false, assembler) { - Label is_true; - - __ LoadImmediate(D0, 42.0); - __ LoadImmediate(D1, 42.5); - __ LoadImmediate(V0, 42); - __ ceqd(D0, D1); - __ bc1f(&is_true); - __ mov(V0, ZR); - __ Bind(&is_true); - __ Ret(); -} - - -ASSEMBLER_TEST_RUN(Cop1CEq_false, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(Cop1CEq_false_not_taken, assembler) { - Label is_true; - - __ LoadImmediate(D0, 42.5); - __ LoadImmediate(D1, 42.5); - __ LoadImmediate(V0, 42); - __ ceqd(D0, D1); - __ bc1f(&is_true); - __ mov(V0, ZR); - __ Bind(&is_true); - __ Ret(); -} - - -ASSEMBLER_TEST_RUN(Cop1CEq_false_not_taken, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(0, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(Cop1COLT, assembler) { - Label is_true; - - __ LoadImmediate(D0, 42.0); - __ LoadImmediate(D1, 42.5); - __ LoadImmediate(V0, 42); - __ coltd(D0, D1); - __ bc1t(&is_true); - __ mov(V0, ZR); - __ Bind(&is_true); - __ Ret(); -} - - -ASSEMBLER_TEST_RUN(Cop1COLT, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(Cop1COLT_not_taken, assembler) { - Label is_true; - - __ LoadImmediate(D0, 42.5); - __ LoadImmediate(D1, 42.0); - __ LoadImmediate(V0, 42); - __ coltd(D0, D1); - __ bc1t(&is_true); - __ mov(V0, ZR); - __ Bind(&is_true); - __ Ret(); -} - - -ASSEMBLER_TEST_RUN(Cop1COLT_not_taken, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(0, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(Cop1COLE, assembler) { - Label is_true; - - __ LoadImmediate(D0, 42.0); - __ LoadImmediate(D1, 42.0); - __ LoadImmediate(V0, 42); - __ coled(D0, D1); - __ bc1t(&is_true); - __ mov(V0, ZR); - __ Bind(&is_true); - __ Ret(); -} - - -ASSEMBLER_TEST_RUN(Cop1COLE, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(Cop1COLE_not_taken, assembler) { - Label is_true; - - __ LoadImmediate(D0, 42.5); - __ LoadImmediate(D1, 42.0); - __ LoadImmediate(V0, 42); - __ coled(D0, D1); - __ bc1t(&is_true); - __ mov(V0, ZR); - __ Bind(&is_true); - __ Ret(); -} - - -ASSEMBLER_TEST_RUN(Cop1COLE_not_taken, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT_EQ(0, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(Cop1TruncWD, assembler) { - __ LoadImmediate(D1, 42.9); - __ truncwd(F0, D1); - __ mfc1(V0, F0); - __ Ret(); -} - - -ASSEMBLER_TEST_RUN(Cop1TruncWD, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT(test != NULL); - EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(Cop1TruncWD_neg, assembler) { - __ LoadImmediate(D1, -42.9); - __ truncwd(F0, D1); - __ mfc1(V0, F0); - __ Ret(); -} - - -ASSEMBLER_TEST_RUN(Cop1TruncWD_neg, test) { - typedef int (*SimpleCode)() DART_UNUSED; - EXPECT(test != NULL); - EXPECT_EQ(-42, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); -} - - -ASSEMBLER_TEST_GENERATE(Cop1TruncWD_NaN, assembler) { - // Double non-signaling NaN is 0x7FF8000000000000. 
-  __ LoadImmediate(T0, 0x7FF80000);
-  __ mtc1(ZR, F2);  // Load lower bits of NaN.
-  __ mtc1(T0, F3);  // Load upper bits of NaN.
-  __ truncwd(F0, D1);
-  __ mfc1(V0, F0);
-  __ Ret();
-}
-
-
-ASSEMBLER_TEST_RUN(Cop1TruncWD_NaN, test) {
-  typedef int (*SimpleCode)() DART_UNUSED;
-  EXPECT(test != NULL);
-  EXPECT_EQ(kMaxInt32, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry()));
-}
-
-
-ASSEMBLER_TEST_GENERATE(Cop1TruncWD_Inf, assembler) {
-  __ LoadImmediate(T0, 0x7FF00000);  // +inf
-  __ mtc1(ZR, F2);
-  __ mtc1(T0, F3);
-  __ truncwd(F0, D1);
-  __ mfc1(V0, F0);
-  __ Ret();
-}
-
-
-ASSEMBLER_TEST_RUN(Cop1TruncWD_Inf, test) {
-  typedef int (*SimpleCode)() DART_UNUSED;
-  EXPECT(test != NULL);
-  EXPECT_EQ(kMaxInt32, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry()));
-}
-
-
-ASSEMBLER_TEST_GENERATE(Cop1TruncWD_Overflow, assembler) {
-  __ LoadImmediate(D1, 2.0 * kMaxInt32);
-  __ truncwd(F0, D1);
-  __ mfc1(V0, F0);
-  __ Ret();
-}
-
-
-ASSEMBLER_TEST_RUN(Cop1TruncWD_Overflow, test) {
-  typedef int (*SimpleCode)() DART_UNUSED;
-  EXPECT(test != NULL);
-  EXPECT_EQ(kMaxInt32, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry()));
-}
-
-
-ASSEMBLER_TEST_GENERATE(Cop1TruncWD_Underflow, assembler) {
-  __ LoadImmediate(D1, 2.0 * kMinInt32);
-  __ truncwd(F0, D1);
-  __ mfc1(V0, F0);
-  __ Ret();
-}
-
-
-ASSEMBLER_TEST_RUN(Cop1TruncWD_Underflow, test) {
-  typedef int (*SimpleCode)() DART_UNUSED;
-  EXPECT(test != NULL);
-  EXPECT_EQ(kMaxInt32, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry()));
-}
-
-
-ASSEMBLER_TEST_GENERATE(Cop1CvtDW, assembler) {
-  __ LoadImmediate(T0, 42);
-  __ mtc1(T0, F2);
-  __ cvtdw(D0, F2);
-  __ Ret();
-}
-
-
-ASSEMBLER_TEST_RUN(Cop1CvtDW, test) {
-  typedef double (*SimpleCode)() DART_UNUSED;
-  EXPECT(test != NULL);
-  double res = EXECUTE_TEST_CODE_DOUBLE(SimpleCode, test->entry());
-  EXPECT_FLOAT_EQ(42.0, res, 0.001);
-}
-
-
-ASSEMBLER_TEST_GENERATE(Cop1CvtDW_neg, assembler) {
-  __ LoadImmediate(T0, -42);
-  __ mtc1(T0, F2);
-  __ cvtdw(D0, F2);
-  __ Ret();
-}
-
-
-ASSEMBLER_TEST_RUN(Cop1CvtDW_neg, test) {
-  typedef double (*SimpleCode)() DART_UNUSED;
-  EXPECT(test != NULL);
-  double res = EXECUTE_TEST_CODE_DOUBLE(SimpleCode, test->entry());
-  EXPECT_FLOAT_EQ(-42.0, res, 0.001);
-}
-
-
-ASSEMBLER_TEST_GENERATE(Cop1CvtSD, assembler) {
-  __ LoadImmediate(D2, -42.42);
-  __ cvtsd(F2, D2);
-  __ cvtds(D0, F2);
-  __ Ret();
-}
-
-
-ASSEMBLER_TEST_RUN(Cop1CvtSD, test) {
-  typedef double (*SimpleCode)() DART_UNUSED;
-  EXPECT(test != NULL);
-  double res = EXECUTE_TEST_CODE_DOUBLE(SimpleCode, test->entry());
-  EXPECT_FLOAT_EQ(-42.42, res, 0.001);
-}
-
-
-// Called from assembler_test.cc.
-// RA: return address.
-// A0: value.
-// A1: growable array.
-// A2: current thread.
-ASSEMBLER_TEST_GENERATE(StoreIntoObject, assembler) {
-  __ addiu(SP, SP, Immediate(-3 * kWordSize));
-  __ sw(CODE_REG, Address(SP, 2 * kWordSize));
-  __ sw(THR, Address(SP, 1 * kWordSize));
-  __ sw(RA, Address(SP, 0 * kWordSize));
-  __ mov(THR, A2);
-  __ StoreIntoObject(A1, FieldAddress(A1, GrowableObjectArray::data_offset()),
-                     A0);
-  __ lw(RA, Address(SP, 0 * kWordSize));
-  __ lw(THR, Address(SP, 1 * kWordSize));
-  __ lw(CODE_REG, Address(SP, 2 * kWordSize));
-  __ addiu(SP, SP, Immediate(3 * kWordSize));
-  __ Ret();
-}
-
-
-ASSEMBLER_TEST_GENERATE(Semaphore, assembler) {
-  __ EnterFrame();
-  __ LoadImmediate(T0, 40);
-  __ LoadImmediate(T1, 42);
-  __ Push(T0);
-  Label retry;
-  __ Bind(&retry);
-  __ ll(T0, Address(SP));
-  __ mov(T2, T1);
-  __ sc(T2, Address(SP));  // T2 == 1 on success.
-  __ LoadImmediate(T3, 1);
-  __ bne(T2, T3, &retry);  // NE if context switch occurred between ll and sc.
-  __ Pop(V0);  // 42
-  __ LeaveFrameAndReturn();
-}
-
-
-ASSEMBLER_TEST_RUN(Semaphore, test) {
-  EXPECT(test != NULL);
-  typedef int (*Semaphore)() DART_UNUSED;
-  EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(Semaphore, test->entry()));
-}
-
-
-}  // namespace dart
-
-#endif  // defined TARGET_ARCH_MIPS
diff --git a/runtime/vm/assembler_x64.cc b/runtime/vm/assembler_x64.cc
index 0d854e76772..0a3f6dcfa91 100644
--- a/runtime/vm/assembler_x64.cc
+++ b/runtime/vm/assembler_x64.cc
@@ -27,7 +27,7 @@ Assembler::Assembler(bool use_far_branches)
       has_single_entry_point_(true),
       comments_(),
       constant_pool_allowed_(false) {
-  // Far branching mode is only needed and implemented for MIPS and ARM.
+  // Far branching mode is only needed and implemented for ARM.
   ASSERT(!use_far_branches);
 }
diff --git a/runtime/vm/atomic_linux.h b/runtime/vm/atomic_linux.h
index b4cdffb0f60..c1f7e2356d3 100644
--- a/runtime/vm/atomic_linux.h
+++ b/runtime/vm/atomic_linux.h
@@ -32,12 +32,7 @@ inline void AtomicOperations::IncrementBy(intptr_t* p, intptr_t value) {
 
 
 inline void AtomicOperations::IncrementInt64By(int64_t* p, int64_t value) {
-#if defined(TARGET_ARCH_MIPS)
-  // No double-word atomics on MIPS32.
-  *p += value;
-#else
   __sync_fetch_and_add(p, value);
-#endif
 }
diff --git a/runtime/vm/code_patcher_mips.cc b/runtime/vm/code_patcher_mips.cc
deleted file mode 100644
index a6dab709cbc..00000000000
--- a/runtime/vm/code_patcher_mips.cc
+++ /dev/null
@@ -1,119 +0,0 @@
-// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
-// for details. All rights reserved. Use of this source code is governed by a
-// BSD-style license that can be found in the LICENSE file.
-
-#include "vm/globals.h"  // Needed here to get TARGET_ARCH_MIPS.
-#if defined(TARGET_ARCH_MIPS) - -#include "vm/code_patcher.h" - -#include "vm/instructions.h" -#include "vm/object.h" - -namespace dart { - -RawCode* CodePatcher::GetStaticCallTargetAt(uword return_address, - const Code& code) { - ASSERT(code.ContainsInstructionAt(return_address)); - CallPattern call(return_address, code); - return call.TargetCode(); -} - - -void CodePatcher::PatchStaticCallAt(uword return_address, - const Code& code, - const Code& new_target) { - ASSERT(code.ContainsInstructionAt(return_address)); - CallPattern call(return_address, code); - call.SetTargetCode(new_target); -} - - -void CodePatcher::InsertDeoptimizationCallAt(uword start) { - UNREACHABLE(); -} - - -RawCode* CodePatcher::GetInstanceCallAt(uword return_address, - const Code& code, - ICData* ic_data) { - ASSERT(code.ContainsInstructionAt(return_address)); - CallPattern call(return_address, code); - if (ic_data != NULL) { - *ic_data = call.IcData(); - } - return call.TargetCode(); -} - - -intptr_t CodePatcher::InstanceCallSizeInBytes() { - // The instance call instruction sequence has a variable size on MIPS. - UNREACHABLE(); - return 0; -} - - -RawFunction* CodePatcher::GetUnoptimizedStaticCallAt(uword return_address, - const Code& code, - ICData* ic_data_result) { - ASSERT(code.ContainsInstructionAt(return_address)); - CallPattern static_call(return_address, code); - ICData& ic_data = ICData::Handle(); - ic_data ^= static_call.IcData(); - if (ic_data_result != NULL) { - *ic_data_result = ic_data.raw(); - } - return ic_data.GetTargetAt(0); -} - - -void CodePatcher::PatchSwitchableCallAt(uword return_address, - const Code& caller_code, - const Object& data, - const Code& target) { - ASSERT(caller_code.ContainsInstructionAt(return_address)); - SwitchableCallPattern call(return_address, caller_code); - call.SetData(data); - call.SetTarget(target); -} - - -RawCode* CodePatcher::GetSwitchableCallTargetAt(uword return_address, - const Code& caller_code) { - ASSERT(caller_code.ContainsInstructionAt(return_address)); - SwitchableCallPattern call(return_address, caller_code); - return call.target(); -} - - -RawObject* CodePatcher::GetSwitchableCallDataAt(uword return_address, - const Code& caller_code) { - ASSERT(caller_code.ContainsInstructionAt(return_address)); - SwitchableCallPattern call(return_address, caller_code); - return call.data(); -} - - -void CodePatcher::PatchNativeCallAt(uword return_address, - const Code& code, - NativeFunction target, - const Code& trampoline) { - ASSERT(code.ContainsInstructionAt(return_address)); - NativeCallPattern call(return_address, code); - call.set_target(trampoline); - call.set_native_function(target); -} - - -RawCode* CodePatcher::GetNativeCallAt(uword return_address, - const Code& code, - NativeFunction* target) { - ASSERT(code.ContainsInstructionAt(return_address)); - NativeCallPattern call(return_address, code); - *target = call.native_function(); - return call.target(); -} - -} // namespace dart - -#endif // defined TARGET_ARCH_MIPS diff --git a/runtime/vm/code_patcher_mips_test.cc b/runtime/vm/code_patcher_mips_test.cc deleted file mode 100644 index 8662b8dace8..00000000000 --- a/runtime/vm/code_patcher_mips_test.cc +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file -// for details. All rights reserved. Use of this source code is governed by a -// BSD-style license that can be found in the LICENSE file. 
- -#include "vm/globals.h" -#if defined(TARGET_ARCH_MIPS) - -#include "vm/assembler.h" -#include "vm/code_patcher.h" -#include "vm/dart_entry.h" -#include "vm/instructions.h" -#include "vm/native_entry.h" -#include "vm/native_entry_test.h" -#include "vm/runtime_entry.h" -#include "vm/stub_code.h" -#include "vm/symbols.h" -#include "vm/unit_test.h" - -namespace dart { - -#define __ assembler-> - -ASSEMBLER_TEST_GENERATE(IcDataAccess, assembler) { - Thread* thread = Thread::Current(); - const String& class_name = String::Handle(Symbols::New(thread, "ownerClass")); - const Script& script = Script::Handle(); - const Class& owner_class = Class::Handle(Class::New( - Library::Handle(), class_name, script, TokenPosition::kNoSource)); - const String& function_name = - String::Handle(Symbols::New(thread, "callerFunction")); - const Function& function = Function::Handle(Function::New( - function_name, RawFunction::kRegularFunction, true, false, false, false, - false, owner_class, TokenPosition::kNoSource)); - - const String& target_name = String::Handle(String::New("targetFunction")); - const intptr_t kTypeArgsLen = 0; - const intptr_t kNumArgs = 1; - const Array& args_descriptor = Array::Handle( - ArgumentsDescriptor::New(kTypeArgsLen, kNumArgs, Object::null_array())); - const ICData& ic_data = ICData::ZoneHandle( - ICData::New(function, target_name, args_descriptor, 15, 1, false)); - - __ LoadObject(S5, ic_data); - __ BranchLinkPatchable(*StubCode::OneArgCheckInlineCache_entry()); - __ Ret(); -} - - -ASSEMBLER_TEST_RUN(IcDataAccess, test) { - uword end = test->payload_start() + test->code().Size(); - uword return_address = end - 2 * Instr::kInstrSize; - ICData& ic_data = ICData::Handle(); - CodePatcher::GetInstanceCallAt(return_address, test->code(), &ic_data); - EXPECT_STREQ("targetFunction", - String::Handle(ic_data.target_name()).ToCString()); - EXPECT_EQ(1, ic_data.NumArgsTested()); - EXPECT_EQ(0, ic_data.NumberOfChecks()); -} - -} // namespace dart - -#endif // TARGET_ARCH_MIPS diff --git a/runtime/vm/compiler.cc b/runtime/vm/compiler.cc index 5f88c665aea..84d58a6ade4 100644 --- a/runtime/vm/compiler.cc +++ b/runtime/vm/compiler.cc @@ -718,11 +718,10 @@ RawCode* CompileParsedFunctionHelper::Compile(CompilationPipeline* pipeline) { HANDLESCOPE(thread()); // We may reattempt compilation if the function needs to be assembled using - // far branches on ARM and MIPS. In the else branch of the setjmp call, - // done is set to false, and use_far_branches is set to true if there is a - // longjmp from the ARM or MIPS assemblers. In all other paths through this - // while loop, done is set to true. use_far_branches is always false on ia32 - // and x64. + // far branches on ARM. In the else branch of the setjmp call, done is set to + // false, and use_far_branches is set to true if there is a longjmp from the + // ARM assembler. In all other paths through this while loop, done is set to + // true. use_far_branches is always false on ia32 and x64. volatile bool done = false; // volatile because the variable may be clobbered by a longjmp. volatile bool use_far_branches = false; diff --git a/runtime/vm/constants_mips.h b/runtime/vm/constants_mips.h deleted file mode 100644 index cb13a4a1772..00000000000 --- a/runtime/vm/constants_mips.h +++ /dev/null @@ -1,644 +0,0 @@ -// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file -// for details. All rights reserved. Use of this source code is governed by a -// BSD-style license that can be found in the LICENSE file. 
-
-#ifndef RUNTIME_VM_CONSTANTS_MIPS_H_
-#define RUNTIME_VM_CONSTANTS_MIPS_H_
-
-#include "platform/assert.h"
-
-namespace dart {
-
-enum Register {
-  R0 = 0,
-  R1 = 1,  // AT aka TMP
-  R2 = 2,
-  R3 = 3,
-  R4 = 4,
-  R5 = 5,
-  R6 = 6,
-  R7 = 7,
-  R8 = 8,
-  R9 = 9,
-  R10 = 10,
-  R11 = 11,
-  R12 = 12,
-  R13 = 13,
-  R14 = 14,
-  R15 = 15,
-  R16 = 16,
-  R17 = 17,
-  R18 = 18,
-  R19 = 19,  // THR
-  R20 = 20,
-  R21 = 21,
-  R22 = 22,  // CTX
-  R23 = 23,  // PP
-  R24 = 24,
-  R25 = 25,
-  R26 = 26,
-  R27 = 27,
-  R28 = 28,
-  R29 = 29,  // SP
-  R30 = 30,  // FP
-  R31 = 31,  // RA
-  kNumberOfCpuRegisters = 32,
-  IMM = 32,  // Positive value is easier to encode than kNoRegister in bitfield.
-  kNoRegister = -1,  // Signals an illegal register.
-
-
-  // Register aliases.
-  ZR = R0,
-  AT = R1,
-
-  V0 = R2,
-  V1 = R3,
-
-  A0 = R4,
-  A1 = R5,
-  A2 = R6,
-  A3 = R7,
-
-  T0 = R8,
-  T1 = R9,
-  T2 = R10,
-  T3 = R11,
-  T4 = R12,
-  T5 = R13,
-  T6 = R14,
-  T7 = R15,
-
-  S0 = R16,
-  S1 = R17,
-  S2 = R18,
-  S3 = R19,
-  S4 = R20,
-  S5 = R21,
-  S6 = R22,
-  S7 = R23,
-
-  T8 = R24,
-  T9 = R25,
-
-  K0 = R26,
-  K1 = R27,
-
-  GP = R28,
-  SP = R29,
-  FP = R30,
-  RA = R31,
-};
-
-
-// Values for floating point registers.
-// Double-precision values use register pairs.
-enum FRegister {
-  F0 = 0,
-  F1 = 1,
-  F2 = 2,
-  F3 = 3,
-  F4 = 4,
-  F5 = 5,
-  F6 = 6,
-  F7 = 7,
-  F8 = 8,
-  F9 = 9,
-  F10 = 10,
-  F11 = 11,
-  F12 = 12,
-  F13 = 13,
-  F14 = 14,
-  F15 = 15,
-  F16 = 16,
-  F17 = 17,
-  F18 = 18,
-  F19 = 19,
-  F20 = 20,
-  F21 = 21,
-  F22 = 22,
-  F23 = 23,
-  F24 = 24,
-  F25 = 25,
-  F26 = 26,
-  F27 = 27,
-  F28 = 28,
-  F29 = 29,
-  F30 = 30,
-  F31 = 31,
-  kNumberOfFRegisters = 32,
-  kNoFRegister = -1,
-};
-
-// The double precision floating point registers are concatenated pairs of the
-// single precision registers, e.g. D0 is F1:F0, D1 is F3:F2, etc.. We only
-// tell the architecture generic code about the double precision registers, then
-// convert to the single precision registers when needed in the mips-specific
-// code.
-enum DRegister {
-  D0 = 0,    // Function return value 1.
-  D1 = 1,    // Function return value 2.
-  D2 = 2,    // Not preserved.
-  D3 = 3,    // Not preserved.
-  D4 = 4,    // Not preserved.
-  D5 = 5,    // Not preserved.
-  D6 = 6,    // Argument 1.
-  D7 = 7,    // Argument 2.
-  D8 = 8,    // Not preserved.
-  D9 = 9,    // Not preserved.
-  D10 = 10,  // Preserved.
-  D11 = 11,  // Preserved.
-  D12 = 12,  // Preserved.
-  D13 = 13,  // Preserved.
-  D14 = 14,  // Preserved.
-  D15 = 15,  // Preserved.
-  kNumberOfDRegisters = 16,
-  kNoDRegister = -1,
-};
-
-static inline FRegister EvenFRegisterOf(DRegister d) {
-  return static_cast<FRegister>(d * 2);
-}
-
-static inline FRegister OddFRegisterOf(DRegister d) {
-  return static_cast<FRegister>((d * 2) + 1);
-}
-
-const DRegister DTMP = D9;
-const FRegister STMP1 = F18;
-const FRegister STMP2 = F19;
-
-// Architecture independent aliases.
-typedef DRegister FpuRegister;
-const FpuRegister FpuTMP = DTMP;
-const int kNumberOfFpuRegisters = kNumberOfDRegisters;
-const FpuRegister kNoFpuRegister = kNoDRegister;
-
-
-// Register aliases.
-const Register TMP = AT;  // Used as scratch register by assembler.
-const Register TMP2 = kNoRegister;  // No second assembler scratch register.
-const Register CTX = S6;  // Location of current context at method entry.
-const Register CODE_REG = S6;
-const Register PP = S7;  // Caches object pool pointer in generated code.
-const Register SPREG = SP;  // Stack pointer register.
-const Register FPREG = FP;  // Frame pointer register.
-const Register LRREG = RA;  // Link register.
-const Register ICREG = S5;  // IC data register.
-const Register ARGS_DESC_REG = S4;
-const Register THR = S3;  // Caches current thread in generated code.
-const Register CALLEE_SAVED_TEMP = S5;
-
-// The code that generates a comparison can be far away from the code that
-// generates the branch that uses the result of that comparison. In this case,
-// CMPRES1 and CMPRES2 are used for the results of the comparison. We need two
-// since TMP is clobbered by a far branch.
-const Register CMPRES1 = T8;
-const Register CMPRES2 = T9;
-
-// Exception object is passed in this register to the catch handlers when an
-// exception is thrown.
-const Register kExceptionObjectReg = V0;
-
-// Stack trace object is passed in this register to the catch handlers when
-// an exception is thrown.
-const Register kStackTraceObjectReg = V1;
-
-
-typedef uint32_t RegList;
-const RegList kAllCpuRegistersList = 0xFFFFFFFF;
-
-const RegList kAbiArgumentCpuRegs =
-    (1 << A0) | (1 << A1) | (1 << A2) | (1 << A3);
-const RegList kAbiPreservedCpuRegs = (1 << S0) | (1 << S1) | (1 << S2) |
-                                     (1 << S3) | (1 << S4) | (1 << S5) |
-                                     (1 << S6) | (1 << S7);
-const int kAbiPreservedCpuRegCount = 8;
-
-// FPU registers 20 - 31 are preserved across calls.
-const FRegister kAbiFirstPreservedFpuReg = F20;
-const FRegister kAbiLastPreservedFpuReg =
-    static_cast<FRegister>(kNumberOfFRegisters - 1);
-const int kAbiPreservedFpuRegCount = 12;
-
-const RegList kReservedCpuRegisters =
-    (1 << SPREG) | (1 << FPREG) | (1 << TMP) | (1 << PP) | (1 << THR) |
-    (1 << CTX) | (1 << ZR) | (1 << CMPRES1) | (1 << CMPRES2) | (1 << K0) |
-    (1 << K1) | (1 << GP) | (1 << RA);
-// CPU registers available to Dart allocator.
-const RegList kDartAvailableCpuRegs =
-    kAllCpuRegistersList & ~kReservedCpuRegisters;
-// Registers available to Dart that are not preserved by runtime calls.
-const RegList kDartVolatileCpuRegs =
-    kDartAvailableCpuRegs & ~kAbiPreservedCpuRegs;
-const int kDartVolatileCpuRegCount = 14;
-const Register kDartFirstVolatileCpuReg = R2;
-const Register kDartLastVolatileCpuReg = R15;
-
-// FPU registers 0 - 19 are not preserved across calls.
-const FRegister kDartFirstVolatileFpuReg = F0;
-const FRegister kDartLastVolatileFpuReg = F19;
-const int kDartVolatileFpuRegCount = 20;
-
-
-// There is no status register on MIPS. Instead of representing a condition
-// code, type Condition (see assembler_mips.h) represents a pair of operands
-// and a relation operator between them.
-enum RelationOperator {
-  AL,   // always
-  NV,   // never
-  EQ,   // equal
-  NE,   // not equal
-  GT,   // greater than
-  GE,   // greater equal
-  LT,   // less than
-  LE,   // less equal
-  UGT,  // unsigned greater than
-  UGE,  // unsigned greater equal
-  ULT,  // unsigned less than
-  ULE,  // unsigned less equal
-  INVALID_RELATION
-};
-
-
-// Constants used for the decoding or encoding of the individual fields of
-// instructions. Based on the "Table 4.25 CPU Instruction Format Fields".
-enum InstructionFields { - kOpcodeShift = 26, - kOpcodeBits = 6, - kRsShift = 21, - kRsBits = 5, - kFmtShift = 21, - kFmtBits = 5, - kRtShift = 16, - kRtBits = 5, - kFtShift = 16, - kFtBits = 5, - kRdShift = 11, - kRdBits = 5, - kFsShift = 11, - kFsBits = 5, - kSaShift = 6, - kSaBits = 5, - kFdShift = 6, - kFdBits = 5, - kFunctionShift = 0, - kFunctionBits = 6, - kCop1FnShift = 0, - kCop1FnBits = 6, - kCop1SubShift = 21, - kCop1SubBits = 5, - kImmShift = 0, - kImmBits = 16, - kInstrShift = 0, - kInstrBits = 26, - kBreakCodeShift = 6, - kBreakCodeBits = 20, - kFpuCCShift = 8, - kFpuCCBits = 3, - - kBranchOffsetMask = 0x0000ffff, -}; - - -enum Opcode { - SPECIAL = 0, - REGIMM = 1, - J = 2, - JAL = 3, - BEQ = 4, - BNE = 5, - BLEZ = 6, - BGTZ = 7, - ADDI = 8, - ADDIU = 9, - SLTI = 10, - SLTIU = 11, - ANDI = 12, - ORI = 13, - XORI = 14, - LUI = 15, - CPO0 = 16, - COP1 = 17, - COP2 = 18, - COP1X = 19, - BEQL = 20, - BNEL = 21, - BLEZL = 22, - BGTZL = 23, - SPECIAL2 = 28, - JALX = 29, - SPECIAL3 = 31, - LB = 32, - LH = 33, - LWL = 34, - LW = 35, - LBU = 36, - LHU = 37, - LWR = 38, - SB = 40, - SH = 41, - SWL = 42, - SW = 43, - SWR = 46, - CACHE = 47, - LL = 48, - LWC1 = 49, - LWC2 = 50, - PREF = 51, - LDC1 = 53, - LDC2 = 54, - SC = 56, - SWC1 = 57, - SWC2 = 58, - SDC1 = 61, - SDC2 = 62, -}; - - -enum SpecialFunction { - // SPECIAL opcodes. - SLL = 0, - MOVCI = 1, - SRL = 2, - SRA = 3, - SLLV = 4, - SRLV = 6, - SRAV = 7, - JR = 8, - JALR = 9, - MOVZ = 10, - MOVN = 11, - SYSCALL = 12, - BREAK = 13, - SYNC = 15, - MFHI = 16, - MTHI = 17, - MFLO = 18, - MTLO = 19, - MULT = 24, - MULTU = 25, - DIV = 26, - DIVU = 27, - ADD = 32, - ADDU = 33, - SUB = 34, - SUBU = 35, - AND = 36, - OR = 37, - XOR = 38, - NOR = 39, - SLT = 42, - SLTU = 43, - TGE = 48, - TGEU = 49, - TLT = 50, - TLTU = 51, - TEQ = 52, - TNE = 54, - - // SPECIAL2 opcodes. - MADD = 0, - MADDU = 1, - CLZ = 32, - CLO = 33, -}; - - -enum RtRegImm { - BLTZ = 0, - BGEZ = 1, - BLTZL = 2, - BGEZL = 3, - TGEI = 8, - TGEIU = 9, - TLTI = 10, - TLTIU = 11, - TEQI = 12, - TNEI = 14, - BLTZAL = 16, - BGEZAL = 17, - BLTZALL = 18, - BGEZALL = 19, - SYNCI = 31, -}; - - -enum Cop1Function { - COP1_ADD = 0x00, - COP1_SUB = 0x01, - COP1_MUL = 0x02, - COP1_DIV = 0x03, - COP1_SQRT = 0x04, - COP1_MOV = 0x06, - COP1_NEG = 0x07, - COP1_TRUNC_W = 0x0d, - COP1_CVT_S = 0x20, - COP1_CVT_D = 0x21, - COP1_C_F = 0x30, - COP1_C_UN = 0x31, - COP1_C_EQ = 0x32, - COP1_C_UEQ = 0x33, - COP1_C_OLT = 0x34, - COP1_C_ULT = 0x35, - COP1_C_OLE = 0x36, - COP1_C_ULE = 0x37, -}; - - -enum Cop1Sub { - COP1_MF = 0, - COP1_MT = 4, - COP1_BC = 8, -}; - - -enum Format { - FMT_S = 16, - FMT_D = 17, - FMT_W = 20, - FMT_L = 21, - FMT_PS = 22, -}; - - -class Instr { - public: - enum { - kInstrSize = 4, - }; - - static const int32_t kNopInstruction = 0; - - // Reserved break instruction codes. - static const int32_t kBreakPointCode = 0xdeb0; // For breakpoint. - static const int32_t kStopMessageCode = 0xdeb1; // For Stop(message). - static const int32_t kSimulatorBreakCode = 0xdeb2; // For breakpoint in sim. - static const int32_t kSimulatorRedirectCode = 0xca11; // For redirection. - - static const int32_t kBreakPointZeroInstruction = - (SPECIAL << kOpcodeShift) | (BREAK << kFunctionShift); - - // Breakpoint instruction filling assembler code buffers in debug mode. - static const int32_t kBreakPointInstruction = - kBreakPointZeroInstruction | (kBreakPointCode << kBreakCodeShift); - - // Breakpoint instruction used by the simulator. 
-  // Should be distinct from kBreakPointInstruction and from a typical user
-  // breakpoint inserted in generated code for debugging, e.g. break_(0).
-  static const int32_t kSimulatorBreakpointInstruction =
-      kBreakPointZeroInstruction | (kSimulatorBreakCode << kBreakCodeShift);
-
-  // Runtime call redirection instruction used by the simulator.
-  static const int32_t kSimulatorRedirectInstruction =
-      kBreakPointZeroInstruction | (kSimulatorRedirectCode << kBreakCodeShift);
-
-  // Get the raw instruction bits.
-  inline int32_t InstructionBits() const {
-    return *reinterpret_cast<const int32_t*>(this);
-  }
-
-  // Set the raw instruction bits to value.
-  inline void SetInstructionBits(int32_t value) {
-    *reinterpret_cast<int32_t*>(this) = value;
-  }
-
-  inline void SetImmInstrBits(Opcode op,
-                              Register rs,
-                              Register rt,
-                              uint16_t imm) {
-    SetInstructionBits(op << kOpcodeShift | rs << kRsShift | rt << kRtShift |
-                       imm << kImmShift);
-  }
-
-  inline void SetSpecialInstrBits(SpecialFunction f,
-                                  Register rs,
-                                  Register rt,
-                                  Register rd) {
-    SetInstructionBits(SPECIAL << kOpcodeShift | f << kFunctionShift |
-                       rs << kRsShift | rt << kRtShift | rd << kRdShift);
-  }
-
-  // Read one particular bit out of the instruction bits.
-  inline int32_t Bit(int nr) const { return (InstructionBits() >> nr) & 1; }
-
-  // Read a bit field out of the instruction bits.
-  inline int32_t Bits(int shift, int count) const {
-    return (InstructionBits() >> shift) & ((1 << count) - 1);
-  }
-
-  // Accessors to the different named fields used in the MIPS encoding.
-  inline Opcode OpcodeField() const {
-    return static_cast<Opcode>(Bits(kOpcodeShift, kOpcodeBits));
-  }
-
-  inline void SetOpcodeField(Opcode b) {
-    int32_t instr = InstructionBits();
-    int32_t mask = ((1 << kOpcodeBits) - 1) << kOpcodeShift;
-    SetInstructionBits((b << kOpcodeShift) | (instr & ~mask));
-  }
-
-  inline Register RsField() const {
-    return static_cast<Register>(Bits(kRsShift, kRsBits));
-  }
-
-  inline Register RtField() const {
-    return static_cast<Register>(Bits(kRtShift, kRtBits));
-  }
-
-  inline Register RdField() const {
-    return static_cast<Register>(Bits(kRdShift, kRdBits));
-  }
-
-  inline FRegister FsField() const {
-    return static_cast<FRegister>(Bits(kFsShift, kFsBits));
-  }
-
-  inline FRegister FtField() const {
-    return static_cast<FRegister>(Bits(kFtShift, kFtBits));
-  }
-
-  inline FRegister FdField() const {
-    return static_cast<FRegister>(Bits(kFdShift, kFdBits));
-  }
-
-  inline int SaField() const { return Bits(kSaShift, kSaBits); }
-
-  inline int32_t UImmField() const { return Bits(kImmShift, kImmBits); }
-
-  inline int32_t SImmField() const {
-    // Sign-extend the imm field.
-    return (Bits(kImmShift, kImmBits) << (32 - kImmBits)) >> (32 - kImmBits);
-  }
-
-  inline int32_t BreakCodeField() const {
-    return Bits(kBreakCodeShift, kBreakCodeBits);
-  }
-
-  inline SpecialFunction FunctionField() const {
-    return static_cast<SpecialFunction>(Bits(kFunctionShift, kFunctionBits));
-  }
-
-  inline RtRegImm RegImmFnField() const {
-    return static_cast<RtRegImm>(Bits(kRtShift, kRtBits));
-  }
-
-  inline void SetRegImmFnField(RtRegImm b) {
-    int32_t instr = InstructionBits();
-    int32_t mask = ((1 << kRtBits) - 1) << kRtShift;
-    SetInstructionBits((b << kRtShift) | (instr & ~mask));
-  }
-
-  inline bool IsBreakPoint() {
-    return (OpcodeField() == SPECIAL) && (FunctionField() == BREAK);
-  }
-
-  inline Cop1Function Cop1FunctionField() const {
-    return static_cast<Cop1Function>(Bits(kCop1FnShift, kCop1FnBits));
-  }
-
-  inline Cop1Sub Cop1SubField() const {
-    return static_cast<Cop1Sub>(Bits(kCop1SubShift, kCop1SubBits));
-  }
-
-  inline bool HasFormat() const {
-    return (OpcodeField() == COP1) && (Bit(25) == 1);
-  }
-
-  inline Format FormatField() const {
-    return static_cast<Format>(Bits(kFmtShift, kFmtBits));
-  }
-
-  inline int32_t FpuCCField() const { return Bits(kFpuCCShift, kFpuCCBits); }
-
-  // Instructions are read out of a code stream. The only way to get a
-  // reference to an instruction is to convert a pc. There is no way
-  // to allocate or create instances of class Instr.
-  // Use the At(pc) function to create references to Instr.
-  static Instr* At(uword pc) { return reinterpret_cast<Instr*>(pc); }
-
-#if defined(DEBUG)
-  inline void AssertIsImmInstr(Opcode op,
-                               Register rs,
-                               Register rt,
-                               int32_t imm) {
-    ASSERT((OpcodeField() == op) && (RsField() == rs) && (RtField() == rt) &&
-           (SImmField() == imm));
-  }
-
-  inline void AssertIsSpecialInstr(SpecialFunction f,
-                                   Register rs,
-                                   Register rt,
-                                   Register rd) {
-    ASSERT((OpcodeField() == SPECIAL) && (FunctionField() == f) &&
-           (RsField() == rs) && (RtField() == rt) && (RdField() == rd));
-  }
-#endif  // defined(DEBUG)
-
- private:
-  DISALLOW_ALLOCATION();
-  DISALLOW_IMPLICIT_CONSTRUCTORS(Instr);
-};
-
-}  // namespace dart
-
-#endif  // RUNTIME_VM_CONSTANTS_MIPS_H_
diff --git a/runtime/vm/cpu.h b/runtime/vm/cpu.h
index cdd0c25888f..1ebb2bfddf7 100644
--- a/runtime/vm/cpu.h
+++ b/runtime/vm/cpu.h
@@ -31,8 +31,6 @@ class CPU : public AllStatic {
 #include "vm/cpu_arm.h"
 #elif defined(TARGET_ARCH_ARM64)
 #include "vm/cpu_arm64.h"
-#elif defined(TARGET_ARCH_MIPS)
-#include "vm/cpu_mips.h"
 #elif defined(TARGET_ARCH_DBC)
 #include "vm/cpu_dbc.h"
 #else
diff --git a/runtime/vm/cpu_mips.cc b/runtime/vm/cpu_mips.cc
deleted file mode 100644
index 902f59cb0f1..00000000000
--- a/runtime/vm/cpu_mips.cc
+++ /dev/null
@@ -1,109 +0,0 @@
-// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
-// for details. All rights reserved. Use of this source code is governed by a
-// BSD-style license that can be found in the LICENSE file.
-
-#include "vm/globals.h"
-#if defined(TARGET_ARCH_MIPS)
-
-#include "vm/cpu.h"
-#include "vm/cpu_mips.h"
-
-#include "vm/cpuinfo.h"
-#include "vm/simulator.h"
-
-#if !defined(USING_SIMULATOR)
-#include <asm/cachectl.h> /* NOLINT */
-#include <sys/syscall.h> /* NOLINT */
-#include <unistd.h> /* NOLINT */
-#endif
-
-namespace dart {
-
-void CPU::FlushICache(uword start, uword size) {
-#if !defined(USING_SIMULATOR)
-  int res;
-  // See http://www.linux-mips.org/wiki/Cacheflush_Syscall.
-  res = syscall(__NR_cacheflush, start, size, ICACHE);
-  ASSERT(res == 0);
-#else  // defined(HOST_ARCH_MIPS)
-// When running in simulated mode we do not need to flush the ICache because
-// we are not running on the actual hardware.
-#endif  // defined(HOST_ARCH_MIPS)
-}
-
-
-const char* CPU::Id() {
-  return
-#if defined(USING_SIMULATOR)
-      "sim"
-#endif  // !defined(HOST_ARCH_MIPS)
-      "mips";
-}
-
-
-const char* HostCPUFeatures::hardware_ = NULL;
-MIPSVersion HostCPUFeatures::mips_version_ = MIPSvUnknown;
-#if defined(DEBUG)
-bool HostCPUFeatures::initialized_ = false;
-#endif
-
-
-#if !defined(USING_SIMULATOR)
-void HostCPUFeatures::InitOnce() {
-  CpuInfo::InitOnce();
-  hardware_ = CpuInfo::GetCpuModel();
-  // Has a floating point unit.
-  ASSERT(CpuInfo::FieldContains(kCpuInfoModel, "FPU"));
-
-// We want to know the ISA version, but on MIPS, CpuInfo can't tell us, so
-// we use the same ISA version that Dart's C++ compiler targeted.
-#if defined(_MIPS_ARCH_MIPS32R2)
-  mips_version_ = MIPS32r2;
-#elif defined(_MIPS_ARCH_MIPS32)
-  mips_version_ = MIPS32;
-#endif
-
-#if defined(DEBUG)
-  initialized_ = true;
-#endif
-}
-
-
-void HostCPUFeatures::Cleanup() {
-  DEBUG_ASSERT(initialized_);
-#if defined(DEBUG)
-  initialized_ = false;
-#endif
-  ASSERT(hardware_ != NULL);
-  free(const_cast<char*>(hardware_));
-  hardware_ = NULL;
-  CpuInfo::Cleanup();
-}
-
-#else
-
-void HostCPUFeatures::InitOnce() {
-  CpuInfo::InitOnce();
-  hardware_ = CpuInfo::GetCpuModel();
-  mips_version_ = MIPS32r2;
-#if defined(DEBUG)
-  initialized_ = true;
-#endif
-}
-
-
-void HostCPUFeatures::Cleanup() {
-  DEBUG_ASSERT(initialized_);
-#if defined(DEBUG)
-  initialized_ = false;
-#endif
-  ASSERT(hardware_ != NULL);
-  free(const_cast<char*>(hardware_));
-  hardware_ = NULL;
-  CpuInfo::Cleanup();
-}
-#endif  // defined(HOST_ARCH_MIPS)
-
-}  // namespace dart
-
-#endif  // defined TARGET_ARCH_MIPS
diff --git a/runtime/vm/cpu_mips.h b/runtime/vm/cpu_mips.h
deleted file mode 100644
index 8af35b8826f..00000000000
--- a/runtime/vm/cpu_mips.h
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file
-// for details. All rights reserved. Use of this source code is governed by a
-// BSD-style license that can be found in the LICENSE file.
-
-#ifndef RUNTIME_VM_CPU_MIPS_H_
-#define RUNTIME_VM_CPU_MIPS_H_
-
-#include "vm/allocation.h"
-
-namespace dart {
-
-// TargetCPUFeatures gives CPU features for the architecture that we are
-// generating code for. HostCPUFeatures gives the CPU features for the
-// architecture that we are actually running on. When the architectures
-// are the same, TargetCPUFeatures will query HostCPUFeatures. When they are
-// different (i.e. we are running in a simulator), HostCPUFeatures will
-// additionally mock the options needed for the target architecture so that
-// they may be altered for testing.
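The header comment above is the whole design in prose: one source of truth (HostCPUFeatures) that is either probed from the real machine or mocked under a simulator, with TargetCPUFeatures always delegating to it. A hypothetical sketch of that forwarding pattern, sized down to the one mips_version query and with illustrative names only; it parallels, but is not, the enum and classes that follow:

```cpp
// Sketch of the Host/Target feature split described above. The simulated
// path fakes a MIPS32r2 host, mirroring the simulator branch of InitOnce.
#include <iostream>

enum MIPSVersion { MIPS32, MIPS32r2, MIPSvUnknown };

class HostCPUFeatures {
 public:
  static void InitOnce(bool simulated) {
    // Real hardware: trust the ISA the C++ compiler targeted (the CpuInfo
    // text cannot tell us). Simulator: assert the version we simulate.
    version_ = simulated ? MIPS32r2 : DetectFromBuild();
  }
  static MIPSVersion mips_version() { return version_; }

 private:
  static MIPSVersion DetectFromBuild() {
#if defined(_MIPS_ARCH_MIPS32R2)
    return MIPS32r2;
#elif defined(_MIPS_ARCH_MIPS32)
    return MIPS32;
#else
    return MIPSvUnknown;  // Non-MIPS host (the common case).
#endif
  }
  static MIPSVersion version_;
};
MIPSVersion HostCPUFeatures::version_ = MIPSvUnknown;

// Target always delegates; under a simulator the "host" was mocked above.
struct TargetCPUFeatures {
  static MIPSVersion mips_version() { return HostCPUFeatures::mips_version(); }
};

int main() {
  HostCPUFeatures::InitOnce(/*simulated=*/true);
  std::cout << (TargetCPUFeatures::mips_version() == MIPS32r2) << "\n";  // 1
}
```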
- -enum MIPSVersion { - MIPS32, - MIPS32r2, - MIPSvUnknown, -}; - -class HostCPUFeatures : public AllStatic { - public: - static void InitOnce(); - static void Cleanup(); - static const char* hardware() { - DEBUG_ASSERT(initialized_); - return hardware_; - } - static MIPSVersion mips_version() { - DEBUG_ASSERT(initialized_); - return mips_version_; - } - - private: - static const char* hardware_; - static MIPSVersion mips_version_; -#if defined(DEBUG) - static bool initialized_; -#endif -}; - -class TargetCPUFeatures : public AllStatic { - public: - static void InitOnce() { HostCPUFeatures::InitOnce(); } - static void Cleanup() { HostCPUFeatures::Cleanup(); } - static const char* hardware() { return HostCPUFeatures::hardware(); } - static bool double_truncate_round_supported() { return false; } - static MIPSVersion mips_version() { return HostCPUFeatures::mips_version(); } -}; - -} // namespace dart - -#endif // RUNTIME_VM_CPU_MIPS_H_ diff --git a/runtime/vm/cpu_test.cc b/runtime/vm/cpu_test.cc index 1adb7483b9c..ed76a8836bb 100644 --- a/runtime/vm/cpu_test.cc +++ b/runtime/vm/cpu_test.cc @@ -26,12 +26,6 @@ VM_UNIT_TEST_CASE(Id) { #else // defined(HOST_ARCH_ARM64) EXPECT_STREQ("simarm64", CPU::Id()); #endif // defined(HOST_ARCH_ARM64) -#elif defined(TARGET_ARCH_MIPS) -#if defined(HOST_ARCH_MIPS) - EXPECT_STREQ("mips", CPU::Id()); -#else // defined(HOST_ARCH_MIPS) - EXPECT_STREQ("simmips", CPU::Id()); -#endif // defined(HOST_ARCH_MIPS) #elif defined(TARGET_ARCH_DBC) EXPECT_STREQ("dbc", CPU::Id()); #else diff --git a/runtime/vm/cpuinfo_android.cc b/runtime/vm/cpuinfo_android.cc index 23bca81b59e..9e50248fa29 100644 --- a/runtime/vm/cpuinfo_android.cc +++ b/runtime/vm/cpuinfo_android.cc @@ -32,12 +32,6 @@ void CpuInfo::InitOnce() { fields_[kCpuInfoHardware] = "Hardware"; fields_[kCpuInfoFeatures] = "Features"; fields_[kCpuInfoArchitecture] = "CPU architecture"; -#elif defined(HOST_ARCH_MIPS) - fields_[kCpuInfoProcessor] = "system type"; - fields_[kCpuInfoModel] = "cpu model"; - fields_[kCpuInfoHardware] = "cpu model"; - fields_[kCpuInfoFeatures] = "ASEs implemented"; - fields_[kCpuInfoArchitecture] = "CPU architecture"; #else #error Unrecognized target architecture #endif diff --git a/runtime/vm/cpuinfo_linux.cc b/runtime/vm/cpuinfo_linux.cc index 232b4091c59..3289a157398 100644 --- a/runtime/vm/cpuinfo_linux.cc +++ b/runtime/vm/cpuinfo_linux.cc @@ -12,7 +12,7 @@ #include "platform/assert.h" // As with Windows, on IA32 and X64, we use the cpuid instruction. -// The analogous instruction is privileged on ARM and MIPS, so we resort to +// The analogous instruction is privileged on ARM, so we resort to // reading from /proc/cpuinfo. 
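For readers unfamiliar with the /proc/cpuinfo route mentioned above: the deleted tables simply map logical keys (model, hardware, features) to the literal field names that appear in that file, and the file is then scanned line by line for "key : value" pairs. A rough, self-contained sketch of such a lookup, standard library only and not the VM's actual ProcCpuInfo parser:

```cpp
// Illustrative /proc/cpuinfo field lookup: scan "key : value" lines and
// return the value for a requested key, or "" if the field is absent
// (e.g. "cpu model" on a non-MIPS machine).
#include <fstream>
#include <iostream>
#include <sstream>
#include <string>

std::string CpuInfoField(const std::string& key) {
  std::ifstream cpuinfo("/proc/cpuinfo");
  std::string line;
  while (std::getline(cpuinfo, line)) {
    std::istringstream parts(line);
    std::string field, value;
    if (std::getline(parts, field, ':') && std::getline(parts, value)) {
      field.erase(field.find_last_not_of(" \t") + 1);    // Trim right.
      if (field == key) {
        value.erase(0, value.find_first_not_of(" \t"));  // Trim left.
        return value;
      }
    }
  }
  return "";
}

int main() {
  // "cpu model" was the MIPS spelling; "model name" is the x86 analog.
  std::cout << CpuInfoField("model name") << "\n";
}
```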
namespace dart { @@ -45,14 +45,6 @@ void CpuInfo::InitOnce() { fields_[kCpuInfoArchitecture] = "CPU architecture"; method_ = kCpuInfoSystem; ProcCpuInfo::InitOnce(); -#elif defined(HOST_ARCH_MIPS) - fields_[kCpuInfoProcessor] = "system type"; - fields_[kCpuInfoModel] = "cpu model"; - fields_[kCpuInfoHardware] = "cpu model"; - fields_[kCpuInfoFeatures] = "ASEs implemented"; - fields_[kCpuInfoArchitecture] = "CPU architecture"; - method_ = kCpuInfoSystem; - ProcCpuInfo::InitOnce(); #else #error Unrecognized target architecture #endif diff --git a/runtime/vm/dart.cc b/runtime/vm/dart.cc index b9d15660859..33ca87bf6ba 100644 --- a/runtime/vm/dart.cc +++ b/runtime/vm/dart.cc @@ -97,16 +97,6 @@ static void CheckOffsets() { CHECK_OFFSET(Isolate::object_store_offset(), 28); NOT_IN_PRODUCT(CHECK_OFFSET(sizeof(ClassHeapStats), 120)); #endif -#if defined(TARGET_ARCH_MIPS) - // These offsets are embedded in precompiled instructions. We need simmips - // (compiler) and mips (runtime) to agree. - CHECK_OFFSET(Heap::TopOffset(Heap::kNew), 8); - CHECK_OFFSET(Thread::stack_limit_offset(), 4); - CHECK_OFFSET(Thread::object_null_offset(), 40); - CHECK_OFFSET(SingleTargetCache::upper_limit_offset(), 14); - CHECK_OFFSET(Isolate::object_store_offset(), 28); - NOT_IN_PRODUCT(CHECK_OFFSET(sizeof(ClassHeapStats), 120)); -#endif #if defined(TARGET_ARCH_ARM64) // These offsets are embedded in precompiled instructions. We need simarm64 // (compiler) and arm64 (runtime) to agree. @@ -701,8 +691,6 @@ const char* Dart::FeaturesString(Isolate* isolate, Snapshot::Kind kind) { : " softfp"); #elif defined(TARGET_ARCH_ARM64) buffer.AddString(" arm64"); -#elif defined(TARGET_ARCH_MIPS) - buffer.AddString(" mips"); #elif defined(TARGET_ARCH_IA32) buffer.AddString(" ia32"); #elif defined(TARGET_ARCH_X64) diff --git a/runtime/vm/debugger_mips.cc b/runtime/vm/debugger_mips.cc deleted file mode 100644 index 5fd151af927..00000000000 --- a/runtime/vm/debugger_mips.cc +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file -// for details. All rights reserved. Use of this source code is governed by a -// BSD-style license that can be found in the LICENSE file. 
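Context for the dart.cc hunk above: CheckOffsets() exists because precompiled instructions embed raw field offsets, so the cross-compiler (e.g. simmips) and the on-device runtime must agree on every one of them, and the deleted block was the MIPS row of that agreement. The same guard can be sketched with offsetof; the struct and expected values below are made up for illustration, not Dart's real layouts:

```cpp
// Sketch of a CHECK_OFFSET-style guard: complain loudly if a field ever
// moves, since generated code has the raw offset baked in. Hypothetical
// struct and numbers, for illustration only.
#include <cstddef>
#include <cstdio>

struct Thread {
  void* stack_limit;  // Expected first, at offset 0.
  void* object_null;  // Expected right after it.
};

#define CHECK_OFFSET(expr, expected)                                   \
  do {                                                                 \
    if ((expr) != static_cast<size_t>(expected)) {                     \
      std::fprintf(stderr, "%s: %zu != %zu\n", #expr,                  \
                   static_cast<size_t>(expr),                          \
                   static_cast<size_t>(expected));                     \
    }                                                                  \
  } while (0)

int main() {
  CHECK_OFFSET(offsetof(Thread, stack_limit), 0);
  CHECK_OFFSET(offsetof(Thread, object_null), sizeof(void*));
}
```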
- -#include "vm/globals.h" -#if defined(TARGET_ARCH_MIPS) - -#include "vm/code_patcher.h" -#include "vm/cpu.h" -#include "vm/debugger.h" -#include "vm/instructions.h" -#include "vm/stub_code.h" - -namespace dart { - -#ifndef PRODUCT - -RawCode* CodeBreakpoint::OrigStubAddress() const { - return saved_value_; -} - - -void CodeBreakpoint::PatchCode() { - ASSERT(!is_enabled_); - Code& stub_target = Code::Handle(); - switch (breakpoint_kind_) { - case RawPcDescriptors::kIcCall: - case RawPcDescriptors::kUnoptStaticCall: - stub_target = StubCode::ICCallBreakpoint_entry()->code(); - break; - case RawPcDescriptors::kRuntimeCall: - stub_target = StubCode::RuntimeCallBreakpoint_entry()->code(); - break; - default: - UNREACHABLE(); - } - const Code& code = Code::Handle(code_); - saved_value_ = CodePatcher::GetStaticCallTargetAt(pc_, code); - CodePatcher::PatchStaticCallAt(pc_, code, stub_target); - is_enabled_ = true; -} - - -void CodeBreakpoint::RestoreCode() { - ASSERT(is_enabled_); - const Code& code = Code::Handle(code_); - switch (breakpoint_kind_) { - case RawPcDescriptors::kIcCall: - case RawPcDescriptors::kUnoptStaticCall: - case RawPcDescriptors::kRuntimeCall: { - CodePatcher::PatchStaticCallAt(pc_, code, Code::Handle(saved_value_)); - break; - } - default: - UNREACHABLE(); - } - is_enabled_ = false; -} - -#endif // !PRODUCT - -} // namespace dart - -#endif // defined TARGET_ARCH_MIPS diff --git a/runtime/vm/deopt_instructions.cc b/runtime/vm/deopt_instructions.cc index d50513c79dc..3e26df5895a 100644 --- a/runtime/vm/deopt_instructions.cc +++ b/runtime/vm/deopt_instructions.cc @@ -73,7 +73,7 @@ DeoptContext::DeoptContext(const StackFrame* frame, function.HasOptionalParameters() ? 0 : function.num_fixed_parameters(); // The fixed size section of the (fake) Dart frame called via a stub by the -// optimized function contains FP, PP (ARM and MIPS only), PC-marker and +// optimized function contains FP, PP (ARM only), PC-marker and // return-address. This section is copied as well, so that its contained // values can be updated before returning to the deoptimized function. // Note: on DBC stack grows upwards unlike on all other architectures. diff --git a/runtime/vm/disassembler_mips.cc b/runtime/vm/disassembler_mips.cc deleted file mode 100644 index 23244b37071..00000000000 --- a/runtime/vm/disassembler_mips.cc +++ /dev/null @@ -1,792 +0,0 @@ -// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file -// for details. All rights reserved. Use of this source code is governed by a -// BSD-style license that can be found in the LICENSE file. - -#include "vm/disassembler.h" - -#include "vm/globals.h" // Needed here to get TARGET_ARCH_MIPS. -#if defined(TARGET_ARCH_MIPS) -#include "platform/assert.h" -#include "vm/instructions.h" - -namespace dart { - -#ifndef PRODUCT - -class MIPSDecoder : public ValueObject { - public: - MIPSDecoder(char* buffer, size_t buffer_size) - : buffer_(buffer), buffer_size_(buffer_size), buffer_pos_(0) { - buffer_[buffer_pos_] = '\0'; - } - - ~MIPSDecoder() {} - - // Writes one disassembled instruction into 'buffer' (0-terminated). - // Returns true if the instruction was successfully decoded, false otherwise. - void InstructionDecode(Instr* instr); - - private: - // Bottleneck functions to print into the out_buffer. - void Print(const char* str); - - // Printing of common values. 
- void PrintRegister(Register reg); - void PrintFRegister(FRegister reg); - void PrintFormat(Instr* instr); - - int FormatRegister(Instr* instr, const char* format); - int FormatFRegister(Instr* instr, const char* format); - int FormatOption(Instr* instr, const char* format); - void Format(Instr* instr, const char* format); - void Unknown(Instr* instr); - - void DecodeSpecial(Instr* instr); - void DecodeSpecial2(Instr* instr); - void DecodeRegImm(Instr* instr); - void DecodeCop1(Instr* instr); - - // Convenience functions. - char* get_buffer() const { return buffer_; } - char* current_position_in_buffer() { return buffer_ + buffer_pos_; } - size_t remaining_size_in_buffer() { return buffer_size_ - buffer_pos_; } - - char* buffer_; // Decode instructions into this buffer. - size_t buffer_size_; // The size of the character buffer. - size_t buffer_pos_; // Current character position in buffer. - - DISALLOW_ALLOCATION(); - DISALLOW_COPY_AND_ASSIGN(MIPSDecoder); -}; - - -// Support for assertions in the MIPSDecoder formatting functions. -#define STRING_STARTS_WITH(string, compare_string) \ - (strncmp(string, compare_string, strlen(compare_string)) == 0) - - -// Append the str to the output buffer. -void MIPSDecoder::Print(const char* str) { - char cur = *str++; - while (cur != '\0' && (buffer_pos_ < (buffer_size_ - 1))) { - buffer_[buffer_pos_++] = cur; - cur = *str++; - } - buffer_[buffer_pos_] = '\0'; -} - - -static const char* reg_names[kNumberOfCpuRegisters] = { - "zr", "at", "v0", "v1", "a0", "a1", "a2", "a3", "t0", "t1", "t2", - "t3", "t4", "t5", "t6", "t7", "s0", "s1", "s2", "thr", "s4", "s5", - "s6", "pp", "t8", "t9", "k0", "k1", "gp", "sp", "fp", "ra", -}; - - -static const char* freg_names[kNumberOfFRegisters] = { - "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9", "f10", - "f11", "f12", "f13", "f14", "f15", "f16", "f17", "f18", "f19", "f20", "f21", - "f22", "f23", "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31", -}; - - -void MIPSDecoder::PrintRegister(Register reg) { - ASSERT(0 <= reg); - ASSERT(reg < kNumberOfCpuRegisters); - Print(reg_names[reg]); -} - - -void MIPSDecoder::PrintFRegister(FRegister reg) { - ASSERT(0 <= reg); - ASSERT(reg < kNumberOfFRegisters); - Print(freg_names[reg]); -} - - -// Handle all register based formatting in these functions to reduce the -// complexity of FormatOption. 
-int MIPSDecoder::FormatRegister(Instr* instr, const char* format) { - ASSERT(format[0] == 'r'); - switch (format[1]) { - case 's': { // 'rs: Rs register - PrintRegister(instr->RsField()); - return 2; - } - case 't': { // 'rt: Rt register - PrintRegister(instr->RtField()); - return 2; - } - case 'd': { // 'rd: Rd register - PrintRegister(instr->RdField()); - return 2; - } - } - UNREACHABLE(); - return -1; -} - - -int MIPSDecoder::FormatFRegister(Instr* instr, const char* format) { - ASSERT(format[0] == 'f'); - switch (format[1]) { - case 's': { // 'fs: Fs register - PrintFRegister(instr->FsField()); - return 2; - } - case 't': { // 'ft: Ft register - PrintFRegister(instr->FtField()); - return 2; - } - case 'd': { // 'fd: Fd register - PrintFRegister(instr->FdField()); - return 2; - } - } - UNREACHABLE(); - return -1; -} - - -void MIPSDecoder::PrintFormat(Instr* instr) { - switch (instr->FormatField()) { - case FMT_S: { - Print("s"); - break; - } - case FMT_D: { - Print("d"); - break; - } - case FMT_W: { - Print("w"); - break; - } - case FMT_L: { - Print("l"); - break; - } - case FMT_PS: { - Print("ps"); - break; - } - default: { - Print("unknown"); - break; - } - } -} - - -// FormatOption takes a formatting string and interprets it based on -// the current instructions. The format string points to the first -// character of the option string (the option escape has already been -// consumed by the caller.) FormatOption returns the number of -// characters that were consumed from the formatting string. -int MIPSDecoder::FormatOption(Instr* instr, const char* format) { - switch (format[0]) { - case 'c': { - ASSERT(STRING_STARTS_WITH(format, "code")); - buffer_pos_ += - OS::SNPrint(current_position_in_buffer(), remaining_size_in_buffer(), - "0x%x", instr->BreakCodeField()); - return 4; - } - case 'h': { - ASSERT(STRING_STARTS_WITH(format, "hint")); - if (instr->SaField() == 0x10) { - // The high bit of the SA field is the only one that means something for - // JALR and JR. TODO(zra): Fill in the other cases for PREF if needed. 
- buffer_pos_ += OS::SNPrint(current_position_in_buffer(), - remaining_size_in_buffer(), ".hb"); - } else if (instr->SaField() != 0) { - buffer_pos_ += OS::SNPrint(current_position_in_buffer(), - remaining_size_in_buffer(), ".unknown"); - } - return 4; - } - case 'd': { - ASSERT(STRING_STARTS_WITH(format, "dest")); - int off = instr->SImmField() << 2; - uword destination = - reinterpret_cast(instr) + off + Instr::kInstrSize; - buffer_pos_ += - OS::SNPrint(current_position_in_buffer(), remaining_size_in_buffer(), - "%#" Px "", destination); - return 4; - } - case 'i': { - ASSERT(STRING_STARTS_WITH(format, "imm")); - if (format[3] == 'u') { - int32_t imm = instr->UImmField(); - buffer_pos_ += OS::SNPrint(current_position_in_buffer(), - remaining_size_in_buffer(), "0x%x", imm); - } else { - ASSERT(STRING_STARTS_WITH(format, "imms")); - int32_t imm = instr->SImmField(); - buffer_pos_ += OS::SNPrint(current_position_in_buffer(), - remaining_size_in_buffer(), "%d", imm); - } - return 4; - } - case 'r': { - return FormatRegister(instr, format); - } - case 'f': { - if (format[1] == 'm') { - ASSERT(STRING_STARTS_WITH(format, "fmt")); - PrintFormat(instr); - return 3; - } else { - return FormatFRegister(instr, format); - } - } - case 's': { - ASSERT(STRING_STARTS_WITH(format, "sa")); - buffer_pos_ += - OS::SNPrint(current_position_in_buffer(), remaining_size_in_buffer(), - "%d", instr->SaField()); - return 2; - } - default: { UNREACHABLE(); } - } - UNREACHABLE(); - return -1; -} - - -// Format takes a formatting string for a whole instruction and prints it into -// the output buffer. All escaped options are handed to FormatOption to be -// parsed further. -void MIPSDecoder::Format(Instr* instr, const char* format) { - char cur = *format++; - while ((cur != 0) && (buffer_pos_ < (buffer_size_ - 1))) { - if (cur == '\'') { // Single quote is used as the formatting escape. - format += FormatOption(instr, format); - } else { - buffer_[buffer_pos_++] = cur; - } - cur = *format++; - } - buffer_[buffer_pos_] = '\0'; -} - - -// For currently unimplemented decodings the disassembler calls Unknown(instr) -// which will just print "unknown" of the instruction bits. 
-void MIPSDecoder::Unknown(Instr* instr) { - Format(instr, "unknown"); -} - - -void MIPSDecoder::DecodeSpecial(Instr* instr) { - ASSERT(instr->OpcodeField() == SPECIAL); - switch (instr->FunctionField()) { - case ADDU: { - Format(instr, "addu 'rd, 'rs, 'rt"); - break; - } - case AND: { - Format(instr, "and 'rd, 'rs, 'rt"); - break; - } - case BREAK: { - Format(instr, "break 'code"); - if (instr->BreakCodeField() == Instr::kStopMessageCode) { - const char* message = *reinterpret_cast( - reinterpret_cast(instr) - Instr::kInstrSize); - buffer_pos_ += - OS::SNPrint(current_position_in_buffer(), - remaining_size_in_buffer(), " ; \"%s\"", message); - } - break; - } - case DIV: { - Format(instr, "div 'rs, 'rt"); - break; - } - case DIVU: { - Format(instr, "divu 'rs, 'rt"); - break; - } - case JALR: { - Format(instr, "jalr'hint 'rd, 'rs"); - break; - } - case JR: { - Format(instr, "jr'hint 'rs"); - break; - } - case MFHI: { - Format(instr, "mfhi 'rd"); - break; - } - case MFLO: { - Format(instr, "mflo 'rd"); - break; - } - case MOVCI: { - if (instr->Bit(16)) { - Format(instr, "movt 'rd, 'rs"); - } else { - Format(instr, "movf 'rd, 'rs"); - } - break; - } - case MOVN: { - Format(instr, "movn 'rd, 'rs, 'rt"); - break; - } - case MOVZ: { - Format(instr, "movz 'rd, 'rs, 'rt"); - break; - } - case MTHI: { - Format(instr, "mthi 'rs"); - break; - } - case MTLO: { - Format(instr, "mtlo 'rs"); - break; - } - case MULT: { - Format(instr, "mult 'rs, 'rt"); - break; - } - case MULTU: { - Format(instr, "multu 'rs, 'rt"); - break; - } - case NOR: { - Format(instr, "nor 'rd, 'rs, 'rt"); - break; - } - case OR: { - if (instr->RsField() == 0 && instr->RtField() == 0) { - Format(instr, "mov 'rd, 0"); - } else if (instr->RsField() == R0) { - Format(instr, "mov 'rd, 'rt"); - } else if (instr->RtField() == R0) { - Format(instr, "mov 'rd, 'rs"); - } else { - Format(instr, "or 'rd, 'rs, 'rt"); - } - break; - } - case SLL: { - if ((instr->RdField() == R0) && (instr->RtField() == R0) && - (instr->SaField() == 0)) { - Format(instr, "nop"); - } else { - Format(instr, "sll 'rd, 'rt, 'sa"); - } - break; - } - case SLLV: { - Format(instr, "sllv 'rd, 'rt, 'rs"); - break; - } - case SLT: { - Format(instr, "slt 'rd, 'rs, 'rt"); - break; - } - case SLTU: { - Format(instr, "sltu 'rd, 'rs, 'rt"); - break; - } - case SRA: { - if (instr->RsField() == 0) { - Format(instr, "sra 'rd, 'rt, 'sa"); - } else { - Unknown(instr); - } - break; - } - case SRAV: { - Format(instr, "srav 'rd, 'rt, 'rs"); - break; - } - case SRL: { - if (instr->RsField() == 0) { - Format(instr, "srl 'rd, 'rt, 'sa"); - } else { - Unknown(instr); - } - break; - } - case SRLV: { - if (instr->SaField() == 0) { - Format(instr, "srlv 'rd, 'rt, 'rs"); - } else { - Unknown(instr); - } - break; - } - case SUBU: { - Format(instr, "subu 'rd, 'rs, 'rt"); - break; - } - case XOR: { - Format(instr, "xor 'rd, 'rs, 'rt"); - break; - } - default: { - Unknown(instr); - break; - } - } -} - - -void MIPSDecoder::DecodeSpecial2(Instr* instr) { - ASSERT(instr->OpcodeField() == SPECIAL2); - switch (instr->FunctionField()) { - case MADD: { - Format(instr, "madd 'rs, 'rt"); - break; - } - case MADDU: { - Format(instr, "maddu 'rs, 'rt"); - break; - } - case CLO: { - Format(instr, "clo 'rd, 'rs"); - break; - } - case CLZ: { - Format(instr, "clz 'rd, 'rs"); - break; - } - default: { - Unknown(instr); - break; - } - } -} - - -void MIPSDecoder::DecodeRegImm(Instr* instr) { - ASSERT(instr->OpcodeField() == REGIMM); - switch (instr->RegImmFnField()) { - case BGEZ: { - Format(instr, "bgez 'rs, 
'dest"); - break; - } - case BGEZAL: { - Format(instr, "bgezal 'rs, 'dest"); - break; - } - case BLTZAL: { - Format(instr, "bltzal 'rs, 'dest"); - break; - } - case BGEZL: { - Format(instr, "bgezl 'rs, 'dest"); - break; - } - case BLTZ: { - Format(instr, "bltz 'rs, 'dest"); - break; - } - case BLTZL: { - Format(instr, "bltzl 'rs, 'dest"); - break; - } - default: { - Unknown(instr); - break; - } - } -} - -void MIPSDecoder::DecodeCop1(Instr* instr) { - ASSERT(instr->OpcodeField() == COP1); - if (instr->HasFormat()) { - // If the rs field is a valid format, then the function field identifies - // the instruction. - switch (instr->Cop1FunctionField()) { - case COP1_ADD: { - Format(instr, "add.'fmt 'fd, 'fs, 'ft"); - break; - } - case COP1_SUB: { - Format(instr, "sub.'fmt 'fd, 'fs, 'ft"); - break; - } - case COP1_MUL: { - Format(instr, "mul.'fmt 'fd, 'fs, 'ft"); - break; - } - case COP1_DIV: { - Format(instr, "div.'fmt 'fd, 'fs, 'ft"); - break; - } - case COP1_SQRT: { - Format(instr, "sqrt.'fmt 'fd, 'fs"); - break; - } - case COP1_MOV: { - Format(instr, "mov.'fmt 'fd, 'fs"); - break; - } - case COP1_NEG: { - Format(instr, "neg.'fmt 'fd, 'fs"); - break; - } - case COP1_C_F: { - Format(instr, "c.f.'fmt 'fs, 'ft"); - break; - } - case COP1_C_UN: { - Format(instr, "c.un.'fmt 'fs, 'ft"); - break; - } - case COP1_C_EQ: { - Format(instr, "c.eq.'fmt 'fs, 'ft"); - break; - } - case COP1_C_UEQ: { - Format(instr, "c.ueq.'fmt 'fs, 'ft"); - break; - } - case COP1_C_OLT: { - Format(instr, "c.olt.'fmt 'fs, 'ft"); - break; - } - case COP1_C_ULT: { - Format(instr, "c.ult.'fmt 'fs, 'ft"); - break; - } - case COP1_C_OLE: { - Format(instr, "c.ole.'fmt 'fs, 'ft"); - break; - } - case COP1_C_ULE: { - Format(instr, "c.ule.'fmt 'fs, 'ft"); - break; - } - case COP1_TRUNC_W: { - Format(instr, "trunc.w.'fmt 'fd, 'fs"); - break; - } - case COP1_CVT_S: { - Format(instr, "cvt.s.'fmt 'fd, 'fs"); - break; - } - case COP1_CVT_D: { - Format(instr, "cvt.d.'fmt 'fd, 'fs"); - break; - } - default: { - Unknown(instr); - break; - } - } - } else { - // If the rs field isn't a valid format, then it must be a sub-opcode. - switch (instr->Cop1SubField()) { - case COP1_MF: { - if (instr->Bits(0, 11) != 0) { - Unknown(instr); - } else { - Format(instr, "mfc1 'rt, 'fs"); - } - break; - } - case COP1_MT: { - if (instr->Bits(0, 11) != 0) { - Unknown(instr); - } else { - Format(instr, "mtc1 'rt, 'fs"); - } - break; - } - case COP1_BC: { - ASSERT(instr->Bit(17) == 0); - if (instr->Bit(16) == 1) { // Branch on true. - Format(instr, "bc1t 'dest"); - } else { // Branch on false. 
- Format(instr, "bc1f 'dest"); - } - break; - } - default: { - Unknown(instr); - break; - } - } - } -} - -void MIPSDecoder::InstructionDecode(Instr* instr) { - switch (instr->OpcodeField()) { - case SPECIAL: { - DecodeSpecial(instr); - break; - } - case SPECIAL2: { - DecodeSpecial2(instr); - break; - } - case REGIMM: { - DecodeRegImm(instr); - break; - } - case COP1: { - DecodeCop1(instr); - break; - } - case ADDIU: { - Format(instr, "addiu 'rt, 'rs, 'imms"); - break; - } - case ANDI: { - Format(instr, "andi 'rt, 'rs, 'immu"); - break; - } - case BEQ: { - Format(instr, "beq 'rs, 'rt, 'dest"); - break; - } - case BEQL: { - Format(instr, "beql 'rs, 'rt, 'dest"); - break; - } - case BGTZ: { - Format(instr, "bgtz 'rs, 'dest"); - break; - } - case BGTZL: { - Format(instr, "bgtzl 'rs, 'dest"); - break; - } - case BLEZ: { - Format(instr, "blez 'rs, 'dest"); - break; - } - case BLEZL: { - Format(instr, "blezl 'rs, 'dest"); - break; - } - case BNE: { - Format(instr, "bne 'rs, 'rt, 'dest"); - break; - } - case BNEL: { - Format(instr, "bnel 'rs, 'rt, 'dest"); - break; - } - case LB: { - Format(instr, "lb 'rt, 'imms('rs)"); - break; - } - case LBU: { - Format(instr, "lbu 'rt, 'imms('rs)"); - break; - } - case LDC1: { - Format(instr, "ldc1 'ft, 'imms('rs)"); - break; - } - case LH: { - Format(instr, "lh 'rt, 'imms('rs)"); - break; - } - case LHU: { - Format(instr, "lhu 'rt, 'imms('rs)"); - break; - } - case LUI: { - Format(instr, "lui 'rt, 'immu"); - break; - } - case LL: { - Format(instr, "ll 'rt, 'imms('rs)"); - break; - } - case LW: { - Format(instr, "lw 'rt, 'imms('rs)"); - break; - } - case LWC1: { - Format(instr, "lwc1 'ft, 'imms('rs)"); - break; - } - case ORI: { - Format(instr, "ori 'rt, 'rs, 'immu"); - break; - } - case SB: { - Format(instr, "sb 'rt, 'imms('rs)"); - break; - } - case SC: { - Format(instr, "sc 'rt, 'imms('rs)"); - break; - } - case SLTI: { - Format(instr, "slti 'rt, 'rs, 'imms"); - break; - } - case SLTIU: { - Format(instr, "sltiu 'rt, 'rs, 'imms"); - break; - } - case SH: { - Format(instr, "sh 'rt, 'imms('rs)"); - break; - } - case SDC1: { - Format(instr, "sdc1 'ft, 'imms('rs)"); - break; - } - case SW: { - Format(instr, "sw 'rt, 'imms('rs)"); - break; - } - case SWC1: { - Format(instr, "swc1 'ft, 'imms('rs)"); - break; - } - case XORI: { - Format(instr, "xori 'rt, 'rs, 'immu"); - break; - } - default: { - Unknown(instr); - break; - } - } -} - - -void Disassembler::DecodeInstruction(char* hex_buffer, - intptr_t hex_size, - char* human_buffer, - intptr_t human_size, - int* out_instr_len, - const Code& code, - Object** object, - uword pc) { - MIPSDecoder decoder(human_buffer, human_size); - Instr* instr = Instr::At(pc); - decoder.InstructionDecode(instr); - OS::SNPrint(hex_buffer, hex_size, "%08x", instr->InstructionBits()); - if (out_instr_len) { - *out_instr_len = Instr::kInstrSize; - } - - *object = NULL; - if (!code.IsNull()) { - *object = &Object::Handle(); - if (!DecodeLoadObjectFromPoolOrThread(pc, code, *object)) { - *object = NULL; - } - } -} - -#endif // !PRODUCT - -} // namespace dart - -#endif // defined TARGET_ARCH_MIPS diff --git a/runtime/vm/flow_graph_compiler_mips.cc b/runtime/vm/flow_graph_compiler_mips.cc deleted file mode 100644 index 5002660e7cf..00000000000 --- a/runtime/vm/flow_graph_compiler_mips.cc +++ /dev/null @@ -1,1851 +0,0 @@ -// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file -// for details. All rights reserved. Use of this source code is governed by a -// BSD-style license that can be found in the LICENSE file. 
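The decoder deleted above is driven end to end by two primitives: masked field extraction (Instr::Bits) and sign-extension of the 16-bit immediate by a shift pair (Instr::SImmField). The standalone snippet below exercises both on one real MIPS word, addiu sp, sp, -16 (0x27bdfff0), using the shift/width constants from the deleted constants_mips.h; it is a demonstration of the idiom, not a replacement decoder.

```cpp
// Field extraction and immediate sign-extension, the two building blocks of
// the deleted MIPS disassembler, applied to addiu sp, sp, -16.
#include <cstdint>
#include <cstdio>

// Extract a 'count'-bit field starting at bit 'shift' (cf. Instr::Bits).
static uint32_t Bits(uint32_t instr, int shift, int count) {
  return (instr >> shift) & ((1u << count) - 1);
}

// Sign-extend the 16-bit immediate with a shift pair, the same idiom as
// SImmField above; the left shift is done on an unsigned value to sidestep
// signed-overflow pitfalls.
static int32_t SImm16(uint32_t instr) {
  return static_cast<int32_t>(Bits(instr, 0, 16) << 16) >> 16;
}

int main() {
  const uint32_t instr = 0x27bdfff0;                        // addiu sp, sp, -16
  const int opcode = static_cast<int>(Bits(instr, 26, 6));  // kOpcodeShift/Bits
  const int rs = static_cast<int>(Bits(instr, 21, 5));      // kRsShift/kRsBits
  const int rt = static_cast<int>(Bits(instr, 16, 5));      // kRtShift/kRtBits
  if (opcode == 9) {  // ADDIU in the deleted Opcode enum.
    std::printf("addiu r%d, r%d, %d\n", rt, rs, SImm16(instr));  // r29 r29 -16
  }
}
```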
- -#include "vm/globals.h" // Needed here to get TARGET_ARCH_MIPS. -#if defined(TARGET_ARCH_MIPS) - -#include "vm/flow_graph_compiler.h" - -#include "vm/ast_printer.h" -#include "vm/compiler.h" -#include "vm/dart_entry.h" -#include "vm/deopt_instructions.h" -#include "vm/il_printer.h" -#include "vm/instructions.h" -#include "vm/locations.h" -#include "vm/object_store.h" -#include "vm/parser.h" -#include "vm/stack_frame.h" -#include "vm/stub_code.h" -#include "vm/symbols.h" - -namespace dart { - -DEFINE_FLAG(bool, trap_on_deoptimization, false, "Trap on deoptimization."); - - -FlowGraphCompiler::~FlowGraphCompiler() { - // BlockInfos are zone-allocated, so their destructors are not called. - // Verify the labels explicitly here. - for (int i = 0; i < block_info_.length(); ++i) { - ASSERT(!block_info_[i]->jump_label()->IsLinked()); - } -} - - -bool FlowGraphCompiler::SupportsUnboxedDoubles() { - return true; -} - - -bool FlowGraphCompiler::SupportsUnboxedMints() { - return true; -} - - -bool FlowGraphCompiler::SupportsUnboxedSimd128() { - return false; -} - - -bool FlowGraphCompiler::SupportsHardwareDivision() { - return true; -} - - -bool FlowGraphCompiler::CanConvertUnboxedMintToDouble() { - // TODO(johnmccutchan): Investigate possibility on MIPS once - // mints are implemented there. - return false; -} - - -void FlowGraphCompiler::EnterIntrinsicMode() { - ASSERT(!intrinsic_mode()); - intrinsic_mode_ = true; - assembler()->set_constant_pool_allowed(false); -} - - -void FlowGraphCompiler::ExitIntrinsicMode() { - ASSERT(intrinsic_mode()); - intrinsic_mode_ = false; - assembler()->set_constant_pool_allowed(true); -} - - -RawTypedData* CompilerDeoptInfo::CreateDeoptInfo(FlowGraphCompiler* compiler, - DeoptInfoBuilder* builder, - const Array& deopt_table) { - if (deopt_env_ == NULL) { - ++builder->current_info_number_; - return TypedData::null(); - } - - intptr_t stack_height = compiler->StackSize(); - AllocateIncomingParametersRecursive(deopt_env_, &stack_height); - - intptr_t slot_ix = 0; - Environment* current = deopt_env_; - - // Emit all kMaterializeObject instructions describing objects to be - // materialized on the deoptimization as a prefix to the deoptimization info. - EmitMaterializations(deopt_env_, builder); - - // The real frame starts here. - builder->MarkFrameStart(); - - Zone* zone = compiler->zone(); - - builder->AddPp(current->function(), slot_ix++); - builder->AddPcMarker(Function::ZoneHandle(zone), slot_ix++); - builder->AddCallerFp(slot_ix++); - builder->AddReturnAddress(current->function(), deopt_id(), slot_ix++); - - - // Emit all values that are needed for materialization as a part of the - // expression stack for the bottom-most frame. This guarantees that GC - // will be able to find them during materialization. - slot_ix = builder->EmitMaterializationArguments(slot_ix); - - // For the innermost environment, set outgoing arguments and the locals. - for (intptr_t i = current->Length() - 1; - i >= current->fixed_parameter_count(); i--) { - builder->AddCopy(current->ValueAt(i), current->LocationAt(i), slot_ix++); - } - - Environment* previous = current; - current = current->outer(); - while (current != NULL) { - builder->AddPp(current->function(), slot_ix++); - builder->AddPcMarker(previous->function(), slot_ix++); - builder->AddCallerFp(slot_ix++); - - // For any outer environment the deopt id is that of the call instruction - // which is recorded in the outer environment. 
- builder->AddReturnAddress(current->function(), - Thread::ToDeoptAfter(current->deopt_id()), - slot_ix++); - - // The values of outgoing arguments can be changed from the inlined call so - // we must read them from the previous environment. - for (intptr_t i = previous->fixed_parameter_count() - 1; i >= 0; i--) { - builder->AddCopy(previous->ValueAt(i), previous->LocationAt(i), - slot_ix++); - } - - // Set the locals, note that outgoing arguments are not in the environment. - for (intptr_t i = current->Length() - 1; - i >= current->fixed_parameter_count(); i--) { - builder->AddCopy(current->ValueAt(i), current->LocationAt(i), slot_ix++); - } - - // Iterate on the outer environment. - previous = current; - current = current->outer(); - } - // The previous pointer is now the outermost environment. - ASSERT(previous != NULL); - - // Set slots for the outermost environment. - builder->AddCallerPp(slot_ix++); - builder->AddPcMarker(previous->function(), slot_ix++); - builder->AddCallerFp(slot_ix++); - builder->AddCallerPc(slot_ix++); - - // For the outermost environment, set the incoming arguments. - for (intptr_t i = previous->fixed_parameter_count() - 1; i >= 0; i--) { - builder->AddCopy(previous->ValueAt(i), previous->LocationAt(i), slot_ix++); - } - - return builder->CreateDeoptInfo(deopt_table); -} - - -void CompilerDeoptInfoWithStub::GenerateCode(FlowGraphCompiler* compiler, - intptr_t stub_ix) { - // Calls do not need stubs, they share a deoptimization trampoline. - ASSERT(reason() != ICData::kDeoptAtCall); - Assembler* assembler = compiler->assembler(); -#define __ assembler-> - __ Comment("%s", Name()); - __ Bind(entry_label()); - if (FLAG_trap_on_deoptimization) { - __ break_(0); - } - - ASSERT(deopt_env() != NULL); - __ Push(CODE_REG); - __ BranchLink(*StubCode::Deoptimize_entry()); - set_pc_offset(assembler->CodeSize()); -#undef __ -} - - -#define __ assembler()-> - - -// Fall through if bool_register contains null. -void FlowGraphCompiler::GenerateBoolToJump(Register bool_register, - Label* is_true, - Label* is_false) { - __ Comment("BoolToJump"); - Label fall_through; - __ BranchEqual(bool_register, Object::null_object(), &fall_through); - __ BranchEqual(bool_register, Bool::True(), is_true); - __ b(is_false); - __ Bind(&fall_through); -} - - -// A0: instance (must be preserved). -// A1: instantiator type arguments (if used). -// A2: function type arguments (if used). -// Clobbers A3. -RawSubtypeTestCache* FlowGraphCompiler::GenerateCallSubtypeTestStub( - TypeTestStubKind test_kind, - Register instance_reg, - Register instantiator_type_arguments_reg, - Register function_type_arguments_reg, - Register temp_reg, - Label* is_instance_lbl, - Label* is_not_instance_lbl) { - __ Comment("CallSubtypeTestStub"); - ASSERT(instance_reg == A0); - ASSERT(temp_reg == kNoRegister); // Unused on MIPS. 
- const SubtypeTestCache& type_test_cache = - SubtypeTestCache::ZoneHandle(zone(), SubtypeTestCache::New()); - __ LoadUniqueObject(A3, type_test_cache); - if (test_kind == kTestTypeOneArg) { - ASSERT(instantiator_type_arguments_reg == kNoRegister); - ASSERT(function_type_arguments_reg == kNoRegister); - __ BranchLink(*StubCode::Subtype1TestCache_entry()); - } else if (test_kind == kTestTypeTwoArgs) { - ASSERT(instantiator_type_arguments_reg == kNoRegister); - ASSERT(function_type_arguments_reg == kNoRegister); - __ BranchLink(*StubCode::Subtype2TestCache_entry()); - } else if (test_kind == kTestTypeFourArgs) { - ASSERT(instantiator_type_arguments_reg == A1); - ASSERT(function_type_arguments_reg == A2); - __ BranchLink(*StubCode::Subtype4TestCache_entry()); - } else { - UNREACHABLE(); - } - // Result is in V0: null -> not found, otherwise Bool::True or Bool::False. - GenerateBoolToJump(V0, is_instance_lbl, is_not_instance_lbl); - return type_test_cache.raw(); -} - - -// Jumps to labels 'is_instance' or 'is_not_instance' respectively, if -// type test is conclusive, otherwise fallthrough if a type test could not -// be completed. -// A0: instance being type checked (preserved). -// Clobbers T0. -RawSubtypeTestCache* -FlowGraphCompiler::GenerateInstantiatedTypeWithArgumentsTest( - TokenPosition token_pos, - const AbstractType& type, - Label* is_instance_lbl, - Label* is_not_instance_lbl) { - __ Comment("InstantiatedTypeWithArgumentsTest"); - ASSERT(type.IsInstantiated()); - const Class& type_class = Class::ZoneHandle(zone(), type.type_class()); - ASSERT(type.IsFunctionType() || (type_class.NumTypeArguments() > 0)); - const Register kInstanceReg = A0; - Error& bound_error = Error::Handle(zone()); - const Type& int_type = Type::Handle(zone(), Type::IntType()); - const bool smi_is_ok = - int_type.IsSubtypeOf(type, &bound_error, NULL, Heap::kOld); - // Malformed type should have been handled at graph construction time. - ASSERT(smi_is_ok || bound_error.IsNull()); - __ andi(CMPRES1, kInstanceReg, Immediate(kSmiTagMask)); - if (smi_is_ok) { - __ beq(CMPRES1, ZR, is_instance_lbl); - } else { - __ beq(CMPRES1, ZR, is_not_instance_lbl); - } - // A function type test requires checking the function signature. - if (!type.IsFunctionType()) { - const intptr_t num_type_args = type_class.NumTypeArguments(); - const intptr_t num_type_params = type_class.NumTypeParameters(); - const intptr_t from_index = num_type_args - num_type_params; - const TypeArguments& type_arguments = - TypeArguments::ZoneHandle(zone(), type.arguments()); - const bool is_raw_type = type_arguments.IsNull() || - type_arguments.IsRaw(from_index, num_type_params); - if (is_raw_type) { - const Register kClassIdReg = T0; - // dynamic type argument, check only classes. - __ LoadClassId(kClassIdReg, kInstanceReg); - __ BranchEqual(kClassIdReg, Immediate(type_class.id()), is_instance_lbl); - // List is a very common case. - if (IsListClass(type_class)) { - GenerateListTypeCheck(kClassIdReg, is_instance_lbl); - } - return GenerateSubtype1TestCacheLookup( - token_pos, type_class, is_instance_lbl, is_not_instance_lbl); - } - // If one type argument only, check if type argument is Object or dynamic. - if (type_arguments.Length() == 1) { - const AbstractType& tp_argument = - AbstractType::ZoneHandle(zone(), type_arguments.TypeAt(0)); - ASSERT(!tp_argument.IsMalformed()); - if (tp_argument.IsType()) { - ASSERT(tp_argument.HasResolvedTypeClass()); - // Check if type argument is dynamic or Object. 
-        const Type& object_type = Type::Handle(zone(), Type::ObjectType());
-        if (object_type.IsSubtypeOf(tp_argument, NULL, NULL, Heap::kOld)) {
-          // Instance class test only necessary.
-          return GenerateSubtype1TestCacheLookup(
-              token_pos, type_class, is_instance_lbl, is_not_instance_lbl);
-        }
-      }
-    }
-  }
-  // Regular subtype test cache involving instance's type arguments.
-  const Register kInstantiatorTypeArgumentsReg = kNoRegister;
-  const Register kFunctionTypeArgumentsReg = kNoRegister;
-  const Register kTempReg = kNoRegister;
-  // A0: instance (must be preserved).
-  return GenerateCallSubtypeTestStub(kTestTypeTwoArgs, kInstanceReg,
-                                     kInstantiatorTypeArgumentsReg,
-                                     kFunctionTypeArgumentsReg, kTempReg,
-                                     is_instance_lbl, is_not_instance_lbl);
-}
-
-
-void FlowGraphCompiler::CheckClassIds(Register class_id_reg,
-                                      const GrowableArray<intptr_t>& class_ids,
-                                      Label* is_equal_lbl,
-                                      Label* is_not_equal_lbl) {
-  __ Comment("CheckClassIds");
-  for (intptr_t i = 0; i < class_ids.length(); i++) {
-    __ BranchEqual(class_id_reg, Immediate(class_ids[i]), is_equal_lbl);
-  }
-  __ b(is_not_equal_lbl);
-}
-
-
-// Testing against an instantiated type with no arguments, without
-// SubtypeTestCache.
-// A0: instance being type checked (preserved).
-// Clobbers: T0, T1, T2
-// Returns true if there is a fallthrough.
-bool FlowGraphCompiler::GenerateInstantiatedTypeNoArgumentsTest(
-    TokenPosition token_pos,
-    const AbstractType& type,
-    Label* is_instance_lbl,
-    Label* is_not_instance_lbl) {
-  __ Comment("InstantiatedTypeNoArgumentsTest");
-  ASSERT(type.IsInstantiated());
-  if (type.IsFunctionType()) {
-    // Fallthrough.
-    return true;
-  }
-  const Class& type_class = Class::Handle(zone(), type.type_class());
-  ASSERT(type_class.NumTypeArguments() == 0);
-
-  const Register kInstanceReg = A0;
-  __ andi(T0, A0, Immediate(kSmiTagMask));
-  // If instance is Smi, check directly.
-  const Class& smi_class = Class::Handle(zone(), Smi::Class());
-  if (smi_class.IsSubtypeOf(Object::null_type_arguments(), type_class,
-                            Object::null_type_arguments(), NULL, NULL,
-                            Heap::kOld)) {
-    __ beq(T0, ZR, is_instance_lbl);
-  } else {
-    __ beq(T0, ZR, is_not_instance_lbl);
-  }
-  const Register kClassIdReg = T0;
-  __ LoadClassId(kClassIdReg, kInstanceReg);
-  // See ClassFinalizer::ResolveSuperTypeAndInterfaces for list of restricted
-  // interfaces.
-  // Bool interface can be implemented only by core class Bool.
-  if (type.IsBoolType()) {
-    __ BranchEqual(kClassIdReg, Immediate(kBoolCid), is_instance_lbl);
-    __ b(is_not_instance_lbl);
-    return false;
-  }
-  // Custom checking for numbers (Smi, Mint, Bigint and Double).
-  // Note that instance is not Smi (checked above).
-  if (type.IsNumberType() || type.IsIntType() || type.IsDoubleType()) {
-    GenerateNumberTypeCheck(kClassIdReg, type, is_instance_lbl,
-                            is_not_instance_lbl);
-    return false;
-  }
-  if (type.IsStringType()) {
-    GenerateStringTypeCheck(kClassIdReg, is_instance_lbl, is_not_instance_lbl);
-    return false;
-  }
-  if (type.IsDartFunctionType()) {
-    // Check if instance is a closure.
-    __ BranchEqual(kClassIdReg, Immediate(kClosureCid), is_instance_lbl);
-    return true;  // Fall through
-  }
-  // Compare if the classes are equal.
-  if (!type_class.is_abstract()) {
-    __ BranchEqual(kClassIdReg, Immediate(type_class.id()), is_instance_lbl);
-  }
-  // Otherwise fallthrough.
-  return true;
-}
-
-
-// Uses SubtypeTestCache to store instance class and result.
-// A0: instance to test.
-// Clobbers A1-A3, T0-T3.
-// Immediate class test already done.
-// TODO(srdjan): Implement a quicker subtype check, as type test
-// arrays can grow too high, but they may be useful when optimizing
-// code (type-feedback).
-RawSubtypeTestCache* FlowGraphCompiler::GenerateSubtype1TestCacheLookup(
-    TokenPosition token_pos,
-    const Class& type_class,
-    Label* is_instance_lbl,
-    Label* is_not_instance_lbl) {
-  __ Comment("Subtype1TestCacheLookup");
-  const Register kInstanceReg = A0;
-  __ LoadClass(T0, kInstanceReg);
-  // T0: instance class.
-  // Check immediate superclass equality.
-  __ lw(T0, FieldAddress(T0, Class::super_type_offset()));
-  __ lw(T0, FieldAddress(T0, Type::type_class_id_offset()));
-  __ BranchEqual(T0, Immediate(Smi::RawValue(type_class.id())),
-                 is_instance_lbl);
-
-  const Register kInstantiatorTypeArgumentsReg = kNoRegister;
-  const Register kFunctionTypeArgumentsReg = kNoRegister;
-  const Register kTempReg = kNoRegister;
-  return GenerateCallSubtypeTestStub(kTestTypeOneArg, kInstanceReg,
-                                     kInstantiatorTypeArgumentsReg,
-                                     kFunctionTypeArgumentsReg, kTempReg,
-                                     is_instance_lbl, is_not_instance_lbl);
-}
-
-
-// Generates inlined check if 'type' is a type parameter or type itself
-// A0: instance (preserved).
-RawSubtypeTestCache* FlowGraphCompiler::GenerateUninstantiatedTypeTest(
-    TokenPosition token_pos,
-    const AbstractType& type,
-    Label* is_instance_lbl,
-    Label* is_not_instance_lbl) {
-  __ Comment("UninstantiatedTypeTest");
-  ASSERT(!type.IsInstantiated());
-  // Skip check if destination is a dynamic type.
-  if (type.IsTypeParameter()) {
-    const TypeParameter& type_param = TypeParameter::Cast(type);
-    __ lw(A1, Address(SP, 1 * kWordSize));  // Get instantiator type args.
-    __ lw(A2, Address(SP, 0 * kWordSize));  // Get function type args.
-    // A1: instantiator type arguments.
-    // A2: function type arguments.
-    const Register kTypeArgumentsReg =
-        type_param.IsClassTypeParameter() ? A1 : A2;
-    // Check if type arguments are null, i.e. equivalent to vector of dynamic.
-    __ LoadObject(T7, Object::null_object());
-    __ beq(kTypeArgumentsReg, T7, is_instance_lbl);
-    __ lw(T2, FieldAddress(kTypeArgumentsReg,
-                           TypeArguments::type_at_offset(type_param.index())));
-    // T2: concrete type of type.
-    // Check if type argument is dynamic.
-    __ BranchEqual(T2, Object::dynamic_type(), is_instance_lbl);
-    __ BranchEqual(T2, Type::ZoneHandle(zone(), Type::ObjectType()),
-                   is_instance_lbl);
-    // TODO(regis): Optimize void type as well once allowed as type argument.
-
-    // For Smi check quickly against int and num interfaces.
-    Label not_smi;
-    __ andi(CMPRES1, A0, Immediate(kSmiTagMask));
-    __ bne(CMPRES1, ZR, &not_smi);  // Value is Smi?
-    __ BranchEqual(T2, Type::ZoneHandle(zone(), Type::IntType()),
-                   is_instance_lbl);
-    __ BranchEqual(T2, Type::ZoneHandle(zone(), Type::Number()),
-                   is_instance_lbl);
-    // Smi must be handled in runtime.
-    Label fall_through;
-    __ b(&fall_through);
-
-    __ Bind(&not_smi);
-    // A0: instance.
-    // A1: instantiator type arguments.
-    // A2: function type arguments.
- const Register kInstanceReg = A0; - const Register kInstantiatorTypeArgumentsReg = A1; - const Register kFunctionTypeArgumentsReg = A2; - const Register kTempReg = kNoRegister; - const SubtypeTestCache& type_test_cache = SubtypeTestCache::ZoneHandle( - zone(), GenerateCallSubtypeTestStub( - kTestTypeFourArgs, kInstanceReg, - kInstantiatorTypeArgumentsReg, kFunctionTypeArgumentsReg, - kTempReg, is_instance_lbl, is_not_instance_lbl)); - __ Bind(&fall_through); - return type_test_cache.raw(); - } - if (type.IsType()) { - const Register kInstanceReg = A0; - const Register kInstantiatorTypeArgumentsReg = A1; - const Register kFunctionTypeArgumentsReg = A2; - __ andi(CMPRES1, kInstanceReg, Immediate(kSmiTagMask)); - __ beq(CMPRES1, ZR, is_not_instance_lbl); // Is instance Smi? - __ lw(kInstantiatorTypeArgumentsReg, Address(SP, 1 * kWordSize)); - __ lw(kFunctionTypeArgumentsReg, Address(SP, 0 * kWordSize)); - // Uninstantiated type class is known at compile time, but the type - // arguments are determined at runtime by the instantiator. - const Register kTempReg = kNoRegister; - return GenerateCallSubtypeTestStub(kTestTypeFourArgs, kInstanceReg, - kInstantiatorTypeArgumentsReg, - kFunctionTypeArgumentsReg, kTempReg, - is_instance_lbl, is_not_instance_lbl); - } - return SubtypeTestCache::null(); -} - - -// Inputs: -// - A0: instance being type checked (preserved). -// - A1: optional instantiator type arguments (preserved). -// - A2: optional function type arguments (preserved). -// Returns: -// - preserved instance in A0, optional instantiator type arguments in A1, and -// optional function type arguments in A2. -// Clobbers: T0, T1, T2 -// Note that this inlined code must be followed by the runtime_call code, as it -// may fall through to it. Otherwise, this inline code will jump to the label -// is_instance or to the label is_not_instance. -RawSubtypeTestCache* FlowGraphCompiler::GenerateInlineInstanceof( - TokenPosition token_pos, - const AbstractType& type, - Label* is_instance_lbl, - Label* is_not_instance_lbl) { - __ Comment("InlineInstanceof"); - if (type.IsInstantiated()) { - const Class& type_class = Class::ZoneHandle(zone(), type.type_class()); - // A class equality check is only applicable with a dst type (not a - // function type) of a non-parameterized class or with a raw dst type of - // a parameterized class. - if (type.IsFunctionType() || (type_class.NumTypeArguments() > 0)) { - return GenerateInstantiatedTypeWithArgumentsTest( - token_pos, type, is_instance_lbl, is_not_instance_lbl); - // Fall through to runtime call. - } - const bool has_fall_through = GenerateInstantiatedTypeNoArgumentsTest( - token_pos, type, is_instance_lbl, is_not_instance_lbl); - if (has_fall_through) { - // If test non-conclusive so far, try the inlined type-test cache. - // 'type' is known at compile time. - return GenerateSubtype1TestCacheLookup( - token_pos, type_class, is_instance_lbl, is_not_instance_lbl); - } else { - return SubtypeTestCache::null(); - } - } - return GenerateUninstantiatedTypeTest(token_pos, type, is_instance_lbl, - is_not_instance_lbl); -} - - -// If instanceof type test cannot be performed successfully at compile time and -// therefore eliminated, optimize it by adding inlined tests for: -// - NULL -> return type == Null (type is not Object or dynamic). -// - Smi -> compile time subtype check (only if dst class is not parameterized). -// - Class equality (only if class is not parameterized). -// Inputs: -// - A0: object. -// - A1: instantiator type arguments or raw_null. 
-// - A2: function type arguments or raw_null. -// Returns: -// - true or false in V0. -void FlowGraphCompiler::GenerateInstanceOf(TokenPosition token_pos, - intptr_t deopt_id, - const AbstractType& type, - LocationSummary* locs) { - ASSERT(type.IsFinalized() && !type.IsMalformed() && !type.IsMalbounded()); - ASSERT(!type.IsObjectType() && !type.IsDynamicType() && !type.IsVoidType()); - - // Preserve instantiator type arguments (A1) and function type arguments (A2). - __ addiu(SP, SP, Immediate(-2 * kWordSize)); - __ sw(A1, Address(SP, 1 * kWordSize)); - __ sw(A2, Address(SP, 0 * kWordSize)); - - Label is_instance, is_not_instance; - // If type is instantiated and non-parameterized, we can inline code - // checking whether the tested instance is a Smi. - if (type.IsInstantiated()) { - // A null object is only an instance of Null, Object, and dynamic. - // Object and dynamic have already been checked above (if the type is - // instantiated). So we can return false here if the instance is null, - // unless the type is Null (and if the type is instantiated). - // We can only inline this null check if the type is instantiated at compile - // time, since an uninstantiated type at compile time could be Null, Object, - // or dynamic at run time. - __ BranchEqual(A0, Object::null_object(), - type.IsNullType() ? &is_instance : &is_not_instance); - } - - // Generate inline instanceof test. - SubtypeTestCache& test_cache = SubtypeTestCache::ZoneHandle(zone()); - test_cache = - GenerateInlineInstanceof(token_pos, type, &is_instance, &is_not_instance); - - // test_cache is null if there is no fall-through. - Label done; - if (!test_cache.IsNull()) { - // Generate runtime call. - __ lw(A1, Address(SP, 1 * kWordSize)); // Get instantiator type args. - __ lw(A2, Address(SP, 0 * kWordSize)); // Get function type args. - __ addiu(SP, SP, Immediate(-6 * kWordSize)); - __ LoadObject(TMP, Object::null_object()); - __ sw(TMP, Address(SP, 5 * kWordSize)); // Make room for the result. - __ sw(A0, Address(SP, 4 * kWordSize)); // Push the instance. - __ LoadObject(TMP, type); - __ sw(TMP, Address(SP, 3 * kWordSize)); // Push the type. - __ sw(A1, Address(SP, 2 * kWordSize)); // Push instantiator type args. - __ sw(A2, Address(SP, 1 * kWordSize)); // Push function type args. - __ LoadUniqueObject(A0, test_cache); - __ sw(A0, Address(SP, 0 * kWordSize)); - GenerateRuntimeCall(token_pos, deopt_id, kInstanceofRuntimeEntry, 5, locs); - // Pop the parameters supplied to the runtime entry. The result of the - // instanceof runtime call will be left as the result of the operation. - __ lw(V0, Address(SP, 5 * kWordSize)); - __ b(&done); - __ delay_slot()->addiu(SP, SP, Immediate(6 * kWordSize)); - } - __ Bind(&is_not_instance); - __ LoadObject(V0, Bool::Get(false)); - __ b(&done); - - __ Bind(&is_instance); - __ LoadObject(V0, Bool::Get(true)); - __ Bind(&done); - // Remove instantiator type arguments and function type arguments. - __ Drop(2); -} - - -// Optimize assignable type check by adding inlined tests for: -// - NULL -> return NULL. -// - Smi -> compile time subtype check (only if dst class is not parameterized). -// - Class equality (only if class is not parameterized). -// Inputs: -// - A0: instance being type checked. -// - A1: instantiator type arguments or raw_null. -// - A2: function type arguments or raw_null. -// Returns: -// - object in A0 for successful assignable check (or throws TypeError). 
-// Clobbers: T0, T1, T2 -// Performance notes: positive checks must be quick, negative checks can be slow -// as they throw an exception. -void FlowGraphCompiler::GenerateAssertAssignable(TokenPosition token_pos, - intptr_t deopt_id, - const AbstractType& dst_type, - const String& dst_name, - LocationSummary* locs) { - __ Comment("AssertAssignable"); - ASSERT(!token_pos.IsClassifying()); - ASSERT(!dst_type.IsNull()); - ASSERT(dst_type.IsFinalized()); - // Assignable check is skipped in FlowGraphBuilder, not here. - ASSERT(dst_type.IsMalformedOrMalbounded() || - (!dst_type.IsDynamicType() && !dst_type.IsObjectType() && - !dst_type.IsVoidType())); - - // Preserve instantiator type arguments (A1) and function type arguments (A2). - __ addiu(SP, SP, Immediate(-2 * kWordSize)); - __ sw(A1, Address(SP, 1 * kWordSize)); - __ sw(A2, Address(SP, 0 * kWordSize)); - - // A null object is always assignable and is returned as result. - Label is_assignable, runtime_call; - - __ BranchEqual(A0, Object::null_object(), &is_assignable); - - // Generate throw new TypeError() if the type is malformed or malbounded. - if (dst_type.IsMalformedOrMalbounded()) { - __ addiu(SP, SP, Immediate(-4 * kWordSize)); - __ LoadObject(TMP, Object::null_object()); - __ sw(TMP, Address(SP, 3 * kWordSize)); // Make room for the result. - __ sw(A0, Address(SP, 2 * kWordSize)); // Push the source object. - __ LoadObject(TMP, dst_name); - __ sw(TMP, Address(SP, 1 * kWordSize)); // Push the destination name. - __ LoadObject(TMP, dst_type); - __ sw(TMP, Address(SP, 0 * kWordSize)); // Push the destination type. - - GenerateRuntimeCall(token_pos, deopt_id, kBadTypeErrorRuntimeEntry, 3, - locs); - // We should never return here. - __ break_(0); - - __ Bind(&is_assignable); // For a null object. - __ lw(A1, Address(SP, 1 * kWordSize)); // Restore instantiator type args. - __ lw(A2, Address(SP, 0 * kWordSize)); // Restore function type args. - __ addiu(SP, SP, Immediate(2 * kWordSize)); - return; - } - - // Generate inline type check, linking to runtime call if not assignable. - SubtypeTestCache& test_cache = SubtypeTestCache::ZoneHandle(zone()); - test_cache = GenerateInlineInstanceof(token_pos, dst_type, &is_assignable, - &runtime_call); - - __ Bind(&runtime_call); - __ lw(A1, Address(SP, 1 * kWordSize)); // Load instantiator type args. - __ lw(A2, Address(SP, 0 * kWordSize)); // Load function type args. - - __ addiu(SP, SP, Immediate(-7 * kWordSize)); - __ LoadObject(TMP, Object::null_object()); - __ sw(TMP, Address(SP, 6 * kWordSize)); // Make room for the result. - __ sw(A0, Address(SP, 5 * kWordSize)); // Push the source object. - __ LoadObject(TMP, dst_type); - __ sw(TMP, Address(SP, 4 * kWordSize)); // Push the type of the destination. - __ sw(A1, Address(SP, 3 * kWordSize)); // Push instantiator type args. - __ sw(A2, Address(SP, 2 * kWordSize)); // Push function type args. - __ LoadObject(TMP, dst_name); - __ sw(TMP, Address(SP, 1 * kWordSize)); // Push the name of the destination. - __ LoadUniqueObject(T0, test_cache); - __ sw(T0, Address(SP, 0 * kWordSize)); - - GenerateRuntimeCall(token_pos, deopt_id, kTypeCheckRuntimeEntry, 6, locs); - // Pop the parameters supplied to the runtime entry. The result of the - // type check runtime call is the checked value. - __ lw(A0, Address(SP, 6 * kWordSize)); - __ addiu(SP, SP, Immediate(7 * kWordSize)); - - __ Bind(&is_assignable); - __ lw(A1, Address(SP, 1 * kWordSize)); // Restore instantiator type args. - __ lw(A2, Address(SP, 0 * kWordSize)); // Restore function type args. 
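Stripped of MIPS register and stack detail, the assignable check emitted above has a simple shape. The following C++ is a schematic with stand-in types and stub predicates, not the VM's entry points:

    #include <stdexcept>

    struct Value { bool is_null = false; };
    struct Type { int cid = 0; };

    // Stand-in for the inlined class-id and type-argument tests.
    static bool InlineTestSucceeds(const Value&, const Type&) { return false; }
    // Stand-in for the kTypeCheckRuntimeEntry slow path.
    static bool RuntimeTypeCheck(const Value&, const Type& t) {
      return t.cid != 0;
    }

    Value AssertAssignable(Value instance, const Type& dst_type) {
      if (instance.is_null) return instance;  // Null is always assignable.
      if (InlineTestSucceeds(instance, dst_type)) return instance;  // Fast.
      if (RuntimeTypeCheck(instance, dst_type)) return instance;    // Slow.
      throw std::runtime_error("TypeError");  // Negative checks may be slow.
    }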
- __ addiu(SP, SP, Immediate(2 * kWordSize)); -} - - -void FlowGraphCompiler::EmitInstructionEpilogue(Instruction* instr) { - if (is_optimizing()) return; - Definition* defn = instr->AsDefinition(); - if ((defn != NULL) && defn->HasTemp()) { - __ Push(defn->locs()->out(0).reg()); - } -} - - -// Input parameters: -// S4: arguments descriptor array. -void FlowGraphCompiler::CopyParameters() { - __ Comment("Copy parameters"); - const Function& function = parsed_function().function(); - LocalScope* scope = parsed_function().node_sequence()->scope(); - const int num_fixed_params = function.num_fixed_parameters(); - const int num_opt_pos_params = function.NumOptionalPositionalParameters(); - const int num_opt_named_params = function.NumOptionalNamedParameters(); - const int num_params = - num_fixed_params + num_opt_pos_params + num_opt_named_params; - ASSERT(function.NumParameters() == num_params); - ASSERT(parsed_function().first_parameter_index() == kFirstLocalSlotFromFp); - - // Check that min_num_pos_args <= num_pos_args <= max_num_pos_args, - // where num_pos_args is the number of positional arguments passed in. - const int min_num_pos_args = num_fixed_params; - const int max_num_pos_args = num_fixed_params + num_opt_pos_params; - - __ lw(T2, FieldAddress(S4, ArgumentsDescriptor::positional_count_offset())); - // Check that min_num_pos_args <= num_pos_args. - Label wrong_num_arguments; - __ BranchSignedLess(T2, Immediate(Smi::RawValue(min_num_pos_args)), - &wrong_num_arguments); - - // Check that num_pos_args <= max_num_pos_args. - __ BranchSignedGreater(T2, Immediate(Smi::RawValue(max_num_pos_args)), - &wrong_num_arguments); - - // Copy positional arguments. - // Argument i passed at fp[kParamEndSlotFromFp + num_args - i] is copied - // to fp[kFirstLocalSlotFromFp - i]. - - __ lw(T1, FieldAddress(S4, ArgumentsDescriptor::count_offset())); - // Since T1 and T2 are Smi, use sll 1 instead of sll 2. - // Let T1 point to the last passed positional argument, i.e. to - // fp[kParamEndSlotFromFp + num_args - (num_pos_args - 1)]. - __ subu(T1, T1, T2); - __ sll(T1, T1, 1); - __ addu(T1, FP, T1); - __ AddImmediate(T1, (kParamEndSlotFromFp + 1) * kWordSize); - - // Let T0 point to the last copied positional argument, i.e. to - // fp[kFirstLocalSlotFromFp - (num_pos_args - 1)]. - __ AddImmediate(T0, FP, (kFirstLocalSlotFromFp + 1) * kWordSize); - __ sll(T2, T2, 1); // T2 is a Smi. - - __ Comment("Argument Copy Loop"); - Label loop, loop_exit; - __ blez(T2, &loop_exit); - __ delay_slot()->subu(T0, T0, T2); - __ Bind(&loop); - __ addu(T4, T1, T2); - __ lw(T3, Address(T4, -kWordSize)); - __ addiu(T2, T2, Immediate(-kWordSize)); - __ addu(T5, T0, T2); - __ bgtz(T2, &loop); - __ delay_slot()->sw(T3, Address(T5)); - __ Bind(&loop_exit); - - // Copy or initialize optional named arguments. - Label all_arguments_processed; -#ifdef DEBUG - const bool check_correct_named_args = true; -#else - const bool check_correct_named_args = function.IsClosureFunction(); -#endif - if (num_opt_named_params > 0) { - __ Comment("There are named parameters"); - // Start by alphabetically sorting the names of the optional parameters. 
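The sort that follows is a plain insertion sort over the optional parameter names. In isolation, with std::string standing in for the VM's LocalVariable name handles, it amounts to:

    #include <string>
    #include <vector>

    // Ascending, stable insertion sort; adequate for the handful of
    // optional named parameters a function can declare.
    void SortByName(std::vector<std::string>* params) {
      for (size_t pos = 1; pos < params->size(); ++pos) {
        const std::string name = (*params)[pos];
        int i = static_cast<int>(pos) - 1;
        while (i >= 0 && (*params)[i].compare(name) > 0) {
          (*params)[i + 1] = (*params)[i];  // Shift larger names right.
          --i;
        }
        (*params)[i + 1] = name;
      }
    }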
- LocalVariable** opt_param = new LocalVariable*[num_opt_named_params]; - int* opt_param_position = new int[num_opt_named_params]; - for (int pos = num_fixed_params; pos < num_params; pos++) { - LocalVariable* parameter = scope->VariableAt(pos); - const String& opt_param_name = parameter->name(); - int i = pos - num_fixed_params; - while (--i >= 0) { - LocalVariable* param_i = opt_param[i]; - const intptr_t result = opt_param_name.CompareTo(param_i->name()); - ASSERT(result != 0); - if (result > 0) break; - opt_param[i + 1] = opt_param[i]; - opt_param_position[i + 1] = opt_param_position[i]; - } - opt_param[i + 1] = parameter; - opt_param_position[i + 1] = pos; - } - // Generate code handling each optional parameter in alphabetical order. - __ lw(T1, FieldAddress(S4, ArgumentsDescriptor::count_offset())); - // Let T1 point to the first passed argument, i.e. to - // fp[kParamEndSlotFromFp + num_args - 0]; num_args (T1) is Smi. - __ sll(T3, T1, 1); - __ addu(T1, FP, T3); - __ AddImmediate(T1, kParamEndSlotFromFp * kWordSize); - // Let T0 point to the entry of the first named argument. - __ AddImmediate(T0, S4, ArgumentsDescriptor::first_named_entry_offset() - - kHeapObjectTag); - for (int i = 0; i < num_opt_named_params; i++) { - Label load_default_value, assign_optional_parameter; - const int param_pos = opt_param_position[i]; - // Check if this named parameter was passed in. - // Load T3 with the name of the argument. - __ lw(T3, Address(T0, ArgumentsDescriptor::name_offset())); - ASSERT(opt_param[i]->name().IsSymbol()); - __ BranchNotEqual(T3, opt_param[i]->name(), &load_default_value); - - // Load T3 with passed-in argument at provided arg_pos, i.e. at - // fp[kParamEndSlotFromFp + num_args - arg_pos]. - __ lw(T3, Address(T0, ArgumentsDescriptor::position_offset())); - // T3 is arg_pos as Smi. - // Point to next named entry. - __ AddImmediate(T0, ArgumentsDescriptor::named_entry_size()); - __ subu(T3, ZR, T3); - __ sll(T3, T3, 1); - __ addu(T3, T1, T3); - __ b(&assign_optional_parameter); - __ delay_slot()->lw(T3, Address(T3)); - - __ Bind(&load_default_value); - // Load T3 with default argument. - const Instance& value = parsed_function().DefaultParameterValueAt( - param_pos - num_fixed_params); - __ LoadObject(T3, value); - __ Bind(&assign_optional_parameter); - // Assign T3 to fp[kFirstLocalSlotFromFp - param_pos]. - // We do not use the final allocation index of the variable here, i.e. - // scope->VariableAt(i)->index(), because captured variables still need - // to be copied to the context that is not yet allocated. - const intptr_t computed_param_pos = kFirstLocalSlotFromFp - param_pos; - __ sw(T3, Address(FP, computed_param_pos * kWordSize)); - } - delete[] opt_param; - delete[] opt_param_position; - if (check_correct_named_args) { - // Check that T0 now points to the null terminator in the arguments - // descriptor. - __ lw(T3, Address(T0)); - __ BranchEqual(T3, Object::null_object(), &all_arguments_processed); - } - } else { - ASSERT(num_opt_pos_params > 0); - __ Comment("There are optional positional parameters"); - __ lw(T2, FieldAddress(S4, ArgumentsDescriptor::positional_count_offset())); - __ SmiUntag(T2); - for (int i = 0; i < num_opt_pos_params; i++) { - Label next_parameter; - // Handle this optional positional parameter only if k or fewer positional - // arguments have been passed, where k is param_pos, the position of this - // optional parameter in the formal parameter list. 
- const int param_pos = num_fixed_params + i; - __ BranchSignedGreater(T2, Immediate(param_pos), &next_parameter); - // Load T3 with default argument. - const Object& value = parsed_function().DefaultParameterValueAt(i); - __ LoadObject(T3, value); - // Assign T3 to fp[kFirstLocalSlotFromFp - param_pos]. - // We do not use the final allocation index of the variable here, i.e. - // scope->VariableAt(i)->index(), because captured variables still need - // to be copied to the context that is not yet allocated. - const intptr_t computed_param_pos = kFirstLocalSlotFromFp - param_pos; - __ sw(T3, Address(FP, computed_param_pos * kWordSize)); - __ Bind(&next_parameter); - } - if (check_correct_named_args) { - __ lw(T1, FieldAddress(S4, ArgumentsDescriptor::count_offset())); - __ SmiUntag(T1); - // Check that T2 equals T1, i.e. no named arguments passed. - __ beq(T2, T1, &all_arguments_processed); - } - } - - __ Bind(&wrong_num_arguments); - if (function.IsClosureFunction()) { - __ LeaveDartFrame(kKeepCalleePP); // Arguments are still on the stack. - __ Branch(*StubCode::CallClosureNoSuchMethod_entry()); - // The noSuchMethod call may return to the caller, but not here. - } else if (check_correct_named_args) { - __ Stop("Wrong arguments"); - } - - __ Bind(&all_arguments_processed); - // Nullify originally passed arguments only after they have been copied and - // checked, otherwise noSuchMethod would not see their original values. - // This step can be skipped in case we decide that formal parameters are - // implicitly final, since garbage collecting the unmodified value is not - // an issue anymore. - - // S4 : arguments descriptor array. - __ lw(T2, FieldAddress(S4, ArgumentsDescriptor::count_offset())); - __ sll(T2, T2, 1); // T2 is a Smi. - - __ Comment("Null arguments loop"); - Label null_args_loop, null_args_loop_exit; - __ blez(T2, &null_args_loop_exit); - __ delay_slot()->addiu(T1, FP, - Immediate((kParamEndSlotFromFp + 1) * kWordSize)); - __ Bind(&null_args_loop); - __ addiu(T2, T2, Immediate(-kWordSize)); - __ addu(T3, T1, T2); - __ LoadObject(T5, Object::null_object()); - __ bgtz(T2, &null_args_loop); - __ delay_slot()->sw(T5, Address(T3)); - __ Bind(&null_args_loop_exit); -} - - -void FlowGraphCompiler::GenerateInlinedGetter(intptr_t offset) { - // RA: return address. - // SP: receiver. - // Sequence node has one return node, its input is load field node. - __ Comment("Inlined Getter"); - __ lw(V0, Address(SP, 0 * kWordSize)); - __ LoadFieldFromOffset(V0, V0, offset); - __ Ret(); -} - - -void FlowGraphCompiler::GenerateInlinedSetter(intptr_t offset) { - // RA: return address. - // SP+1: receiver. - // SP+0: value. - // Sequence node has one store node and one return NULL node. - __ Comment("Inlined Setter"); - __ lw(T0, Address(SP, 1 * kWordSize)); // Receiver. - __ lw(T1, Address(SP, 0 * kWordSize)); // Value. - __ StoreIntoObjectOffset(T0, offset, T1); - __ LoadObject(V0, Object::null_object()); - __ Ret(); -} - - -static const Register new_pp = T7; - - -void FlowGraphCompiler::EmitFrameEntry() { - const Function& function = parsed_function().function(); - if (CanOptimizeFunction() && function.IsOptimizable() && - (!is_optimizing() || may_reoptimize())) { - __ Comment("Invocation Count Check"); - const Register function_reg = T0; - - // Temporarily setup pool pointer for this dart function. - __ LoadPoolPointer(new_pp); - // Load function object from object pool. 
- __ LoadFunctionFromCalleePool(function_reg, function, new_pp); - - __ lw(T1, FieldAddress(function_reg, Function::usage_counter_offset())); - // Reoptimization of an optimized function is triggered by counting in - // IC stubs, but not at the entry of the function. - if (!is_optimizing()) { - __ addiu(T1, T1, Immediate(1)); - __ sw(T1, FieldAddress(function_reg, Function::usage_counter_offset())); - } - - // Skip Branch if T1 is less than the threshold. - Label dont_branch; - __ BranchSignedLess(T1, Immediate(GetOptimizationThreshold()), - &dont_branch); - - ASSERT(function_reg == T0); - __ Branch(*StubCode::OptimizeFunction_entry(), new_pp); - - __ Bind(&dont_branch); - } - __ Comment("Enter frame"); - if (flow_graph().IsCompiledForOsr()) { - intptr_t extra_slots = StackSize() - flow_graph().num_stack_locals() - - flow_graph().num_copied_params(); - ASSERT(extra_slots >= 0); - __ EnterOsrFrame(extra_slots * kWordSize); - } else { - ASSERT(StackSize() >= 0); - __ EnterDartFrame(StackSize() * kWordSize); - } -} - - -// Input parameters: -// RA: return address. -// SP: address of last argument. -// FP: caller's frame pointer. -// PP: caller's pool pointer. -// S5: ic-data. -// S4: arguments descriptor array. -void FlowGraphCompiler::CompileGraph() { - InitCompiler(); - const Function& function = parsed_function().function(); - -#ifdef DART_PRECOMPILER - if (function.IsDynamicFunction()) { - __ MonomorphicCheckedEntry(); - } -#endif // DART_PRECOMPILER - - if (TryIntrinsify()) { - // Skip regular code generation. - return; - } - - EmitFrameEntry(); - ASSERT(assembler()->constant_pool_allowed()); - - const int num_fixed_params = function.num_fixed_parameters(); - const int num_copied_params = parsed_function().num_copied_params(); - const int num_locals = parsed_function().num_stack_locals(); - - // We check the number of passed arguments when we have to copy them due to - // the presence of optional parameters. - // No such checking code is generated if only fixed parameters are declared, - // unless we are in debug mode or unless we are compiling a closure. - if (num_copied_params == 0) { - const bool check_arguments = - function.IsClosureFunction() && !flow_graph().IsCompiledForOsr(); - if (check_arguments) { - __ Comment("Check argument count"); - // Check that exactly num_fixed arguments are passed in. - Label correct_num_arguments, wrong_num_arguments; - __ lw(T0, FieldAddress(S4, ArgumentsDescriptor::count_offset())); - __ BranchNotEqual(T0, Immediate(Smi::RawValue(num_fixed_params)), - &wrong_num_arguments); - - __ lw(T1, - FieldAddress(S4, ArgumentsDescriptor::positional_count_offset())); - __ beq(T0, T1, &correct_num_arguments); - __ Bind(&wrong_num_arguments); - __ LeaveDartFrame(kKeepCalleePP); // Arguments are still on the stack. - __ Branch(*StubCode::CallClosureNoSuchMethod_entry()); - // The noSuchMethod call may return to the caller, but not here. - __ Bind(&correct_num_arguments); - } - } else if (!flow_graph().IsCompiledForOsr()) { - CopyParameters(); - } - - if (function.IsClosureFunction() && !flow_graph().IsCompiledForOsr()) { - // Load context from the closure object (first argument). - LocalScope* scope = parsed_function().node_sequence()->scope(); - LocalVariable* closure_parameter = scope->VariableAt(0); - __ lw(CTX, Address(FP, closure_parameter->index() * kWordSize)); - __ lw(CTX, FieldAddress(CTX, Closure::context_offset())); - } - - // In unoptimized code, initialize (non-argument) stack allocated slots to - // null. 
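A schematic of the initialization loop that follows, with nullptr standing in for the VM's null object and the context slot special-cased; the helper is illustrative only:

    #include <vector>

    // slots[i] models fp[slot_base - i]: locals sit below FP, so a larger i
    // means a lower address.
    void InitSpillSlots(std::vector<const void*>* slots, int slot_base,
                        int context_index, const void* context) {
      for (int i = 0; i < static_cast<int>(slots->size()); ++i) {
        const bool is_context_slot = (slot_base - i) == context_index;
        (*slots)[i] = is_context_slot ? context : nullptr;
      }
    }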
- if (!is_optimizing()) { - ASSERT(num_locals > 0); // There is always at least context_var. - __ Comment("Initialize spill slots"); - const intptr_t slot_base = parsed_function().first_stack_local_index(); - const intptr_t context_index = - parsed_function().current_context_var()->index(); - if (num_locals > 1) { - __ LoadObject(V0, Object::null_object()); - } - for (intptr_t i = 0; i < num_locals; ++i) { - // Subtract index i (locals lie at lower addresses than FP). - if (((slot_base - i) == context_index)) { - if (function.IsClosureFunction()) { - __ sw(CTX, Address(FP, (slot_base - i) * kWordSize)); - } else { - __ LoadObject(V1, Object::empty_context()); - __ sw(V1, Address(FP, (slot_base - i) * kWordSize)); - } - } else { - ASSERT(num_locals > 1); - __ sw(V0, Address(FP, (slot_base - i) * kWordSize)); - } - } - } - - // Check for a passed type argument vector if the function is generic. - if (FLAG_reify_generic_functions && function.IsGeneric()) { - __ Comment("Check passed-in type args"); - Label store_type_args, ok; - __ lw(T1, FieldAddress(S4, ArgumentsDescriptor::type_args_len_offset())); - if (is_optimizing()) { - // Initialize type_args to null if none passed in. - __ LoadObject(T0, Object::null_object()); - __ BranchEqual(T1, Immediate(0), &store_type_args); - } else { - __ BranchEqual(T1, Immediate(0), &ok); // Already initialized to null. - } - // TODO(regis): Verify that type_args_len is correct. - // Load the passed type args vector in T0 from - // fp[kParamEndSlotFromFp + num_args + 1]; num_args (T1) is Smi. - __ lw(T1, FieldAddress(S4, ArgumentsDescriptor::count_offset())); - __ sll(T1, T1, 1); - __ addu(T1, FP, T1); - __ lw(T0, Address(T1, (kParamEndSlotFromFp + 1) * kWordSize)); - // Store T0 into the stack slot reserved for the function type arguments. - // If the function type arguments variable is captured, a copy will happen - // after the context is allocated. - const intptr_t slot_base = parsed_function().first_stack_local_index(); - ASSERT(parsed_function().function_type_arguments()->is_captured() || - parsed_function().function_type_arguments()->index() == slot_base); - __ Bind(&store_type_args); - __ sw(T0, Address(FP, slot_base * kWordSize)); - __ Bind(&ok); - } - - // TODO(regis): Verify that no vector is passed if not generic, unless already - // checked during resolution. - - EndCodeSourceRange(TokenPosition::kDartCodePrologue); - VisitBlocks(); - - __ break_(0); - GenerateDeferredCode(); -} - - -void FlowGraphCompiler::GenerateCall(TokenPosition token_pos, - const StubEntry& stub_entry, - RawPcDescriptors::Kind kind, - LocationSummary* locs) { - __ BranchLink(stub_entry); - EmitCallsiteMetaData(token_pos, Thread::kNoDeoptId, kind, locs); -} - - -void FlowGraphCompiler::GeneratePatchableCall(TokenPosition token_pos, - const StubEntry& stub_entry, - RawPcDescriptors::Kind kind, - LocationSummary* locs) { - __ BranchLinkPatchable(stub_entry); - EmitCallsiteMetaData(token_pos, Thread::kNoDeoptId, kind, locs); -} - - -void FlowGraphCompiler::GenerateDartCall(intptr_t deopt_id, - TokenPosition token_pos, - const StubEntry& stub_entry, - RawPcDescriptors::Kind kind, - LocationSummary* locs) { - __ BranchLinkPatchable(stub_entry); - EmitCallsiteMetaData(token_pos, deopt_id, kind, locs); - // Marks either the continuation point in unoptimized code or the - // deoptimization point in optimized code, after call. 
- const intptr_t deopt_id_after = Thread::ToDeoptAfter(deopt_id); - if (is_optimizing()) { - AddDeoptIndexAtCall(deopt_id_after); - } else { - // Add deoptimization continuation point after the call and before the - // arguments are removed. - AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id_after, token_pos); - } -} - - -void FlowGraphCompiler::GenerateStaticDartCall(intptr_t deopt_id, - TokenPosition token_pos, - const StubEntry& stub_entry, - RawPcDescriptors::Kind kind, - LocationSummary* locs, - const Function& target) { - // Call sites to the same target can share object pool entries. These - // call sites are never patched for breakpoints: the function is deoptimized - // and the unoptimized code with IC calls for static calls is patched instead. - ASSERT(is_optimizing()); - __ BranchLinkWithEquivalence(stub_entry, target); - - EmitCallsiteMetaData(token_pos, deopt_id, kind, locs); - // Marks either the continuation point in unoptimized code or the - // deoptimization point in optimized code, after call. - const intptr_t deopt_id_after = Thread::ToDeoptAfter(deopt_id); - if (is_optimizing()) { - AddDeoptIndexAtCall(deopt_id_after); - } else { - // Add deoptimization continuation point after the call and before the - // arguments are removed. - AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id_after, token_pos); - } - AddStaticCallTarget(target); -} - - -void FlowGraphCompiler::GenerateRuntimeCall(TokenPosition token_pos, - intptr_t deopt_id, - const RuntimeEntry& entry, - intptr_t argument_count, - LocationSummary* locs) { - __ CallRuntime(entry, argument_count); - EmitCallsiteMetaData(token_pos, deopt_id, RawPcDescriptors::kOther, locs); - if (deopt_id != Thread::kNoDeoptId) { - // Marks either the continuation point in unoptimized code or the - // deoptimization point in optimized code, after call. - const intptr_t deopt_id_after = Thread::ToDeoptAfter(deopt_id); - if (is_optimizing()) { - AddDeoptIndexAtCall(deopt_id_after); - } else { - // Add deoptimization continuation point after the call and before the - // arguments are removed. - AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id_after, token_pos); - } - } -} - - -void FlowGraphCompiler::EmitEdgeCounter(intptr_t edge_id) { - // We do not check for overflow when incrementing the edge counter. The - // function should normally be optimized long before the counter can - // overflow; and though we do not reset the counters when we optimize or - // deoptimize, there is a bound on the number of - // optimization/deoptimization cycles we will attempt. - ASSERT(!edge_counters_array_.IsNull()); - __ Comment("Edge counter"); - __ LoadObject(T0, edge_counters_array_); - __ LoadFieldFromOffset(T1, T0, Array::element_offset(edge_id)); - __ AddImmediate(T1, T1, Smi::RawValue(1)); - __ StoreFieldToOffset(T1, T0, Array::element_offset(edge_id)); -} - - -void FlowGraphCompiler::EmitOptimizedInstanceCall(const StubEntry& stub_entry, - const ICData& ic_data, - intptr_t argument_count, - intptr_t deopt_id, - TokenPosition token_pos, - LocationSummary* locs) { - ASSERT(Array::Handle(zone(), ic_data.arguments_descriptor()).Length() > 0); - // Each ICData propagated from unoptimized to optimized code contains the - // function that corresponds to the Dart function of that IC call. Due - // to inlining in optimized code, that function may not correspond to the - // top-level function (parsed_function().function()) which could be - // reoptimized and which counter needs to be incremented. 
- // Pass the function explicitly, it is used in IC stub. - __ Comment("OptimizedInstanceCall"); - __ LoadObject(T0, parsed_function().function()); - __ LoadUniqueObject(S5, ic_data); - GenerateDartCall(deopt_id, token_pos, stub_entry, RawPcDescriptors::kIcCall, - locs); - __ Drop(argument_count); -} - - -void FlowGraphCompiler::EmitInstanceCall(const StubEntry& stub_entry, - const ICData& ic_data, - intptr_t argument_count, - intptr_t deopt_id, - TokenPosition token_pos, - LocationSummary* locs) { - ASSERT(Array::Handle(zone(), ic_data.arguments_descriptor()).Length() > 0); - __ Comment("InstanceCall"); - __ LoadUniqueObject(S5, ic_data); - GenerateDartCall(deopt_id, token_pos, stub_entry, RawPcDescriptors::kIcCall, - locs); - __ Comment("InstanceCall return"); - __ Drop(argument_count); -} - - -void FlowGraphCompiler::EmitMegamorphicInstanceCall( - const String& name, - const Array& arguments_descriptor, - intptr_t argument_count, - intptr_t deopt_id, - TokenPosition token_pos, - LocationSummary* locs, - intptr_t try_index, - intptr_t slow_path_argument_count) { - ASSERT(!arguments_descriptor.IsNull() && (arguments_descriptor.Length() > 0)); - const MegamorphicCache& cache = MegamorphicCache::ZoneHandle( - zone(), - MegamorphicCacheTable::Lookup(isolate(), name, arguments_descriptor)); - - __ Comment("MegamorphicCall"); - // Load receiver into T0, - __ lw(T0, Address(SP, (argument_count - 1) * kWordSize)); - __ LoadObject(S5, cache); - __ lw(T9, Address(THR, Thread::megamorphic_call_checked_entry_offset())); - __ jalr(T9); - - RecordSafepoint(locs, slow_path_argument_count); - const intptr_t deopt_id_after = Thread::ToDeoptAfter(deopt_id); - if (FLAG_precompiled_mode) { - // Megamorphic calls may occur in slow path stubs. - // If valid use try_index argument. - if (try_index == CatchClauseNode::kInvalidTryIndex) { - try_index = CurrentTryIndex(); - } - AddDescriptor(RawPcDescriptors::kOther, assembler()->CodeSize(), - Thread::kNoDeoptId, token_pos, try_index); - } else if (is_optimizing()) { - AddCurrentDescriptor(RawPcDescriptors::kOther, Thread::kNoDeoptId, - token_pos); - AddDeoptIndexAtCall(deopt_id_after); - } else { - AddCurrentDescriptor(RawPcDescriptors::kOther, Thread::kNoDeoptId, - token_pos); - // Add deoptimization continuation point after the call and before the - // arguments are removed. - AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id_after, token_pos); - } - EmitCatchEntryState(pending_deoptimization_env_, try_index); - __ Drop(argument_count); -} - - -void FlowGraphCompiler::EmitSwitchableInstanceCall(const ICData& ic_data, - intptr_t argument_count, - intptr_t deopt_id, - TokenPosition token_pos, - LocationSummary* locs) { - ASSERT(ic_data.NumArgsTested() == 1); - const Code& initial_stub = - Code::ZoneHandle(StubCode::ICCallThroughFunction_entry()->code()); - - __ Comment("SwitchableCall"); - __ lw(T0, Address(SP, (argument_count - 1) * kWordSize)); - __ LoadUniqueObject(CODE_REG, initial_stub); - __ lw(T9, FieldAddress(CODE_REG, Code::checked_entry_point_offset())); - __ LoadUniqueObject(S5, ic_data); - __ jalr(T9); - - EmitCallsiteMetaData(token_pos, Thread::kNoDeoptId, RawPcDescriptors::kOther, - locs); - const intptr_t deopt_id_after = Thread::ToDeoptAfter(deopt_id); - if (is_optimizing()) { - AddDeoptIndexAtCall(deopt_id_after); - } else { - // Add deoptimization continuation point after the call and before the - // arguments are removed. 
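Conceptually, the megamorphic cache probed above maps the receiver's class id to a call target and falls back to a miss handler. The real cache is a fixed-layout table scanned by stub code, but a hypothetical model looks like:

    #include <cstdint>
    #include <unordered_map>

    using ClassId = int32_t;
    using Target = void (*)();

    struct MegamorphicCache {
      std::unordered_map<ClassId, Target> entries;
      Target Lookup(ClassId cid, Target miss_handler) const {
        auto it = entries.find(cid);
        return it == entries.end() ? miss_handler : it->second;
      }
    };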
- AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id_after, token_pos); - } - __ Drop(argument_count); -} - - -void FlowGraphCompiler::EmitUnoptimizedStaticCall(intptr_t argument_count, - intptr_t deopt_id, - TokenPosition token_pos, - LocationSummary* locs, - const ICData& ic_data) { - const StubEntry* stub_entry = - StubCode::UnoptimizedStaticCallEntry(ic_data.NumArgsTested()); - __ LoadObject(S5, ic_data); - GenerateDartCall(deopt_id, token_pos, *stub_entry, - RawPcDescriptors::kUnoptStaticCall, locs); - __ Drop(argument_count); -} - - -void FlowGraphCompiler::EmitOptimizedStaticCall( - const Function& function, - const Array& arguments_descriptor, - intptr_t argument_count, - intptr_t deopt_id, - TokenPosition token_pos, - LocationSummary* locs) { - __ Comment("StaticCall"); - ASSERT(!function.IsClosureFunction()); - if (function.HasOptionalParameters() || - (FLAG_reify_generic_functions && function.IsGeneric())) { - __ LoadObject(S4, arguments_descriptor); - } else { - __ LoadImmediate(S4, 0); // GC safe smi zero because of stub. - } - // Do not use the code from the function, but let the code be patched so that - // we can record the outgoing edges to other code. - GenerateStaticDartCall(deopt_id, token_pos, - *StubCode::CallStaticFunction_entry(), - RawPcDescriptors::kOther, locs, function); - __ Drop(argument_count); -} - - -Condition FlowGraphCompiler::EmitEqualityRegConstCompare( - Register reg, - const Object& obj, - bool needs_number_check, - TokenPosition token_pos, - intptr_t deopt_id) { - __ Comment("EqualityRegConstCompare"); - ASSERT(!needs_number_check || - (!obj.IsMint() && !obj.IsDouble() && !obj.IsBigint())); - if (needs_number_check) { - ASSERT(!obj.IsMint() && !obj.IsDouble() && !obj.IsBigint()); - __ addiu(SP, SP, Immediate(-2 * kWordSize)); - __ sw(reg, Address(SP, 1 * kWordSize)); - __ LoadObject(TMP, obj); - __ sw(TMP, Address(SP, 0 * kWordSize)); - if (is_optimizing()) { - __ BranchLinkPatchable( - *StubCode::OptimizedIdenticalWithNumberCheck_entry()); - } else { - __ BranchLinkPatchable( - *StubCode::UnoptimizedIdenticalWithNumberCheck_entry()); - } - AddCurrentDescriptor(RawPcDescriptors::kRuntimeCall, deopt_id, token_pos); - __ Comment("EqualityRegConstCompare return"); - // Stub returns result in CMPRES1 (if it is 0, then reg and obj are equal). - __ lw(reg, Address(SP, 1 * kWordSize)); // Restore 'reg'. - __ addiu(SP, SP, Immediate(2 * kWordSize)); // Discard constant. - return Condition(CMPRES1, ZR, EQ); - } else { - int16_t imm = 0; - const Register obj_reg = __ LoadConditionOperand(CMPRES1, obj, &imm); - return Condition(reg, obj_reg, EQ, imm); - } -} - - -Condition FlowGraphCompiler::EmitEqualityRegRegCompare(Register left, - Register right, - bool needs_number_check, - TokenPosition token_pos, - intptr_t deopt_id) { - __ Comment("EqualityRegRegCompare"); - if (needs_number_check) { - __ addiu(SP, SP, Immediate(-2 * kWordSize)); - __ sw(left, Address(SP, 1 * kWordSize)); - __ sw(right, Address(SP, 0 * kWordSize)); - if (is_optimizing()) { - __ BranchLinkPatchable( - *StubCode::OptimizedIdenticalWithNumberCheck_entry()); - } else { - __ BranchLinkPatchable( - *StubCode::UnoptimizedIdenticalWithNumberCheck_entry()); - } - if (token_pos.IsReal()) { - AddCurrentDescriptor(RawPcDescriptors::kRuntimeCall, Thread::kNoDeoptId, - token_pos); - } - __ Comment("EqualityRegRegCompare return"); - // Stub returns result in CMPRES1 (if it is 0, then left and right are - // equal). 
-    __ lw(right, Address(SP, 0 * kWordSize));
-    __ lw(left, Address(SP, 1 * kWordSize));
-    __ addiu(SP, SP, Immediate(2 * kWordSize));
-    return Condition(CMPRES1, ZR, EQ);
-  } else {
-    return Condition(left, right, EQ);
-  }
-}
-
-
-// This function must be in sync with FlowGraphCompiler::RecordSafepoint and
-// FlowGraphCompiler::SlowPathEnvironmentFor.
-void FlowGraphCompiler::SaveLiveRegisters(LocationSummary* locs) {
-#if defined(DEBUG)
-  locs->CheckWritableInputs();
-  ClobberDeadTempRegisters(locs);
-#endif
-
-  __ Comment("SaveLiveRegisters");
-  // TODO(vegorov): consider saving only caller save (volatile) registers.
-  const intptr_t fpu_regs_count = locs->live_registers()->FpuRegisterCount();
-  if (fpu_regs_count > 0) {
-    __ AddImmediate(SP, -(fpu_regs_count * kFpuRegisterSize));
-    // Store fpu registers with the lowest register number at the lowest
-    // address.
-    intptr_t offset = 0;
-    for (intptr_t i = 0; i < kNumberOfFpuRegisters; ++i) {
-      DRegister fpu_reg = static_cast<DRegister>(i);
-      if (locs->live_registers()->ContainsFpuRegister(fpu_reg)) {
-        __ StoreDToOffset(fpu_reg, SP, offset);
-        offset += kFpuRegisterSize;
-      }
-    }
-    ASSERT(offset == (fpu_regs_count * kFpuRegisterSize));
-  }
-
-  // The order in which the registers are pushed must match the order
-  // in which the registers are encoded in the safe point's stack map.
-  const intptr_t cpu_registers = locs->live_registers()->cpu_registers();
-  ASSERT((cpu_registers & ~kAllCpuRegistersList) == 0);
-  const int register_count = Utils::CountOneBits(cpu_registers);
-  if (register_count > 0) {
-    __ addiu(SP, SP, Immediate(-register_count * kWordSize));
-    intptr_t offset = register_count * kWordSize;
-    for (int i = kNumberOfCpuRegisters - 1; i >= 0; --i) {
-      Register r = static_cast<Register>(i);
-      if (locs->live_registers()->ContainsRegister(r)) {
-        offset -= kWordSize;
-        __ sw(r, Address(SP, offset));
-      }
-    }
-    ASSERT(offset == 0);
-  }
-}
-
-
-void FlowGraphCompiler::RestoreLiveRegisters(LocationSummary* locs) {
-  __ Comment("RestoreLiveRegisters");
-  const intptr_t cpu_registers = locs->live_registers()->cpu_registers();
-  ASSERT((cpu_registers & ~kAllCpuRegistersList) == 0);
-  const int register_count = Utils::CountOneBits(cpu_registers);
-  if (register_count > 0) {
-    intptr_t offset = register_count * kWordSize;
-    for (int i = kNumberOfCpuRegisters - 1; i >= 0; --i) {
-      Register r = static_cast<Register>(i);
-      if (locs->live_registers()->ContainsRegister(r)) {
-        offset -= kWordSize;
-        __ lw(r, Address(SP, offset));
-      }
-    }
-    ASSERT(offset == 0);
-    __ addiu(SP, SP, Immediate(register_count * kWordSize));
-  }
-
-  const intptr_t fpu_regs_count = locs->live_registers()->FpuRegisterCount();
-  if (fpu_regs_count > 0) {
-    // Fpu registers have the lowest register number at the lowest address.
-    intptr_t offset = 0;
-    for (intptr_t i = 0; i < kNumberOfFpuRegisters; ++i) {
-      DRegister fpu_reg = static_cast<DRegister>(i);
-      if (locs->live_registers()->ContainsFpuRegister(fpu_reg)) {
-        __ LoadDFromOffset(fpu_reg, SP, offset);
-        offset += kFpuRegisterSize;
-      }
-    }
-    ASSERT(offset == (fpu_regs_count * kFpuRegisterSize));
-    __ AddImmediate(SP, offset);
-  }
-}
-
-
-#if defined(DEBUG)
-void FlowGraphCompiler::ClobberDeadTempRegisters(LocationSummary* locs) {
-  // Clobber temporaries that have not been manually preserved.
-  for (intptr_t i = 0; i < locs->temp_count(); ++i) {
-    Location tmp = locs->temp(i);
-    // TODO(zerny): clobber non-live temporary FPU registers.
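SaveLiveRegisters above walks the CPU register mask from the highest number down so that the lowest-numbered live register lands at the lowest stack offset, which is the order the stack map encodes. The offset bookkeeping in isolation, as a self-contained sketch:

    #include <bitset>
    #include <cstdint>

    // Fills offsets[r] with the SP-relative byte offset of live register r.
    // Walking downwards yields dense, ordered slots; offset ends at 0.
    void ComputeSpillOffsets(uint32_t live_mask, int word_size,
                             int offsets[32]) {
      int offset =
          static_cast<int>(std::bitset<32>(live_mask).count()) * word_size;
      for (int r = 31; r >= 0; --r) {
        if (live_mask & (1u << r)) {
          offset -= word_size;
          offsets[r] = offset;
        }
      }
    }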
- if (tmp.IsRegister() && - !locs->live_registers()->ContainsRegister(tmp.reg())) { - __ LoadImmediate(tmp.reg(), 0xf7); - } - } -} -#endif - - -void FlowGraphCompiler::EmitTestAndCallLoadReceiver( - intptr_t argument_count, - const Array& arguments_descriptor) { - __ Comment("EmitTestAndCall"); - // Load receiver into T0. - __ LoadFromOffset(T0, SP, (argument_count - 1) * kWordSize); - __ LoadObject(S4, arguments_descriptor); -} - - -void FlowGraphCompiler::EmitTestAndCallSmiBranch(Label* label, bool if_smi) { - __ andi(CMPRES1, T0, Immediate(kSmiTagMask)); - if (if_smi) { - // Jump if receiver is Smi. - __ beq(CMPRES1, ZR, label); - } else { - // Jump if receiver is not Smi. - __ bne(CMPRES1, ZR, label); - } -} - - -void FlowGraphCompiler::EmitTestAndCallLoadCid() { - __ LoadClassId(T2, T0); -} - - -int FlowGraphCompiler::EmitTestAndCallCheckCid(Label* next_label, - const CidRange& range, - int bias) { - intptr_t cid_start = range.cid_start; - if (range.IsSingleCid()) { - __ BranchNotEqual(T2, Immediate(cid_start - bias), next_label); - } else { - __ AddImmediate(T2, T2, bias - cid_start); - bias = cid_start; - // TODO(erikcorry): We should use sltiu instead of the temporary TMP if - // the range is small enough. - __ LoadImmediate(TMP, range.Extent()); - // Reverse comparison so we get 1 if biased cid > tmp ie cid is out of - // range. - __ sltu(TMP, TMP, T2); - __ bne(TMP, ZR, next_label); - } - return bias; -} - - -#undef __ -#define __ compiler_->assembler()-> - - -void ParallelMoveResolver::EmitMove(int index) { - MoveOperands* move = moves_[index]; - const Location source = move->src(); - const Location destination = move->dest(); - __ Comment("ParallelMoveResolver::EmitMove"); - - if (source.IsRegister()) { - if (destination.IsRegister()) { - __ mov(destination.reg(), source.reg()); - } else { - ASSERT(destination.IsStackSlot()); - const intptr_t dest_offset = destination.ToStackSlotOffset(); - __ StoreToOffset(source.reg(), destination.base_reg(), dest_offset); - } - } else if (source.IsStackSlot()) { - if (destination.IsRegister()) { - const intptr_t source_offset = source.ToStackSlotOffset(); - __ LoadFromOffset(destination.reg(), source.base_reg(), source_offset); - } else { - ASSERT(destination.IsStackSlot()); - const intptr_t source_offset = source.ToStackSlotOffset(); - const intptr_t dest_offset = destination.ToStackSlotOffset(); - ScratchRegisterScope tmp(this, kNoRegister); - __ LoadFromOffset(tmp.reg(), source.base_reg(), source_offset); - __ StoreToOffset(tmp.reg(), destination.base_reg(), dest_offset); - } - } else if (source.IsFpuRegister()) { - if (destination.IsFpuRegister()) { - DRegister dst = destination.fpu_reg(); - DRegister src = source.fpu_reg(); - __ movd(dst, src); - } else { - ASSERT(destination.IsDoubleStackSlot()); - const intptr_t dest_offset = destination.ToStackSlotOffset(); - DRegister src = source.fpu_reg(); - __ StoreDToOffset(src, destination.base_reg(), dest_offset); - } - } else if (source.IsDoubleStackSlot()) { - if (destination.IsFpuRegister()) { - const intptr_t source_offset = source.ToStackSlotOffset(); - DRegister dst = destination.fpu_reg(); - __ LoadDFromOffset(dst, source.base_reg(), source_offset); - } else { - ASSERT(destination.IsDoubleStackSlot()); - const intptr_t source_offset = source.ToStackSlotOffset(); - const intptr_t dest_offset = destination.ToStackSlotOffset(); - __ LoadDFromOffset(DTMP, source.base_reg(), source_offset); - __ StoreDToOffset(DTMP, destination.base_reg(), dest_offset); - } - } else { - 
ASSERT(source.IsConstant()); - const Object& constant = source.constant(); - if (destination.IsRegister()) { - if (constant.IsSmi() && - (source.constant_instruction()->representation() == kUnboxedInt32)) { - __ LoadImmediate(destination.reg(), Smi::Cast(constant).Value()); - } else { - __ LoadObject(destination.reg(), constant); - } - } else if (destination.IsFpuRegister()) { - __ LoadObject(TMP, constant); - __ LoadDFromOffset(destination.fpu_reg(), TMP, - Double::value_offset() - kHeapObjectTag); - } else if (destination.IsDoubleStackSlot()) { - const intptr_t dest_offset = destination.ToStackSlotOffset(); - __ LoadObject(TMP, constant); - __ LoadDFromOffset(DTMP, TMP, Double::value_offset() - kHeapObjectTag); - __ StoreDToOffset(DTMP, destination.base_reg(), dest_offset); - } else { - ASSERT(destination.IsStackSlot()); - const intptr_t dest_offset = destination.ToStackSlotOffset(); - ScratchRegisterScope tmp(this, kNoRegister); - if (constant.IsSmi() && - (source.constant_instruction()->representation() == kUnboxedInt32)) { - __ LoadImmediate(tmp.reg(), Smi::Cast(constant).Value()); - } else { - __ LoadObject(tmp.reg(), constant); - } - __ StoreToOffset(tmp.reg(), destination.base_reg(), dest_offset); - } - } - - move->Eliminate(); -} - - -void ParallelMoveResolver::EmitSwap(int index) { - MoveOperands* move = moves_[index]; - const Location source = move->src(); - const Location destination = move->dest(); - - if (source.IsRegister() && destination.IsRegister()) { - ASSERT(source.reg() != TMP); - ASSERT(destination.reg() != TMP); - __ mov(TMP, source.reg()); - __ mov(source.reg(), destination.reg()); - __ mov(destination.reg(), TMP); - } else if (source.IsRegister() && destination.IsStackSlot()) { - Exchange(source.reg(), destination.base_reg(), - destination.ToStackSlotOffset()); - } else if (source.IsStackSlot() && destination.IsRegister()) { - Exchange(destination.reg(), source.base_reg(), source.ToStackSlotOffset()); - } else if (source.IsStackSlot() && destination.IsStackSlot()) { - Exchange(source.base_reg(), source.ToStackSlotOffset(), - destination.base_reg(), destination.ToStackSlotOffset()); - } else if (source.IsFpuRegister() && destination.IsFpuRegister()) { - DRegister dst = destination.fpu_reg(); - DRegister src = source.fpu_reg(); - __ movd(DTMP, src); - __ movd(src, dst); - __ movd(dst, DTMP); - } else if (source.IsFpuRegister() || destination.IsFpuRegister()) { - ASSERT(destination.IsDoubleStackSlot() || source.IsDoubleStackSlot()); - DRegister reg = - source.IsFpuRegister() ? source.fpu_reg() : destination.fpu_reg(); - Register base_reg = - source.IsFpuRegister() ? destination.base_reg() : source.base_reg(); - const intptr_t slot_offset = source.IsFpuRegister() - ? 
destination.ToStackSlotOffset() - : source.ToStackSlotOffset(); - __ LoadDFromOffset(DTMP, base_reg, slot_offset); - __ StoreDToOffset(reg, base_reg, slot_offset); - __ movd(reg, DTMP); - } else if (source.IsDoubleStackSlot() && destination.IsDoubleStackSlot()) { - const intptr_t source_offset = source.ToStackSlotOffset(); - const intptr_t dest_offset = destination.ToStackSlotOffset(); - - ScratchFpuRegisterScope ensure_scratch(this, DTMP); - DRegister scratch = ensure_scratch.reg(); - __ LoadDFromOffset(DTMP, source.base_reg(), source_offset); - __ LoadDFromOffset(scratch, destination.base_reg(), dest_offset); - __ StoreDToOffset(DTMP, destination.base_reg(), dest_offset); - __ StoreDToOffset(scratch, source.base_reg(), source_offset); - } else { - UNREACHABLE(); - } - - // The swap of source and destination has executed a move from source to - // destination. - move->Eliminate(); - - // Any unperformed (including pending) move with a source of either - // this move's source or destination needs to have their source - // changed to reflect the state of affairs after the swap. - for (int i = 0; i < moves_.length(); ++i) { - const MoveOperands& other_move = *moves_[i]; - if (other_move.Blocks(source)) { - moves_[i]->set_src(destination); - } else if (other_move.Blocks(destination)) { - moves_[i]->set_src(source); - } - } -} - - -void ParallelMoveResolver::MoveMemoryToMemory(const Address& dst, - const Address& src) { - __ Comment("ParallelMoveResolver::MoveMemoryToMemory"); - __ lw(TMP, src); - __ sw(TMP, dst); -} - - -void ParallelMoveResolver::StoreObject(const Address& dst, const Object& obj) { - __ Comment("ParallelMoveResolver::StoreObject"); - __ LoadObject(TMP, obj); - __ sw(TMP, dst); -} - - -// Do not call or implement this function. Instead, use the form below that -// uses an offset from the frame pointer instead of an Address. -void ParallelMoveResolver::Exchange(Register reg, const Address& mem) { - UNREACHABLE(); -} - - -// Do not call or implement this function. Instead, use the form below that -// uses offsets from the frame pointer instead of Addresses. 
-void ParallelMoveResolver::Exchange(const Address& mem1, const Address& mem2) {
-  UNREACHABLE();
-}
-
-
-void ParallelMoveResolver::Exchange(Register reg,
-                                    Register base_reg,
-                                    intptr_t stack_offset) {
-  ScratchRegisterScope tmp(this, reg);
-  __ mov(tmp.reg(), reg);
-  __ LoadFromOffset(reg, base_reg, stack_offset);
-  __ StoreToOffset(tmp.reg(), base_reg, stack_offset);
-}
-
-
-void ParallelMoveResolver::Exchange(Register base_reg1,
-                                    intptr_t stack_offset1,
-                                    Register base_reg2,
-                                    intptr_t stack_offset2) {
-  ScratchRegisterScope tmp1(this, kNoRegister);
-  ScratchRegisterScope tmp2(this, tmp1.reg());
-  __ LoadFromOffset(tmp1.reg(), base_reg1, stack_offset1);
-  __ LoadFromOffset(tmp2.reg(), base_reg2, stack_offset2);
-  __ StoreToOffset(tmp1.reg(), base_reg1, stack_offset2);
-  __ StoreToOffset(tmp2.reg(), base_reg2, stack_offset1);
-}
-
-
-void ParallelMoveResolver::SpillScratch(Register reg) {
-  __ Comment("ParallelMoveResolver::SpillScratch");
-  __ Push(reg);
-}
-
-
-void ParallelMoveResolver::RestoreScratch(Register reg) {
-  __ Comment("ParallelMoveResolver::RestoreScratch");
-  __ Pop(reg);
-}
-
-
-void ParallelMoveResolver::SpillFpuScratch(FpuRegister reg) {
-  __ Comment("ParallelMoveResolver::SpillFpuScratch");
-  __ AddImmediate(SP, -kDoubleSize);
-  __ StoreDToOffset(reg, SP, 0);
-}
-
-
-void ParallelMoveResolver::RestoreFpuScratch(FpuRegister reg) {
-  __ Comment("ParallelMoveResolver::RestoreFpuScratch");
-  __ LoadDFromOffset(reg, SP, 0);
-  __ AddImmediate(SP, kDoubleSize);
-}
-
-
-#undef __
-
-
-}  // namespace dart
-
-#endif  // defined TARGET_ARCH_MIPS
diff --git a/runtime/vm/instructions.h b/runtime/vm/instructions.h
index be8c604aec0..0a2e81702f8 100644
--- a/runtime/vm/instructions.h
+++ b/runtime/vm/instructions.h
@@ -15,8 +15,6 @@
 #include "vm/instructions_arm.h"
 #elif defined(TARGET_ARCH_ARM64)
 #include "vm/instructions_arm64.h"
-#elif defined(TARGET_ARCH_MIPS)
-#include "vm/instructions_mips.h"
 #elif defined(TARGET_ARCH_DBC)
 #include "vm/instructions_dbc.h"
 #else
diff --git a/runtime/vm/instructions_mips.cc b/runtime/vm/instructions_mips.cc
deleted file mode 100644
index 3c60538e638..00000000000
--- a/runtime/vm/instructions_mips.cc
+++ /dev/null
@@ -1,265 +0,0 @@
-// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
-// for details. All rights reserved. Use of this source code is governed by a
-// BSD-style license that can be found in the LICENSE file.
-
-#include "vm/globals.h"  // Needed here to get TARGET_ARCH_MIPS.
-#if defined(TARGET_ARCH_MIPS)
-
-#include "vm/instructions.h"
-#include "vm/instructions_mips.h"
-
-#include "vm/constants_mips.h"
-#include "vm/cpu.h"
-#include "vm/object.h"
-
-namespace dart {
-
-CallPattern::CallPattern(uword pc, const Code& code)
-    : object_pool_(ObjectPool::Handle(code.GetObjectPool())),
-      end_(pc),
-      ic_data_load_end_(0),
-      target_code_pool_index_(-1),
-      ic_data_(ICData::Handle()) {
-  ASSERT(code.ContainsInstructionAt(pc));
-  // Last instruction: jalr RA, T9(=R25).
-  ASSERT(*(reinterpret_cast<uint32_t*>(end_) - 2) == 0x0320f809);
-  Register reg;
-  // The end of the pattern is the instruction after the delay slot of the jalr.
-  ic_data_load_end_ = InstructionPattern::DecodeLoadWordFromPool(
-      end_ - (3 * Instr::kInstrSize), &reg, &target_code_pool_index_);
-  ASSERT(reg == CODE_REG);
-}
-
-
-// Decodes a load sequence ending at 'end' (the last instruction of the load
-// sequence is the instruction before the one at end). Returns a pointer to
-// the first instruction in the sequence. Returns the register being loaded
-// and the loaded object in the output parameters 'reg' and 'obj'
-// respectively.
-uword InstructionPattern::DecodeLoadObject(uword end,
-                                           const ObjectPool& object_pool,
-                                           Register* reg,
-                                           Object* obj) {
-  uword start = 0;
-  Instr* instr = Instr::At(end - Instr::kInstrSize);
-  if (instr->OpcodeField() == LW) {
-    intptr_t index = 0;
-    start = DecodeLoadWordFromPool(end, reg, &index);
-    *obj = object_pool.ObjectAt(index);
-  } else {
-    intptr_t value = 0;
-    start = DecodeLoadWordImmediate(end, reg, &value);
-    *obj = reinterpret_cast<RawObject*>(value);
-  }
-  return start;
-}
-
-
-// Decodes a load sequence ending at 'end' (the last instruction of the load
-// sequence is the instruction before the one at end). Returns a pointer to
-// the first instruction in the sequence. Returns the register being loaded
-// and the loaded immediate value in the output parameters 'reg' and 'value'
-// respectively.
-uword InstructionPattern::DecodeLoadWordImmediate(uword end,
-                                                  Register* reg,
-                                                  intptr_t* value) {
-  // The pattern is a fixed size, but match backwards for uniformity with
-  // DecodeLoadWordFromPool.
-  uword start = end - Instr::kInstrSize;
-  Instr* instr = Instr::At(start);
-  intptr_t imm = 0;
-  ASSERT(instr->OpcodeField() == ORI);
-  imm = instr->UImmField();
-  *reg = instr->RtField();
-
-  start -= Instr::kInstrSize;
-  instr = Instr::At(start);
-  ASSERT(instr->OpcodeField() == LUI);
-  ASSERT(instr->RtField() == *reg);
-  imm |= (instr->UImmField() << 16);
-  *value = imm;
-  return start;
-}
-
-
-// Decodes a load sequence ending at 'end' (the last instruction of the load
-// sequence is the instruction before the one at end). Returns a pointer to
-// the first instruction in the sequence. Returns the register being loaded
-// and the index in the pool being read from in the output parameters 'reg'
-// and 'index' respectively.
-uword InstructionPattern::DecodeLoadWordFromPool(uword end,
-                                                 Register* reg,
-                                                 intptr_t* index) {
-  uword start = end - Instr::kInstrSize;
-  Instr* instr = Instr::At(start);
-  intptr_t offset = 0;
-  if ((instr->OpcodeField() == LW) && (instr->RsField() == PP)) {
-    offset = instr->SImmField();
-    *reg = instr->RtField();
-  } else {
-    ASSERT(instr->OpcodeField() == LW);
-    offset = instr->SImmField();
-    *reg = instr->RtField();
-
-    start -= Instr::kInstrSize;
-    instr = Instr::At(start);
-    ASSERT(instr->OpcodeField() == SPECIAL);
-    ASSERT(instr->FunctionField() == ADDU);
-    ASSERT(instr->RdField() == *reg);
-    ASSERT(instr->RsField() == *reg);
-    ASSERT(instr->RtField() == PP);
-
-    start -= Instr::kInstrSize;
-    instr = Instr::At(start);
-    ASSERT(instr->OpcodeField() == LUI);
-    ASSERT(instr->RtField() == *reg);
-    // Offset is signed, so add the upper 16 bits.
-    offset += (instr->UImmField() << 16);
-  }
-  *index = ObjectPool::IndexFromOffset(offset);
-  return start;
-}
-
-
-bool DecodeLoadObjectFromPoolOrThread(uword pc, const Code& code, Object* obj) {
-  ASSERT(code.ContainsInstructionAt(pc));
-
-  Instr* instr = Instr::At(pc);
-  if ((instr->OpcodeField() == LW)) {
-    intptr_t offset = instr->SImmField();
-    if (instr->RsField() == PP) {
-      intptr_t index = ObjectPool::IndexFromOffset(offset);
-      const ObjectPool& pool = ObjectPool::Handle(code.object_pool());
-      if (pool.InfoAt(index) == ObjectPool::kTaggedObject) {
-        *obj = pool.ObjectAt(index);
-        return true;
-      }
-    } else if (instr->RsField() == THR) {
-      return Thread::ObjectAtOffset(offset, obj);
-    }
-  }
-  // TODO(rmacnak): Sequence for loads beyond 16 bits.
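The decoders above reverse MIPS's two-instruction constant materialization. The arithmetic, as a self-contained sketch: a lui/ori pair combines two unsigned halves, while a lui/lw pair must sign-extend the load's 16-bit offset, which is why the code notes that the upper half absorbs the carry:

    #include <cstdint>

    // lui rt, hi ; ori rt, rt, lo  ==>  (hi << 16) | lo, both halves unsigned.
    uint32_t DecodeLuiOri(uint16_t lui_imm, uint16_t ori_imm) {
      return (static_cast<uint32_t>(lui_imm) << 16) | ori_imm;
    }

    // lui rt, hi ; addu rt, rt, PP ; lw rt, simm(rt): the lw displacement is
    // a signed 16-bit immediate, so it is sign-extended before the add.
    int32_t DecodePoolOffset(uint16_t lui_imm, uint16_t lw_simm) {
      return static_cast<int32_t>(static_cast<uint32_t>(lui_imm) << 16) +
             static_cast<int16_t>(lw_simm);
    }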
-
-  return false;
-}
-
-
-RawICData* CallPattern::IcData() {
-  if (ic_data_.IsNull()) {
-    Register reg;
-    InstructionPattern::DecodeLoadObject(ic_data_load_end_, object_pool_, &reg,
-                                         &ic_data_);
-    ASSERT(reg == S5);
-  }
-  return ic_data_.raw();
-}
-
-
-RawCode* CallPattern::TargetCode() const {
-  return reinterpret_cast<RawCode*>(
-      object_pool_.ObjectAt(target_code_pool_index_));
-}
-
-
-void CallPattern::SetTargetCode(const Code& target) const {
-  object_pool_.SetObjectAt(target_code_pool_index_, target);
-  // No need to flush the instruction cache, since the code is not modified.
-}
-
-
-NativeCallPattern::NativeCallPattern(uword pc, const Code& code)
-    : object_pool_(ObjectPool::Handle(code.GetObjectPool())),
-      end_(pc),
-      native_function_pool_index_(-1),
-      target_code_pool_index_(-1) {
-  ASSERT(code.ContainsInstructionAt(pc));
-  // Last instruction: jalr RA, T9(=R25).
-  ASSERT(*(reinterpret_cast<uint32_t*>(end_) - 2) == 0x0320f809);
-
-  Register reg;
-  uword native_function_load_end = InstructionPattern::DecodeLoadWordFromPool(
-      end_ - 3 * Instr::kInstrSize, &reg, &target_code_pool_index_);
-  ASSERT(reg == CODE_REG);
-  InstructionPattern::DecodeLoadWordFromPool(native_function_load_end, &reg,
-                                             &native_function_pool_index_);
-  ASSERT(reg == T5);
-}
-
-
-RawCode* NativeCallPattern::target() const {
-  return reinterpret_cast<RawCode*>(
-      object_pool_.ObjectAt(target_code_pool_index_));
-}
-
-
-void NativeCallPattern::set_target(const Code& target) const {
-  object_pool_.SetObjectAt(target_code_pool_index_, target);
-  // No need to flush the instruction cache, since the code is not modified.
-}
-
-
-NativeFunction NativeCallPattern::native_function() const {
-  return reinterpret_cast<NativeFunction>(
-      object_pool_.RawValueAt(native_function_pool_index_));
-}
-
-
-void NativeCallPattern::set_native_function(NativeFunction func) const {
-  object_pool_.SetRawValueAt(native_function_pool_index_,
-                             reinterpret_cast<uword>(func));
-}
-
-
-SwitchableCallPattern::SwitchableCallPattern(uword pc, const Code& code)
-    : object_pool_(ObjectPool::Handle(code.GetObjectPool())),
-      data_pool_index_(-1),
-      target_pool_index_(-1) {
-  ASSERT(code.ContainsInstructionAt(pc));
-  // Last instruction: jalr t9.
-  ASSERT(*(reinterpret_cast<uint32_t*>(pc) - 1) == 0);  // Delay slot.
- ASSERT(*(reinterpret_cast<uint32_t*>(pc) - 2) == 0x0320f809); - - Register reg; - uword data_load_end = InstructionPattern::DecodeLoadWordFromPool( - pc - 2 * Instr::kInstrSize, &reg, &data_pool_index_); - ASSERT(reg == S5); - InstructionPattern::DecodeLoadWordFromPool(data_load_end - Instr::kInstrSize, - &reg, &target_pool_index_); - ASSERT(reg == CODE_REG); -} - - -RawObject* SwitchableCallPattern::data() const { - return object_pool_.ObjectAt(data_pool_index_); -} - - -RawCode* SwitchableCallPattern::target() const { - return reinterpret_cast<RawCode*>(object_pool_.ObjectAt(target_pool_index_)); -} - - -void SwitchableCallPattern::SetData(const Object& data) const { - ASSERT(!Object::Handle(object_pool_.ObjectAt(data_pool_index_)).IsCode()); - object_pool_.SetObjectAt(data_pool_index_, data); -} - - -void SwitchableCallPattern::SetTarget(const Code& target) const { - ASSERT(Object::Handle(object_pool_.ObjectAt(target_pool_index_)).IsCode()); - object_pool_.SetObjectAt(target_pool_index_, target); -} - - -ReturnPattern::ReturnPattern(uword pc) : pc_(pc) {} - - -bool ReturnPattern::IsValid() const { - Instr* jr = Instr::At(pc_); - return (jr->OpcodeField() == SPECIAL) && (jr->FunctionField() == JR) && - (jr->RsField() == RA); -} - -} // namespace dart - -#endif // defined TARGET_ARCH_MIPS diff --git a/runtime/vm/instructions_mips.h b/runtime/vm/instructions_mips.h deleted file mode 100644 index 4223857ea0e..00000000000 --- a/runtime/vm/instructions_mips.h +++ /dev/null @@ -1,134 +0,0 @@ -// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file -// for details. All rights reserved. Use of this source code is governed by a -// BSD-style license that can be found in the LICENSE file. -// Classes that describe assembly patterns as used by inline caches. - -#ifndef RUNTIME_VM_INSTRUCTIONS_MIPS_H_ -#define RUNTIME_VM_INSTRUCTIONS_MIPS_H_ - -#ifndef RUNTIME_VM_INSTRUCTIONS_H_ -#error Do not include instructions_mips.h directly; use instructions.h instead. -#endif - -#include "vm/constants_mips.h" -#include "vm/native_entry.h" -#include "vm/object.h" - -namespace dart { - -class InstructionPattern : public AllStatic { - public: - // Decodes a load sequence ending at 'end' (the last instruction of the - // load sequence is the instruction before the one at end). Returns the - // address of the first instruction in the sequence. Returns the register - // being loaded and the loaded object in the output parameters 'reg' and - // 'obj' respectively. - static uword DecodeLoadObject(uword end, - const ObjectPool& object_pool, - Register* reg, - Object* obj); - - // Decodes a load sequence ending at 'end' (the last instruction of the - // load sequence is the instruction before the one at end). Returns the - // address of the first instruction in the sequence. Returns the register - // being loaded and the loaded immediate value in the output parameters - // 'reg' and 'value' respectively. - static uword DecodeLoadWordImmediate(uword end, - Register* reg, - intptr_t* value); - - // Decodes a load sequence ending at 'end' (the last instruction of the - // load sequence is the instruction before the one at end). Returns the - // address of the first instruction in the sequence. Returns the register - // being loaded and the index in the pool being read from in the output - // parameters 'reg' and 'index' respectively.
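ReturnPattern::IsValid above is a straight field test on one instruction word: 'jr ra' is an R-type instruction with opcode SPECIAL (0), rs = RA (31), and function JR (8), i.e. the word 0x03e00008. A self-contained decoder over the same bit layout, with the field extraction written out by hand rather than via the VM's Instr helpers:

#include <cassert>
#include <cstdint>

// MIPS R-type layout: opcode(6) rs(5) rt(5) rd(5) shamt(5) funct(6).
bool IsJrRa(uint32_t instr) {
  const uint32_t opcode = instr >> 26;
  const uint32_t rs = (instr >> 21) & 0x1F;
  const uint32_t funct = instr & 0x3F;
  return (opcode == 0) && (rs == 31) && (funct == 8);  // SPECIAL, RA, JR.
}

int main() {
  assert(IsJrRa(0x03E00008));   // jr ra
  assert(!IsJrRa(0x0320F809));  // jalr ra, t9 -- the call pattern above.
  return 0;
}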
- static uword DecodeLoadWordFromPool(uword end, - Register* reg, - intptr_t* index); -}; - - -class CallPattern : public ValueObject { - public: - CallPattern(uword pc, const Code& code); - - RawICData* IcData(); - - RawCode* TargetCode() const; - void SetTargetCode(const Code& target) const; - - private: - const ObjectPool& object_pool_; - - uword end_; - uword ic_data_load_end_; - - intptr_t target_code_pool_index_; - ICData& ic_data_; - - DISALLOW_COPY_AND_ASSIGN(CallPattern); -}; - - -class NativeCallPattern : public ValueObject { - public: - NativeCallPattern(uword pc, const Code& code); - - RawCode* target() const; - void set_target(const Code& target) const; - - NativeFunction native_function() const; - void set_native_function(NativeFunction target) const; - - private: - const ObjectPool& object_pool_; - - uword end_; - intptr_t native_function_pool_index_; - intptr_t target_code_pool_index_; - - DISALLOW_COPY_AND_ASSIGN(NativeCallPattern); -}; - - -// Instance call that can switch between a direct monomorphic call, an IC call, -// and a megamorphic call. -// load guarded cid load ICData load MegamorphicCache -// load monomorphic target <-> load ICLookup stub -> load MMLookup stub -// call target.entry call stub.entry call stub.entry -class SwitchableCallPattern : public ValueObject { - public: - SwitchableCallPattern(uword pc, const Code& code); - - RawObject* data() const; - RawCode* target() const; - void SetData(const Object& data) const; - void SetTarget(const Code& target) const; - - private: - const ObjectPool& object_pool_; - intptr_t data_pool_index_; - intptr_t target_pool_index_; - - DISALLOW_COPY_AND_ASSIGN(SwitchableCallPattern); -}; - - -class ReturnPattern : public ValueObject { - public: - explicit ReturnPattern(uword pc); - - // jr(RA) = 1 - static const int kLengthInBytes = 1 * Instr::kInstrSize; - - int pattern_length_in_bytes() const { return kLengthInBytes; } - - bool IsValid() const; - - private: - const uword pc_; -}; - -} // namespace dart - -#endif // RUNTIME_VM_INSTRUCTIONS_MIPS_H_ diff --git a/runtime/vm/instructions_mips_test.cc b/runtime/vm/instructions_mips_test.cc deleted file mode 100644 index 38d5936ed13..00000000000 --- a/runtime/vm/instructions_mips_test.cc +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file -// for details. All rights reserved. Use of this source code is governed by a -// BSD-style license that can be found in the LICENSE file. - -#include "vm/globals.h" -#if defined(TARGET_ARCH_MIPS) - -#include "vm/assembler.h" -#include "vm/instructions.h" -#include "vm/stub_code.h" -#include "vm/unit_test.h" - -namespace dart { - -#define __ assembler-> - -ASSEMBLER_TEST_GENERATE(Call, assembler) { - __ BranchLinkPatchable(*StubCode::InvokeDartCode_entry()); - __ Ret(); -} - - -ASSEMBLER_TEST_RUN(Call, test) { - // The return address, which must be the address of an instruction contained - // in the code, points to the Ret instruction above, i.e. two instructions - // before the end of the code buffer, including the delay slot for the - // return jump. 
- uword end = test->payload_start() + test->code().Size(); - CallPattern call(end - (2 * Instr::kInstrSize), test->code()); - EXPECT_EQ(StubCode::InvokeDartCode_entry()->code(), call.TargetCode()); -} - - -} // namespace dart - -#endif // defined TARGET_ARCH_MIPS diff --git a/runtime/vm/intermediate_language.cc b/runtime/vm/intermediate_language.cc index cfb7622d3a7..103e8fae3a5 100644 --- a/runtime/vm/intermediate_language.cc +++ b/runtime/vm/intermediate_language.cc @@ -3051,8 +3051,8 @@ void TargetEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) { #endif // The deoptimization descriptor points after the edge counter code for - // uniformity with ARM and MIPS, where we can reuse pattern matching - // code that matches backwards from the end of the pattern. + // uniformity with ARM, where we can reuse pattern matching code that + // matches backwards from the end of the pattern. compiler->AddCurrentDescriptor(RawPcDescriptors::kDeopt, GetDeoptId(), TokenPosition::kNoSource); } diff --git a/runtime/vm/intermediate_language_mips.cc b/runtime/vm/intermediate_language_mips.cc deleted file mode 100644 index e513f5ed3f2..00000000000 --- a/runtime/vm/intermediate_language_mips.cc +++ /dev/null @@ -1,5970 +0,0 @@ -// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file -// for details. All rights reserved. Use of this source code is governed by a -// BSD-style license that can be found in the LICENSE file. - -#include "vm/globals.h" // Needed here to get TARGET_ARCH_MIPS. -#if defined(TARGET_ARCH_MIPS) - -#include "vm/intermediate_language.h" - -#include "vm/compiler.h" -#include "vm/dart_entry.h" -#include "vm/flow_graph.h" -#include "vm/flow_graph_compiler.h" -#include "vm/flow_graph_range_analysis.h" -#include "vm/instructions.h" -#include "vm/locations.h" -#include "vm/object_store.h" -#include "vm/parser.h" -#include "vm/simulator.h" -#include "vm/stack_frame.h" -#include "vm/stub_code.h" -#include "vm/symbols.h" - -#define __ compiler->assembler()-> -#define Z (compiler->zone()) - -namespace dart { - -// Generic summary for call instructions that have all arguments pushed -// on the stack and return the result in a fixed register V0. -LocationSummary* Instruction::MakeCallSummary(Zone* zone) { - LocationSummary* result = - new (zone) LocationSummary(zone, 0, 0, LocationSummary::kCall); - result->set_out(0, Location::RegisterLocation(V0)); - return result; -} - - -LocationSummary* PushArgumentInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - const intptr_t kNumInputs = 1; - const intptr_t kNumTemps = 0; - LocationSummary* locs = new (zone) - LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); - locs->set_in(0, Location::AnyOrConstant(value())); - return locs; -} - - -void PushArgumentInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - // In SSA mode, we need an explicit push. Nothing to do in non-SSA mode - // where PushArgument is handled by BindInstr::EmitNativeCode. 
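The "end minus two instructions" arithmetic in the test above comes from MIPS delayed branches: the instruction immediately after a jump (its delay slot) executes before control actually transfers, so a call pattern ends with jalr plus one delay-slot instruction, and the return address points past both. A toy fetch loop showing that ordering (not the VM's simulator):

#include <cstdio>
#include <vector>

struct Insn {
  bool is_jump;
  const char* text;
};

// Executes a straight-line trace with delayed branches: on a jump, the
// following instruction runs first, then control leaves the trace.
void Run(const std::vector<Insn>& code) {
  for (size_t pc = 0; pc < code.size(); ++pc) {
    std::printf("%s\n", code[pc].text);
    if (code[pc].is_jump) {
      std::printf("%s  <- delay slot runs before the transfer\n",
                  code[pc + 1].text);
      return;
    }
  }
}

int main() {
  Run({{false, "lw   t9, [pool]"}, {true, "jalr t9"}, {false, "nop"}});
  return 0;
}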
- __ Comment("PushArgumentInstr"); - if (compiler->is_optimizing()) { - Location value = locs()->in(0); - if (value.IsRegister()) { - __ Push(value.reg()); - } else if (value.IsConstant()) { - __ PushObject(value.constant()); - } else { - ASSERT(value.IsStackSlot()); - const intptr_t value_offset = value.ToStackSlotOffset(); - __ LoadFromOffset(TMP, FP, value_offset); - __ Push(TMP); - } - } -} - - -LocationSummary* ReturnInstr::MakeLocationSummary(Zone* zone, bool opt) const { - const intptr_t kNumInputs = 1; - const intptr_t kNumTemps = 0; - LocationSummary* locs = new (zone) - LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); - locs->set_in(0, Location::RegisterLocation(V0)); - return locs; -} - - -// Attempt optimized compilation at return instruction instead of at the entry. -// The entry needs to be patchable, no inlined objects are allowed in the area -// that will be overwritten by the patch instructions: a branch macro sequence. -void ReturnInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - __ Comment("ReturnInstr"); - Register result = locs()->in(0).reg(); - ASSERT(result == V0); - - if (compiler->intrinsic_mode()) { - // Intrinsics don't have a frame. - __ Ret(); - return; - } - -#if defined(DEBUG) - Label stack_ok; - __ Comment("Stack Check"); - const intptr_t fp_sp_dist = - (kFirstLocalSlotFromFp + 1 - compiler->StackSize()) * kWordSize; - ASSERT(fp_sp_dist <= 0); - __ subu(CMPRES1, SP, FP); - - __ BranchEqual(CMPRES1, Immediate(fp_sp_dist), &stack_ok); - __ break_(0); - - __ Bind(&stack_ok); -#endif - __ LeaveDartFrameAndReturn(); -} - - -static Condition NegateCondition(Condition condition) { - switch (condition.rel_op()) { - case AL: - condition.set_rel_op(NV); - break; - case NV: - condition.set_rel_op(AL); - break; - case EQ: - condition.set_rel_op(NE); - break; - case NE: - condition.set_rel_op(EQ); - break; - case LT: - condition.set_rel_op(GE); - break; - case LE: - condition.set_rel_op(GT); - break; - case GT: - condition.set_rel_op(LE); - break; - case GE: - condition.set_rel_op(LT); - break; - case ULT: - condition.set_rel_op(UGE); - break; - case ULE: - condition.set_rel_op(UGT); - break; - case UGT: - condition.set_rel_op(ULE); - break; - case UGE: - condition.set_rel_op(ULT); - break; - default: - UNREACHABLE(); - } - return condition; -} - - -LocationSummary* IfThenElseInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - comparison()->InitializeLocationSummary(zone, opt); - return comparison()->locs(); -} - - -void IfThenElseInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - const Register result = locs()->out(0).reg(); - - intptr_t true_value = if_true_; - intptr_t false_value = if_false_; - bool swapped = false; - if (true_value == 0) { - // Swap values so that false_value is zero. - intptr_t temp = true_value; - true_value = false_value; - false_value = temp; - swapped = true; - } - - // Initialize result with the true value. - __ LoadImmediate(result, Smi::RawValue(true_value)); - - // Emit comparison code. This must not overwrite the result register. - // IfThenElseInstr::Supports() should prevent EmitComparisonCode from using - // the labels or returning an invalid condition. - BranchLabels labels = {NULL, NULL, NULL}; // Emit branch-free code. - Condition true_condition = comparison()->EmitComparisonCode(compiler, labels); - ASSERT(true_condition.IsValid()); - if (swapped) { - true_condition = NegateCondition(true_condition); - } - - // Evaluate condition and provide result in CMPRES1. 
- Register left = true_condition.left(); - Register right = true_condition.right(); - bool zero_is_false = true; // Zero in CMPRES1 indicates a false condition. - switch (true_condition.rel_op()) { - case AL: - return; // Result holds true_value. - case NV: - __ LoadImmediate(result, false_value); - return; - case EQ: - zero_is_false = false; - // fall through. - case NE: { - if (left == IMM) { - __ XorImmediate(CMPRES1, right, true_condition.imm()); - } else if (right == IMM) { - __ XorImmediate(CMPRES1, left, true_condition.imm()); - } else { - __ xor_(CMPRES1, left, right); - } - break; - } - case GE: - zero_is_false = false; - // fall through. - case LT: { - if (left == IMM) { - __ slti(CMPRES1, right, Immediate(true_condition.imm() + 1)); - zero_is_false = !zero_is_false; - } else if (right == IMM) { - __ slti(CMPRES1, left, Immediate(true_condition.imm())); - } else { - __ slt(CMPRES1, left, right); - } - break; - } - case LE: - zero_is_false = false; - // fall through. - case GT: { - if (left == IMM) { - __ slti(CMPRES1, right, Immediate(true_condition.imm())); - } else if (right == IMM) { - __ slti(CMPRES1, left, Immediate(true_condition.imm() + 1)); - zero_is_false = !zero_is_false; - } else { - __ slt(CMPRES1, right, left); - } - break; - } - case UGE: - zero_is_false = false; - // fall through. - case ULT: { - ASSERT((left != IMM) && (right != IMM)); // No unsigned constants used. - __ sltu(CMPRES1, left, right); - break; - } - case ULE: - zero_is_false = false; - // fall through. - case UGT: { - ASSERT((left != IMM) && (right != IMM)); // No unsigned constants used. - __ sltu(CMPRES1, right, left); - break; - } - default: - UNREACHABLE(); - } - - // CMPRES1 is the evaluated condition, zero or non-zero, as specified by the - // flag zero_is_false. - Register false_value_reg; - if (false_value == 0) { - false_value_reg = ZR; - } else { - __ LoadImmediate(CMPRES2, Smi::RawValue(false_value)); - false_value_reg = CMPRES2; - } - if (zero_is_false) { - __ movz(result, false_value_reg, CMPRES1); - } else { - __ movn(result, false_value_reg, CMPRES1); - } -} - - -LocationSummary* ClosureCallInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - const intptr_t kNumInputs = 1; - const intptr_t kNumTemps = 0; - LocationSummary* summary = new (zone) - LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall); - summary->set_in(0, Location::RegisterLocation(T0)); // Function. - summary->set_out(0, Location::RegisterLocation(V0)); - return summary; -} - - -void ClosureCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - // Load arguments descriptor in S4. - const intptr_t argument_count = ArgumentCount(); // Includes type args. - const Array& arguments_descriptor = - Array::ZoneHandle(Z, GetArgumentsDescriptor()); - __ LoadObject(S4, arguments_descriptor); - - // Load closure function code in T2. - // S4: arguments descriptor array. - // S5: Smi 0 (no IC data; the lazy-compile stub expects a GC-safe value). - ASSERT(locs()->in(0).reg() == T0); - __ LoadImmediate(S5, 0); - __ lw(T2, FieldAddress(T0, Function::entry_point_offset())); - __ lw(CODE_REG, FieldAddress(T0, Function::code_offset())); - __ jalr(T2); - compiler->RecordSafepoint(locs()); - compiler->EmitCatchEntryState(); - // Marks either the continuation point in unoptimized code or the - // deoptimization point in optimized code, after call. 
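The switch above synthesizes every relation from MIPS set-on-less-than, using two identities when one operand is an immediate: a <= imm is a < imm + 1, and a > imm is !(a < imm + 1) (hence the zero_is_false flips), after which movz/movn select the final value without a branch. The same logic in plain C++ (Slt and Movz mimic the instruction semantics; they are not VM calls):

#include <cassert>

int Slt(int a, int b) { return (a < b) ? 1 : 0; }  // slt/slti semantics.

// movz rd, rs, rt: rd = rs if rt == 0, else rd is left unchanged.
int Movz(int rd, int rs, int rt) { return (rt == 0) ? rs : rd; }

int main() {
  const int imm = 1;
  for (int a = -3; a <= 3; ++a) {
    assert((a <= imm) == (Slt(a, imm + 1) == 1));  // LE via LT.
    assert((a > imm) == (Slt(a, imm + 1) == 0));   // GT via negated LT.
  }
  assert(Movz(/*rd=*/10, /*rs=*/20, /*rt=*/0) == 20);
  assert(Movz(/*rd=*/10, /*rs=*/20, /*rt=*/7) == 10);
  return 0;
}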
- const intptr_t deopt_id_after = Thread::ToDeoptAfter(deopt_id()); - if (compiler->is_optimizing()) { - compiler->AddDeoptIndexAtCall(deopt_id_after); - } - // Add deoptimization continuation point after the call and before the - // arguments are removed. - // In optimized code this descriptor is needed for exception handling. - compiler->AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id_after, - token_pos()); - __ Drop(argument_count); -} - - -LocationSummary* LoadLocalInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - return LocationSummary::Make(zone, 0, Location::RequiresRegister(), - LocationSummary::kNoCall); -} - - -void LoadLocalInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - __ Comment("LoadLocalInstr"); - Register result = locs()->out(0).reg(); - __ LoadFromOffset(result, FP, local().index() * kWordSize); -} - - -LocationSummary* StoreLocalInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - return LocationSummary::Make(zone, 1, Location::SameAsFirstInput(), - LocationSummary::kNoCall); -} - - -void StoreLocalInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - __ Comment("StoreLocalInstr"); - Register value = locs()->in(0).reg(); - Register result = locs()->out(0).reg(); - ASSERT(result == value); // Assert that register assignment is correct. - __ StoreToOffset(value, FP, local().index() * kWordSize); -} - - -LocationSummary* ConstantInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - return LocationSummary::Make(zone, 0, Location::RequiresRegister(), - LocationSummary::kNoCall); -} - - -void ConstantInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - // The register allocator drops constant definitions that have no uses. - if (!locs()->out(0).IsInvalid()) { - __ Comment("ConstantInstr"); - Register result = locs()->out(0).reg(); - __ LoadObject(result, value()); - } -} - - -LocationSummary* UnboxedConstantInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - const intptr_t kNumInputs = 0; - const intptr_t kNumTemps = (representation_ == kUnboxedInt32) ? 0 : 1; - LocationSummary* locs = new (zone) - LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); - if (representation_ == kUnboxedInt32) { - locs->set_out(0, Location::RequiresRegister()); - } else { - ASSERT(representation_ == kUnboxedDouble); - locs->set_out(0, Location::RequiresFpuRegister()); - } - if (kNumTemps > 0) { - locs->set_temp(0, Location::RequiresRegister()); - } - return locs; -} - - -void UnboxedConstantInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - // The register allocator drops constant definitions that have no uses. - if (!locs()->out(0).IsInvalid()) { - switch (representation_) { - case kUnboxedDouble: { - ASSERT(value().IsDouble()); - const Register const_value = locs()->temp(0).reg(); - const DRegister result = locs()->out(0).fpu_reg(); - __ LoadObject(const_value, value()); - __ LoadDFromOffset(result, const_value, - Double::value_offset() - kHeapObjectTag); - break; - } - - case kUnboxedInt32: - __ LoadImmediate(locs()->out(0).reg(), Smi::Cast(value()).Value()); - break; - - default: - UNREACHABLE(); - } - } -} - - -LocationSummary* AssertAssignableInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - const intptr_t kNumInputs = 3; - const intptr_t kNumTemps = 0; - LocationSummary* summary = new (zone) - LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall); - summary->set_in(0, Location::RegisterLocation(A0)); // Value. - summary->set_in(1, Location::RegisterLocation(A1)); // Instant. type args. 
- summary->set_in(2, Location::RegisterLocation(A2)); // Function type args. - summary->set_out(0, Location::RegisterLocation(A0)); - return summary; -} - - -LocationSummary* AssertBooleanInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - const intptr_t kNumInputs = 1; - const intptr_t kNumTemps = 0; - LocationSummary* locs = new (zone) - LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall); - locs->set_in(0, Location::RegisterLocation(A0)); - locs->set_out(0, Location::RegisterLocation(A0)); - return locs; -} - - -static void EmitAssertBoolean(Register reg, - TokenPosition token_pos, - intptr_t deopt_id, - LocationSummary* locs, - FlowGraphCompiler* compiler) { - // Check that the type of the value is allowed in conditional context. - // Call the runtime if the object is not bool::true or bool::false. - ASSERT(locs->always_calls()); - Label done; - - if (Isolate::Current()->type_checks()) { - __ BranchEqual(reg, Bool::True(), &done); - __ BranchEqual(reg, Bool::False(), &done); - } else { - ASSERT(Isolate::Current()->asserts()); - __ BranchNotEqual(reg, Object::null_instance(), &done); - } - - __ Push(reg); // Push the source object. - compiler->GenerateRuntimeCall(token_pos, deopt_id, - kNonBoolTypeErrorRuntimeEntry, 1, locs); - // We should never return here. - __ break_(0); - __ Bind(&done); -} - - -void AssertBooleanInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - Register obj = locs()->in(0).reg(); - Register result = locs()->out(0).reg(); - - __ Comment("AssertBooleanInstr"); - EmitAssertBoolean(obj, token_pos(), deopt_id(), locs(), compiler); - ASSERT(obj == result); -} - - -LocationSummary* EqualityCompareInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - const intptr_t kNumInputs = 2; - if (operation_cid() == kMintCid) { - const intptr_t kNumTemps = 0; - LocationSummary* locs = new (zone) - LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); - locs->set_in(0, Location::Pair(Location::RequiresRegister(), - Location::RequiresRegister())); - locs->set_in(1, Location::Pair(Location::RequiresRegister(), - Location::RequiresRegister())); - locs->set_out(0, Location::RequiresRegister()); - return locs; - } - if (operation_cid() == kDoubleCid) { - const intptr_t kNumTemps = 0; - LocationSummary* locs = new (zone) - LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); - locs->set_in(0, Location::RequiresFpuRegister()); - locs->set_in(1, Location::RequiresFpuRegister()); - locs->set_out(0, Location::RequiresRegister()); - return locs; - } - if (operation_cid() == kSmiCid) { - const intptr_t kNumTemps = 0; - LocationSummary* locs = new (zone) - LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); - locs->set_in(0, Location::RegisterOrConstant(left())); - // Only one input can be a constant operand. The case of two constant - // operands should be handled by constant propagation. - locs->set_in(1, locs->in(0).IsConstant() - ? 
Location::RequiresRegister() - : Location::RegisterOrConstant(right())); - locs->set_out(0, Location::RequiresRegister()); - return locs; - } - UNREACHABLE(); - return NULL; -} - - -static void LoadValueCid(FlowGraphCompiler* compiler, - Register value_cid_reg, - Register value_reg, - Label* value_is_smi = NULL) { - __ Comment("LoadValueCid"); - Label done; - if (value_is_smi == NULL) { - __ LoadImmediate(value_cid_reg, kSmiCid); - } - __ andi(CMPRES1, value_reg, Immediate(kSmiTagMask)); - if (value_is_smi == NULL) { - __ beq(CMPRES1, ZR, &done); - } else { - __ beq(CMPRES1, ZR, value_is_smi); - } - __ LoadClassId(value_cid_reg, value_reg); - __ Bind(&done); -} - - -static RelationOperator TokenKindToIntRelOp(Token::Kind kind) { - switch (kind) { - case Token::kEQ: - return EQ; - case Token::kNE: - return NE; - case Token::kLT: - return LT; - case Token::kGT: - return GT; - case Token::kLTE: - return LE; - case Token::kGTE: - return GE; - default: - UNREACHABLE(); - return NV; - } -} - - -static RelationOperator TokenKindToUintRelOp(Token::Kind kind) { - switch (kind) { - case Token::kEQ: - return EQ; - case Token::kNE: - return NE; - case Token::kLT: - return ULT; - case Token::kGT: - return UGT; - case Token::kLTE: - return ULE; - case Token::kGTE: - return UGE; - default: - UNREACHABLE(); - return NV; - } -} - - -// The comparison code to emit is specified by true_condition. -static void EmitBranchOnCondition(FlowGraphCompiler* compiler, - Condition true_condition, - BranchLabels labels) { - __ Comment("ControlInstruction::EmitBranchOnCondition"); - if (labels.fall_through == labels.false_label) { - // If the next block is the false successor, fall through to it. - __ BranchOnCondition(true_condition, labels.true_label); - } else { - // If the next block is not the false successor, branch to it. - Condition false_condition = NegateCondition(true_condition); - __ BranchOnCondition(false_condition, labels.false_label); - // Fall through or jump to the true successor. - if (labels.fall_through != labels.true_label) { - __ b(labels.true_label); - } - } -} - - -static Condition EmitSmiComparisonOp(FlowGraphCompiler* compiler, - const LocationSummary& locs, - Token::Kind kind) { - __ Comment("EmitSmiComparisonOp"); - const Location left = locs.in(0); - const Location right = locs.in(1); - ASSERT(!left.IsConstant() || !right.IsConstant()); - ASSERT(left.IsRegister() || left.IsConstant()); - ASSERT(right.IsRegister() || right.IsConstant()); - - int16_t imm = 0; - const Register left_reg = - left.IsRegister() ? left.reg() : __ LoadConditionOperand( - CMPRES1, left.constant(), &imm); - const Register right_reg = - right.IsRegister() ? right.reg() : __ LoadConditionOperand( - CMPRES2, right.constant(), &imm); - return Condition(left_reg, right_reg, TokenKindToIntRelOp(kind), imm); -} - - -static Condition EmitUnboxedMintEqualityOp(FlowGraphCompiler* compiler, - const LocationSummary& locs, - Token::Kind kind, - BranchLabels labels) { - __ Comment("EmitUnboxedMintEqualityOp"); - ASSERT(Token::IsEqualityOperator(kind)); - PairLocation* left_pair = locs.in(0).AsPairLocation(); - Register left_lo = left_pair->At(0).reg(); - Register left_hi = left_pair->At(1).reg(); - PairLocation* right_pair = locs.in(1).AsPairLocation(); - Register right_lo = right_pair->At(0).reg(); - Register right_hi = right_pair->At(1).reg(); - - if (labels.false_label == NULL) { - // Generate branch-free code. 
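The xor/or sequence just below is the branch-free form of 64-bit equality on a 32-bit machine: the operands are equal iff (lo_a ^ lo_b) | (hi_a ^ hi_b) is zero. The ordered comparison that follows it works lexicographically, comparing high words signed and, only on a tie, low words unsigned. Both reductions as stand-alone functions:

#include <cassert>
#include <cstdint>

bool Equal64(uint32_t lo_a, uint32_t hi_a, uint32_t lo_b, uint32_t hi_b) {
  return ((lo_a ^ lo_b) | (hi_a ^ hi_b)) == 0;  // Zero iff all bits match.
}

bool Less64(int32_t hi_a, uint32_t lo_a, int32_t hi_b, uint32_t lo_b) {
  if (hi_a != hi_b) return hi_a < hi_b;  // Signed compare of high words.
  return lo_a < lo_b;                    // Unsigned compare of low words.
}

int main() {
  assert(Equal64(1, 2, 1, 2) && !Equal64(1, 2, 3, 2));
  assert(Less64(-1, 0xFFFFFFFFu, 0, 0));  // -1 < 0 despite a "big" low word.
  assert(Less64(0, 1, 0, 2) && !Less64(0, 2, 0, 1));
  return 0;
}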
- __ xor_(CMPRES1, left_lo, right_lo); - __ xor_(AT, left_hi, right_hi); - __ or_(CMPRES1, CMPRES1, AT); - return Condition(CMPRES1, ZR, TokenKindToUintRelOp(kind)); - } else { - if (kind == Token::kEQ) { - __ bne(left_hi, right_hi, labels.false_label); - } else { - ASSERT(kind == Token::kNE); - __ bne(left_hi, right_hi, labels.true_label); - } - return Condition(left_lo, right_lo, TokenKindToUintRelOp(kind)); - } -} - - -static Condition EmitUnboxedMintComparisonOp(FlowGraphCompiler* compiler, - const LocationSummary& locs, - Token::Kind kind, - BranchLabels labels) { - __ Comment("EmitUnboxedMintComparisonOp"); - PairLocation* left_pair = locs.in(0).AsPairLocation(); - Register left_lo = left_pair->At(0).reg(); - Register left_hi = left_pair->At(1).reg(); - PairLocation* right_pair = locs.in(1).AsPairLocation(); - Register right_lo = right_pair->At(0).reg(); - Register right_hi = right_pair->At(1).reg(); - - if (labels.false_label == NULL) { - // Generate branch-free code (except for skipping the lower words compare). - // Result in CMPRES1, CMPRES2, so that CMPRES1 op CMPRES2 === left op right. - Label done; - // Compare upper halves first. - __ slt(CMPRES1, right_hi, left_hi); - __ slt(CMPRES2, left_hi, right_hi); - // If higher words aren't equal, skip comparing lower words. - __ bne(CMPRES1, CMPRES2, &done); - - __ sltu(CMPRES1, right_lo, left_lo); - __ sltu(CMPRES2, left_lo, right_lo); - __ Bind(&done); - return Condition(CMPRES1, CMPRES2, TokenKindToUintRelOp(kind)); - } else { - switch (kind) { - case Token::kLT: - case Token::kLTE: { - __ slt(AT, left_hi, right_hi); - __ bne(AT, ZR, labels.true_label); - __ delay_slot()->slt(AT, right_hi, left_hi); - __ bne(AT, ZR, labels.false_label); - break; - } - case Token::kGT: - case Token::kGTE: { - __ slt(AT, left_hi, right_hi); - __ bne(AT, ZR, labels.false_label); - __ delay_slot()->slt(AT, right_hi, left_hi); - __ bne(AT, ZR, labels.true_label); - break; - } - default: - UNREACHABLE(); - } - return Condition(left_lo, right_lo, TokenKindToUintRelOp(kind)); - } -} - - -static Condition EmitDoubleComparisonOp(FlowGraphCompiler* compiler, - const LocationSummary& locs, - Token::Kind kind, - BranchLabels labels) { - DRegister left = locs.in(0).fpu_reg(); - DRegister right = locs.in(1).fpu_reg(); - - __ Comment("DoubleComparisonOp(left=%d, right=%d)", left, right); - - __ cund(left, right); - Label* nan_label = - (kind == Token::kNE) ? labels.true_label : labels.false_label; - __ bc1t(nan_label); - - switch (kind) { - case Token::kEQ: - __ ceqd(left, right); - break; - case Token::kNE: - __ ceqd(left, right); - break; - case Token::kLT: - __ coltd(left, right); - break; - case Token::kLTE: - __ coled(left, right); - break; - case Token::kGT: - __ coltd(right, left); - break; - case Token::kGTE: - __ coled(right, left); - break; - default: { - // We should only be passing the above conditions to this function. - UNREACHABLE(); - break; - } - } - - if (labels.false_label == NULL) { - // Generate branch-free code and return result in condition. - __ LoadImmediate(CMPRES1, 1); - if (kind == Token::kNE) { - __ movf(CMPRES1, ZR); - } else { - __ movt(CMPRES1, ZR); - } - return Condition(CMPRES1, ZR, EQ); - } else { - if (labels.fall_through == labels.false_label) { - if (kind == Token::kNE) { - __ bc1f(labels.true_label); - } else { - __ bc1t(labels.true_label); - } - // Since we already branched on true, return the never true condition. 
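The cund/bc1t pair above handles the IEEE 754 "unordered" case before the real comparison: every ordered comparison involving NaN is false, so the only operator that must answer true for NaN operands is !=, and the NaN branch therefore targets the true label exactly when the token is kNE. The underlying semantics, checkable on any host:

#include <cassert>
#include <cmath>

int main() {
  const double nan = std::nan("");
  assert(!(nan == 1.0) && !(nan < 1.0) && !(nan >= 1.0));  // All false.
  assert(nan != 1.0);  // The one comparison that is true for NaN.
  assert(nan != nan);
  return 0;
}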
- return Condition(CMPRES1, CMPRES2, NV); - } else { - if (kind == Token::kNE) { - __ bc1t(labels.false_label); - } else { - __ bc1f(labels.false_label); - } - // Since we already branched on false, return the always true condition. - return Condition(CMPRES1, CMPRES2, AL); - } - } -} - - -Condition EqualityCompareInstr::EmitComparisonCode(FlowGraphCompiler* compiler, - BranchLabels labels) { - if (operation_cid() == kSmiCid) { - return EmitSmiComparisonOp(compiler, *locs(), kind()); - } else if (operation_cid() == kMintCid) { - return EmitUnboxedMintEqualityOp(compiler, *locs(), kind(), labels); - } else { - ASSERT(operation_cid() == kDoubleCid); - return EmitDoubleComparisonOp(compiler, *locs(), kind(), labels); - } -} - - -void ComparisonInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - Label is_true, is_false; - BranchLabels labels = {&is_true, &is_false, &is_false}; - Condition true_condition = EmitComparisonCode(compiler, labels); - if (true_condition.IsValid()) { - EmitBranchOnCondition(compiler, true_condition, labels); - } - - Register result = this->locs()->out(0).reg(); - Label done; - __ Bind(&is_false); - __ LoadObject(result, Bool::False()); - __ b(&done); - __ Bind(&is_true); - __ LoadObject(result, Bool::True()); - __ Bind(&done); -} - - -void ComparisonInstr::EmitBranchCode(FlowGraphCompiler* compiler, - BranchInstr* branch) { - BranchLabels labels = compiler->CreateBranchLabels(branch); - Condition true_condition = EmitComparisonCode(compiler, labels); - if (true_condition.IsValid()) { - EmitBranchOnCondition(compiler, true_condition, labels); - } -} - - -LocationSummary* TestSmiInstr::MakeLocationSummary(Zone* zone, bool opt) const { - const intptr_t kNumInputs = 2; - const intptr_t kNumTemps = 0; - LocationSummary* locs = new (zone) - LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); - locs->set_in(0, Location::RequiresRegister()); - // Only one input can be a constant operand. The case of two constant - // operands should be handled by constant propagation. - locs->set_in(1, Location::RegisterOrConstant(right())); - return locs; -} - - -Condition TestSmiInstr::EmitComparisonCode(FlowGraphCompiler* compiler, - BranchLabels labels) { - Register left = locs()->in(0).reg(); - Location right = locs()->in(1); - if (right.IsConstant()) { - ASSERT(right.constant().IsSmi()); - const int32_t imm = reinterpret_cast<int32_t>(right.constant().raw()); - __ AndImmediate(CMPRES1, left, imm); - } else { - __ and_(CMPRES1, left, right.reg()); - } - return Condition(CMPRES1, ZR, (kind() == Token::kNE) ? NE : EQ); -} - - -LocationSummary* TestCidsInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - const intptr_t kNumInputs = 1; - const intptr_t kNumTemps = 1; - LocationSummary* locs = new (zone) - LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); - locs->set_in(0, Location::RequiresRegister()); - locs->set_temp(0, Location::RequiresRegister()); - locs->set_out(0, Location::RequiresRegister()); - return locs; -} - - -Condition TestCidsInstr::EmitComparisonCode(FlowGraphCompiler* compiler, - BranchLabels labels) { - ASSERT((kind() == Token::kIS) || (kind() == Token::kISNOT)); - Register val_reg = locs()->in(0).reg(); - Register cid_reg = locs()->temp(0).reg(); - - Label* deopt = - CanDeoptimize() - ? compiler->AddDeoptStub(deopt_id(), ICData::kDeoptTestCids, - licm_hoisted_ ? ICData::kHoisted : 0) - : NULL; - - const intptr_t true_result = (kind() == Token::kIS) ?
1 : 0; - const ZoneGrowableArray<intptr_t>& data = cid_results(); - ASSERT(data[0] == kSmiCid); - bool result = data[1] == true_result; - __ andi(CMPRES1, val_reg, Immediate(kSmiTagMask)); - __ beq(CMPRES1, ZR, result ? labels.true_label : labels.false_label); - - __ LoadClassId(cid_reg, val_reg); - for (intptr_t i = 2; i < data.length(); i += 2) { - const intptr_t test_cid = data[i]; - ASSERT(test_cid != kSmiCid); - result = data[i + 1] == true_result; - __ BranchEqual(cid_reg, Immediate(test_cid), - result ? labels.true_label : labels.false_label); - } - // No match found, deoptimize or default action. - if (deopt == NULL) { - // If the cid is not in the list, jump to the opposite label from the cids - // that are in the list. These must be all the same (see asserts in the - // constructor). - Label* target = result ? labels.false_label : labels.true_label; - if (target != labels.fall_through) { - __ b(target); - } - } else { - __ b(deopt); - } - // Dummy result as this method already did the jump, there's no need - // for the caller to branch on a condition. - return Condition(ZR, ZR, INVALID_RELATION); -} - - -LocationSummary* RelationalOpInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - const intptr_t kNumInputs = 2; - const intptr_t kNumTemps = 0; - if (operation_cid() == kMintCid) { - const intptr_t kNumTemps = 0; - LocationSummary* locs = new (zone) - LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); - locs->set_in(0, Location::Pair(Location::RequiresRegister(), - Location::RequiresRegister())); - locs->set_in(1, Location::Pair(Location::RequiresRegister(), - Location::RequiresRegister())); - locs->set_out(0, Location::RequiresRegister()); - return locs; - } - if (operation_cid() == kDoubleCid) { - LocationSummary* summary = new (zone) - LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); - summary->set_in(0, Location::RequiresFpuRegister()); - summary->set_in(1, Location::RequiresFpuRegister()); - summary->set_out(0, Location::RequiresRegister()); - return summary; - } - ASSERT(operation_cid() == kSmiCid); - LocationSummary* summary = new (zone) - LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); - summary->set_in(0, Location::RegisterOrConstant(left())); - // Only one input can be a constant operand. The case of two constant - // operands should be handled by constant propagation. - summary->set_in(1, summary->in(0).IsConstant() - ? Location::RequiresRegister() - : Location::RegisterOrConstant(right())); - summary->set_out(0, Location::RequiresRegister()); - return summary; -} - - -Condition RelationalOpInstr::EmitComparisonCode(FlowGraphCompiler* compiler, - BranchLabels labels) { - if (operation_cid() == kSmiCid) { - return EmitSmiComparisonOp(compiler, *locs(), kind()); - } else if (operation_cid() == kMintCid) { - return EmitUnboxedMintComparisonOp(compiler, *locs(), kind(), labels); - } else { - ASSERT(operation_cid() == kDoubleCid); - return EmitDoubleComparisonOp(compiler, *locs(), kind(), labels); - } -} - - -LocationSummary* NativeCallInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - return MakeCallSummary(zone); -} - - -void NativeCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - SetupNative(); - __ Comment("NativeCallInstr"); - Register result = locs()->out(0).reg(); - - // Push the result placeholder initialized to NULL. - __ PushObject(Object::null_object()); - // Pass a pointer to the first argument in A2.
- if (!function().HasOptionalParameters()) { - __ AddImmediate( - A2, FP, (kParamEndSlotFromFp + function().NumParameters()) * kWordSize); - } else { - __ AddImmediate(A2, FP, kFirstLocalSlotFromFp * kWordSize); - } - // Compute the effective address. When running under the simulator, - // this is a redirection address that forces the simulator to call - // into the runtime system. - uword entry; - const intptr_t argc_tag = NativeArguments::ComputeArgcTag(function()); - const StubEntry* stub_entry; - if (link_lazily()) { - stub_entry = StubCode::CallBootstrapNative_entry(); - entry = NativeEntry::LinkNativeCallEntry(); - } else { - entry = reinterpret_cast<uword>(native_c_function()); - if (is_bootstrap_native()) { - stub_entry = StubCode::CallBootstrapNative_entry(); -#if defined(USING_SIMULATOR) - entry = Simulator::RedirectExternalReference( - entry, Simulator::kBootstrapNativeCall, NativeEntry::kNumArguments); -#endif - } else if (is_auto_scope()) { - // In the case of non bootstrap native methods the CallNativeCFunction - // stub generates the redirection address when running under the simulator - // and hence we do not change 'entry' here. - stub_entry = StubCode::CallAutoScopeNative_entry(); - } else { - // In the case of non bootstrap native methods the CallNativeCFunction - // stub generates the redirection address when running under the simulator - // and hence we do not change 'entry' here. - stub_entry = StubCode::CallNoScopeNative_entry(); - } - } - __ LoadImmediate(A1, argc_tag); - ExternalLabel label(entry); - __ LoadNativeEntry(T5, &label, kNotPatchable); - if (link_lazily()) { - compiler->GeneratePatchableCall(token_pos(), *stub_entry, - RawPcDescriptors::kOther, locs()); - } else { - compiler->GenerateCall(token_pos(), *stub_entry, RawPcDescriptors::kOther, - locs()); - } - __ Pop(result); -} - - -LocationSummary* OneByteStringFromCharCodeInstr::MakeLocationSummary( - Zone* zone, - bool opt) const { - const intptr_t kNumInputs = 1; - // TODO(fschneider): Allow immediate operands for the char code. - return LocationSummary::Make(zone, kNumInputs, Location::RequiresRegister(), - LocationSummary::kNoCall); -} - - -void OneByteStringFromCharCodeInstr::EmitNativeCode( - FlowGraphCompiler* compiler) { - ASSERT(compiler->is_optimizing()); - Register char_code = locs()->in(0).reg(); - Register result = locs()->out(0).reg(); - - __ lw(result, Address(THR, Thread::predefined_symbols_address_offset())); - __ AddImmediate(result, Symbols::kNullCharCodeSymbolOffset * kWordSize); - __ sll(TMP, char_code, 1); // Char code is a smi.
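The sll-by-1 above is tag-aware index arithmetic: a Smi carries its integer value shifted left one bit, so shifting the still-tagged char code left once more produces value << 2, which on 32-bit MIPS is exactly value * kWordSize, a ready-made byte offset into the word-sized symbol table. In plain integers:

#include <cassert>
#include <cstdint>

int main() {
  const intptr_t kWordSize = 4;         // 32-bit MIPS.
  const intptr_t char_code = 65;        // 'A'
  const intptr_t smi = char_code << 1;  // Tagged (Smi) representation.
  assert((smi << 1) == char_code * kWordSize);  // One extra shift = scaling.
  return 0;
}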
- __ addu(TMP, TMP, result); - __ lw(result, Address(TMP)); -} - - -LocationSummary* StringToCharCodeInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - const intptr_t kNumInputs = 1; - return LocationSummary::Make(zone, kNumInputs, Location::RequiresRegister(), - LocationSummary::kNoCall); -} - - -void StringToCharCodeInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - __ Comment("StringToCharCodeInstr"); - - ASSERT(cid_ == kOneByteStringCid); - Register str = locs()->in(0).reg(); - Register result = locs()->out(0).reg(); - ASSERT(str != result); - Label done; - __ lw(result, FieldAddress(str, String::length_offset())); - __ BranchNotEqual(result, Immediate(Smi::RawValue(1)), &done); - __ delay_slot()->addiu(result, ZR, Immediate(Smi::RawValue(-1))); - __ lbu(result, FieldAddress(str, OneByteString::data_offset())); - __ SmiTag(result); - __ Bind(&done); -} - - -LocationSummary* StringInterpolateInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - const intptr_t kNumInputs = 1; - const intptr_t kNumTemps = 0; - LocationSummary* summary = new (zone) - LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall); - summary->set_in(0, Location::RegisterLocation(A0)); - summary->set_out(0, Location::RegisterLocation(V0)); - return summary; -} - - -void StringInterpolateInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - Register array = locs()->in(0).reg(); - __ Push(array); - const int kTypeArgsLen = 0; - const int kNumberOfArguments = 1; - const Array& kNoArgumentNames = Object::null_array(); - ArgumentsInfo args_info(kTypeArgsLen, kNumberOfArguments, kNoArgumentNames); - compiler->GenerateStaticCall(deopt_id(), token_pos(), CallFunction(), - args_info, locs(), ICData::Handle()); - ASSERT(locs()->out(0).reg() == V0); -} - - -LocationSummary* LoadUntaggedInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - const intptr_t kNumInputs = 1; - return LocationSummary::Make(zone, kNumInputs, Location::RequiresRegister(), - LocationSummary::kNoCall); -} - - -void LoadUntaggedInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - Register obj = locs()->in(0).reg(); - Register result = locs()->out(0).reg(); - if (object()->definition()->representation() == kUntagged) { - __ LoadFromOffset(result, obj, offset()); - } else { - ASSERT(object()->definition()->representation() == kTagged); - __ LoadFieldFromOffset(result, obj, offset()); - } -} - - -LocationSummary* LoadClassIdInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - const intptr_t kNumInputs = 1; - return LocationSummary::Make(zone, kNumInputs, Location::RequiresRegister(), - LocationSummary::kNoCall); -} - - -void LoadClassIdInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - Register object = locs()->in(0).reg(); - Register result = locs()->out(0).reg(); - const AbstractType& value_type = *this->object()->Type()->ToAbstractType(); - if (CompileType::Smi().IsAssignableTo(value_type) || - value_type.IsTypeParameter()) { - __ LoadTaggedClassIdMayBeSmi(result, object); - } else { - __ LoadClassId(result, object); - __ SmiTag(result); - } -} - - -CompileType LoadIndexedInstr::ComputeType() const { - switch (class_id_) { - case kArrayCid: - case kImmutableArrayCid: - return CompileType::Dynamic(); - - case kTypedDataFloat32ArrayCid: - case kTypedDataFloat64ArrayCid: - return CompileType::FromCid(kDoubleCid); - case kTypedDataFloat32x4ArrayCid: - return CompileType::FromCid(kFloat32x4Cid); - case kTypedDataInt32x4ArrayCid: - return CompileType::FromCid(kInt32x4Cid); - - case kTypedDataInt8ArrayCid: 
- case kTypedDataUint8ArrayCid: - case kTypedDataUint8ClampedArrayCid: - case kExternalTypedDataUint8ArrayCid: - case kExternalTypedDataUint8ClampedArrayCid: - case kTypedDataInt16ArrayCid: - case kTypedDataUint16ArrayCid: - case kOneByteStringCid: - case kTwoByteStringCid: - case kExternalOneByteStringCid: - case kExternalTwoByteStringCid: - return CompileType::FromCid(kSmiCid); - - case kTypedDataInt32ArrayCid: - case kTypedDataUint32ArrayCid: - return CompileType::Int(); - - default: - UNIMPLEMENTED(); - return CompileType::Dynamic(); - } -} - - -Representation LoadIndexedInstr::representation() const { - switch (class_id_) { - case kArrayCid: - case kImmutableArrayCid: - case kTypedDataInt8ArrayCid: - case kTypedDataUint8ArrayCid: - case kTypedDataUint8ClampedArrayCid: - case kExternalTypedDataUint8ArrayCid: - case kExternalTypedDataUint8ClampedArrayCid: - case kTypedDataInt16ArrayCid: - case kTypedDataUint16ArrayCid: - case kOneByteStringCid: - case kTwoByteStringCid: - case kExternalOneByteStringCid: - case kExternalTwoByteStringCid: - return kTagged; - case kTypedDataInt32ArrayCid: - return kUnboxedInt32; - case kTypedDataUint32ArrayCid: - return kUnboxedUint32; - case kTypedDataFloat32ArrayCid: - case kTypedDataFloat64ArrayCid: - return kUnboxedDouble; - case kTypedDataInt32x4ArrayCid: - return kUnboxedInt32x4; - case kTypedDataFloat32x4ArrayCid: - return kUnboxedFloat32x4; - default: - UNIMPLEMENTED(); - return kTagged; - } -} - - -static bool CanBeImmediateIndex(Value* value, intptr_t cid, bool is_external) { - ConstantInstr* constant = value->definition()->AsConstant(); - if ((constant == NULL) || !Assembler::IsSafeSmi(constant->value())) { - return false; - } - const int64_t index = Smi::Cast(constant->value()).AsInt64Value(); - const intptr_t scale = Instance::ElementSizeFor(cid); - const int64_t offset = - index * scale + - (is_external ? 0 : (Instance::DataOffsetFor(cid) - kHeapObjectTag)); - if (!Utils::IsInt(32, offset)) { - return false; - } - return Address::CanHoldOffset(static_cast<int32_t>(offset)); -} - - -LocationSummary* LoadIndexedInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - const intptr_t kNumInputs = 2; - const intptr_t kNumTemps = aligned() ? 0 : 1; - LocationSummary* locs = new (zone) - LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); - locs->set_in(0, Location::RequiresRegister()); - if (CanBeImmediateIndex(index(), class_id(), IsExternal())) { - locs->set_in(1, Location::Constant(index()->definition()->AsConstant())); - } else { - locs->set_in(1, Location::RequiresRegister()); - } - if ((representation() == kUnboxedDouble) || - (representation() == kUnboxedFloat32x4) || - (representation() == kUnboxedInt32x4)) { - locs->set_out(0, Location::RequiresFpuRegister()); - } else { - locs->set_out(0, Location::RequiresRegister()); - } - if (!aligned()) { - locs->set_temp(0, Location::RequiresRegister()); - } - return locs; -} - - -void LoadIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - __ Comment("LoadIndexedInstr"); - // The array register points to the backing store for external arrays. - const Register array = locs()->in(0).reg(); - const Location index = locs()->in(1); - const Register address = aligned() ? kNoRegister : locs()->temp(0).reg(); - - Address element_address(kNoRegister); - if (aligned()) { - element_address = - index.IsRegister() - ? __ ElementAddressForRegIndex(true, // Load.
- IsExternal(), class_id(), - index_scale(), array, index.reg()) - : __ ElementAddressForIntIndex(IsExternal(), class_id(), - index_scale(), array, - Smi::Cast(index.constant()).Value()); - // Warning: element_address may use register TMP as base. - } else { - if (index.IsRegister()) { - __ LoadElementAddressForRegIndex(address, - true, // Load. - IsExternal(), class_id(), index_scale(), - array, index.reg()); - } else { - __ LoadElementAddressForIntIndex(address, IsExternal(), class_id(), - index_scale(), array, - Smi::Cast(index.constant()).Value()); - } - } - - if ((representation() == kUnboxedDouble) || - (representation() == kUnboxedFloat32x4) || - (representation() == kUnboxedInt32x4)) { - DRegister result = locs()->out(0).fpu_reg(); - switch (class_id()) { - case kTypedDataFloat32ArrayCid: - // Load single precision float. - __ lwc1(EvenFRegisterOf(result), element_address); - break; - case kTypedDataFloat64ArrayCid: - __ LoadDFromOffset(result, element_address.base(), - element_address.offset()); - break; - case kTypedDataInt32x4ArrayCid: - case kTypedDataFloat32x4ArrayCid: - UNIMPLEMENTED(); - break; - } - return; - } - - if ((representation() == kUnboxedUint32) || - (representation() == kUnboxedInt32)) { - const Register result = locs()->out(0).reg(); - switch (class_id()) { - case kTypedDataInt32ArrayCid: - ASSERT(representation() == kUnboxedInt32); - if (aligned()) { - __ lw(result, element_address); - } else { - __ LoadWordUnaligned(result, address, TMP); - } - break; - case kTypedDataUint32ArrayCid: - ASSERT(representation() == kUnboxedUint32); - if (aligned()) { - __ lw(result, element_address); - } else { - __ LoadWordUnaligned(result, address, TMP); - } - break; - default: - UNREACHABLE(); - } - return; - } - - ASSERT(representation() == kTagged); - - const Register result = locs()->out(0).reg(); - switch (class_id()) { - case kTypedDataInt8ArrayCid: - ASSERT(index_scale() == 1); - __ lb(result, element_address); - __ SmiTag(result); - break; - case kTypedDataUint8ArrayCid: - case kTypedDataUint8ClampedArrayCid: - case kExternalTypedDataUint8ArrayCid: - case kExternalTypedDataUint8ClampedArrayCid: - case kOneByteStringCid: - case kExternalOneByteStringCid: - ASSERT(index_scale() == 1); - __ lbu(result, element_address); - __ SmiTag(result); - break; - case kTypedDataInt16ArrayCid: - if (aligned()) { - __ lh(result, element_address); - } else { - __ LoadHalfWordUnaligned(result, address, TMP); - } - __ SmiTag(result); - break; - case kTypedDataUint16ArrayCid: - case kTwoByteStringCid: - case kExternalTwoByteStringCid: - if (aligned()) { - __ lhu(result, element_address); - } else { - __ LoadHalfWordUnsignedUnaligned(result, address, TMP); - } - __ SmiTag(result); - break; - default: - ASSERT((class_id() == kArrayCid) || (class_id() == kImmutableArrayCid)); - ASSERT(aligned()); - __ lw(result, element_address); - break; - } -} - - -LocationSummary* LoadCodeUnitsInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - const intptr_t kNumInputs = 2; - const intptr_t kNumTemps = 0; - LocationSummary* summary = new (zone) - LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); - summary->set_in(0, Location::RequiresRegister()); - summary->set_in(1, Location::RequiresRegister()); - - // TODO(zerny): Handle mints properly once possible. 
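The element addresses formed above (and the immediate-offset test in CanBeImmediateIndex earlier) all reduce to one formula: scale the index by the element size, then, for arrays that live in the Dart heap, add the payload's offset inside the object and subtract the tag bit carried by tagged pointers; external typed data points straight at its backing store. A sketch with illustrative constants (the real payload offsets come from Instance::DataOffsetFor):

#include <cassert>
#include <cstdint>

int64_t ElementOffset(int64_t index, int64_t scale, bool is_external,
                      int64_t payload_offset, int64_t heap_object_tag) {
  return index * scale +
         (is_external ? 0 : payload_offset - heap_object_tag);
}

int main() {
  // Element 3 of 4-byte elements, payload 12 bytes into the object, tag
  // bit 1 (all illustrative values): 3 * 4 + 12 - 1.
  assert(ElementOffset(3, 4, false, 12, 1) == 23);
  assert(ElementOffset(3, 4, true, 12, 1) == 12);  // External: no header.
  return 0;
}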
- ASSERT(representation() == kTagged); - summary->set_out(0, Location::RequiresRegister()); - - return summary; -} - - -void LoadCodeUnitsInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - // The string register points to the backing store for external strings. - const Register str = locs()->in(0).reg(); - const Location index = locs()->in(1); - - Address element_address = __ ElementAddressForRegIndex( - true, IsExternal(), class_id(), index_scale(), str, index.reg()); - // Warning: element_address may use register TMP as base. - - ASSERT(representation() == kTagged); - Register result = locs()->out(0).reg(); - switch (class_id()) { - case kOneByteStringCid: - case kExternalOneByteStringCid: - switch (element_count()) { - case 1: - __ lbu(result, element_address); - break; - case 2: - __ lhu(result, element_address); - break; - case 4: // Loading multiple code units is disabled on MIPS. - default: - UNREACHABLE(); - } - __ SmiTag(result); - break; - case kTwoByteStringCid: - case kExternalTwoByteStringCid: - switch (element_count()) { - case 1: - __ lhu(result, element_address); - break; - case 2: // Loading multiple code units is disabled on MIPS. - default: - UNREACHABLE(); - } - __ SmiTag(result); - break; - default: - UNREACHABLE(); - break; - } -} - - -Representation StoreIndexedInstr::RequiredInputRepresentation( - intptr_t idx) const { - // Array can be a Dart object or a pointer to external data. - if (idx == 0) return kNoRepresentation; // Flexible input representation. - if (idx == 1) return kTagged; // Index is a smi. - ASSERT(idx == 2); - switch (class_id_) { - case kArrayCid: - case kOneByteStringCid: - case kTypedDataInt8ArrayCid: - case kTypedDataUint8ArrayCid: - case kExternalTypedDataUint8ArrayCid: - case kTypedDataUint8ClampedArrayCid: - case kExternalTypedDataUint8ClampedArrayCid: - case kTypedDataInt16ArrayCid: - case kTypedDataUint16ArrayCid: - return kTagged; - case kTypedDataInt32ArrayCid: - return kUnboxedInt32; - case kTypedDataUint32ArrayCid: - return kUnboxedUint32; - case kTypedDataFloat32ArrayCid: - case kTypedDataFloat64ArrayCid: - return kUnboxedDouble; - case kTypedDataFloat32x4ArrayCid: - return kUnboxedFloat32x4; - case kTypedDataInt32x4ArrayCid: - return kUnboxedInt32x4; - default: - UNIMPLEMENTED(); - return kTagged; - } -} - - -LocationSummary* StoreIndexedInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - const intptr_t kNumInputs = 3; - const intptr_t kNumTemps = aligned() ? 0 : 2; - LocationSummary* locs = new (zone) - LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); - locs->set_in(0, Location::RequiresRegister()); - if (CanBeImmediateIndex(index(), class_id(), IsExternal())) { - locs->set_in(1, Location::Constant(index()->definition()->AsConstant())); - } else { - locs->set_in(1, Location::WritableRegister()); - } - switch (class_id()) { - case kArrayCid: - locs->set_in(2, ShouldEmitStoreBarrier() - ? Location::WritableRegister() - : Location::RegisterOrConstant(value())); - break; - case kExternalTypedDataUint8ArrayCid: - case kExternalTypedDataUint8ClampedArrayCid: - case kTypedDataInt8ArrayCid: - case kTypedDataUint8ArrayCid: - case kTypedDataUint8ClampedArrayCid: - case kOneByteStringCid: - case kTypedDataInt16ArrayCid: - case kTypedDataUint16ArrayCid: - case kTypedDataInt32ArrayCid: - case kTypedDataUint32ArrayCid: - locs->set_in(2, Location::RequiresRegister()); - break; - case kTypedDataFloat32ArrayCid: - case kTypedDataFloat64ArrayCid: // TODO(srdjan): Support Float64 constants. 
- case kTypedDataInt32x4ArrayCid: - case kTypedDataFloat32x4ArrayCid: - locs->set_in(2, Location::RequiresFpuRegister()); - break; - default: - UNREACHABLE(); - return NULL; - } - if (!aligned()) { - locs->set_temp(0, Location::RequiresRegister()); - locs->set_temp(1, Location::RequiresRegister()); - } - return locs; -} - - -void StoreIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - __ Comment("StoreIndexedInstr"); - // The array register points to the backing store for external arrays. - const Register array = locs()->in(0).reg(); - const Location index = locs()->in(1); - const Register address = aligned() ? kNoRegister : locs()->temp(0).reg(); - const Register scratch = aligned() ? kNoRegister : locs()->temp(1).reg(); - - Address element_address(kNoRegister); - if (aligned()) { - element_address = - index.IsRegister() - ? __ ElementAddressForRegIndex(false, // Store. - IsExternal(), class_id(), - index_scale(), array, index.reg()) - : __ ElementAddressForIntIndex(IsExternal(), class_id(), - index_scale(), array, - Smi::Cast(index.constant()).Value()); - ASSERT(element_address.base() != TMP); // Allowed for load only. - } else { - if (index.IsRegister()) { - __ LoadElementAddressForRegIndex(address, - false, // Store. - IsExternal(), class_id(), index_scale(), - array, index.reg()); - } else { - __ LoadElementAddressForIntIndex(address, IsExternal(), class_id(), - index_scale(), array, - Smi::Cast(index.constant()).Value()); - } - } - - switch (class_id()) { - case kArrayCid: - ASSERT(aligned()); - if (ShouldEmitStoreBarrier()) { - Register value = locs()->in(2).reg(); - __ StoreIntoObject(array, element_address, value); - } else if (locs()->in(2).IsConstant()) { - const Object& constant = locs()->in(2).constant(); - __ StoreIntoObjectNoBarrier(array, element_address, constant); - } else { - Register value = locs()->in(2).reg(); - __ StoreIntoObjectNoBarrier(array, element_address, value); - } - break; - case kTypedDataInt8ArrayCid: - case kTypedDataUint8ArrayCid: - case kExternalTypedDataUint8ArrayCid: - case kOneByteStringCid: { - ASSERT(aligned()); - if (locs()->in(2).IsConstant()) { - const Smi& constant = Smi::Cast(locs()->in(2).constant()); - __ LoadImmediate(TMP, static_cast<int8_t>(constant.Value())); - __ sb(TMP, element_address); - } else { - Register value = locs()->in(2).reg(); - __ SmiUntag(TMP, value); - __ sb(TMP, element_address); - } - break; - } - case kTypedDataUint8ClampedArrayCid: - case kExternalTypedDataUint8ClampedArrayCid: { - ASSERT(aligned()); - if (locs()->in(2).IsConstant()) { - const Smi& constant = Smi::Cast(locs()->in(2).constant()); - intptr_t value = constant.Value(); - // Clamp to 0x0 or 0xFF respectively.
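The store that follows implements Uint8ClampedList semantics: out-of-range values are pinned to [0, 255] rather than truncated. For constants the clamp happens at compile time; for registers the code exploits that a single unsigned "value < 256" test also rejects negatives (they wrap to huge unsigned numbers), then uses slti/movn to pick 0 or 0xFF branch-free. The reference behavior as a stand-alone function:

#include <cassert>
#include <cstdint>

uint8_t ClampByte(int64_t value) {
  if (value < 0) return 0;        // Negative values pin to 0.
  if (value > 0xFF) return 0xFF;  // Large values pin to 255.
  return static_cast<uint8_t>(value);
}

int main() {
  assert(ClampByte(-7) == 0);
  assert(ClampByte(300) == 0xFF);
  assert(ClampByte(42) == 42);
  return 0;
}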
- if (value > 0xFF) { - value = 0xFF; - } else if (value < 0) { - value = 0; - } - __ LoadImmediate(TMP, static_cast<int8_t>(value)); - __ sb(TMP, element_address); - } else { - Register value = locs()->in(2).reg(); - Label store_value, bigger, smaller; - __ SmiUntag(TMP, value); - __ BranchUnsignedLess(TMP, Immediate(0xFF + 1), &store_value); - __ LoadImmediate(TMP, 0xFF); - __ slti(CMPRES1, value, Immediate(1)); - __ movn(TMP, ZR, CMPRES1); - __ Bind(&store_value); - __ sb(TMP, element_address); - } - break; - } - case kTypedDataInt16ArrayCid: - case kTypedDataUint16ArrayCid: { - Register value = locs()->in(2).reg(); - __ SmiUntag(TMP, value); - if (aligned()) { - __ sh(TMP, element_address); - } else { - __ StoreHalfWordUnaligned(TMP, address, scratch); - } - break; - } - case kTypedDataInt32ArrayCid: - case kTypedDataUint32ArrayCid: { - if (aligned()) { - __ sw(locs()->in(2).reg(), element_address); - } else { - __ StoreWordUnaligned(locs()->in(2).reg(), address, scratch); - } - break; - } - case kTypedDataFloat32ArrayCid: { - ASSERT(aligned()); - FRegister value = EvenFRegisterOf(locs()->in(2).fpu_reg()); - __ swc1(value, element_address); - break; - } - case kTypedDataFloat64ArrayCid: - ASSERT(aligned()); - __ StoreDToOffset(locs()->in(2).fpu_reg(), element_address.base(), - element_address.offset()); - break; - case kTypedDataInt32x4ArrayCid: - case kTypedDataFloat32x4ArrayCid: - UNIMPLEMENTED(); - break; - default: - UNREACHABLE(); - } -} - - -LocationSummary* GuardFieldClassInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - const intptr_t kNumInputs = 1; - - const intptr_t value_cid = value()->Type()->ToCid(); - const intptr_t field_cid = field().guarded_cid(); - - const bool emit_full_guard = !opt || (field_cid == kIllegalCid); - const bool needs_value_cid_temp_reg = - (value_cid == kDynamicCid) && (emit_full_guard || (field_cid != kSmiCid)); - const bool needs_field_temp_reg = emit_full_guard; - - intptr_t num_temps = 0; - if (needs_value_cid_temp_reg) { - num_temps++; - } - if (needs_field_temp_reg) { - num_temps++; - } - - LocationSummary* summary = new (zone) - LocationSummary(zone, kNumInputs, num_temps, LocationSummary::kNoCall); - summary->set_in(0, Location::RequiresRegister()); - - for (intptr_t i = 0; i < num_temps; i++) { - summary->set_temp(i, Location::RequiresRegister()); - } - - return summary; -} - - -void GuardFieldClassInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - ASSERT(sizeof(classid_t) == kInt16Size); - __ Comment("GuardFieldClassInstr"); - - const intptr_t value_cid = value()->Type()->ToCid(); - const intptr_t field_cid = field().guarded_cid(); - const intptr_t nullability = field().is_nullable() ? kNullCid : kIllegalCid; - - if (field_cid == kDynamicCid) { - if (Compiler::IsBackgroundCompilation()) { - // Field state changed while compiling. - Compiler::AbortBackgroundCompilation( - deopt_id(), - "GuardFieldClassInstr: field state changed while compiling"); - } - ASSERT(!compiler->is_optimizing()); - return; // Nothing to emit. - } - - const bool emit_full_guard = - !compiler->is_optimizing() || (field_cid == kIllegalCid); - - const bool needs_value_cid_temp_reg = - (value_cid == kDynamicCid) && (emit_full_guard || (field_cid != kSmiCid)); - - const bool needs_field_temp_reg = emit_full_guard; - - const Register value_reg = locs()->in(0).reg(); - - const Register value_cid_reg = - needs_value_cid_temp_reg ? locs()->temp(0).reg() : kNoRegister; - - const Register field_reg = needs_field_temp_reg - ?
- : kNoRegister;
-
- Label ok, fail_label;
-
- Label* deopt =
- compiler->is_optimizing()
- ? compiler->AddDeoptStub(deopt_id(), ICData::kDeoptGuardField)
- : NULL;
-
- Label* fail = (deopt != NULL) ? deopt : &fail_label;
-
- if (emit_full_guard) {
- __ LoadObject(field_reg, Field::ZoneHandle(field().Original()));
-
- FieldAddress field_cid_operand(field_reg, Field::guarded_cid_offset());
- FieldAddress field_nullability_operand(field_reg,
- Field::is_nullable_offset());
-
- if (value_cid == kDynamicCid) {
- LoadValueCid(compiler, value_cid_reg, value_reg);
-
- __ lhu(CMPRES1, field_cid_operand);
- __ beq(value_cid_reg, CMPRES1, &ok);
- __ lhu(TMP, field_nullability_operand);
- __ subu(CMPRES1, value_cid_reg, TMP);
- } else if (value_cid == kNullCid) {
- __ lhu(TMP, field_nullability_operand);
- __ LoadImmediate(CMPRES1, value_cid);
- __ subu(CMPRES1, TMP, CMPRES1);
- } else {
- __ lhu(TMP, field_cid_operand);
- __ LoadImmediate(CMPRES1, value_cid);
- __ subu(CMPRES1, TMP, CMPRES1);
- }
- __ beq(CMPRES1, ZR, &ok);
-
- // Check if the tracked state of the guarded field can be initialized
- // inline. If the field needs a length check, we fall through to the
- // runtime, which is responsible for computing the offset of the length
- // field based on the class id.
- // Length guard will be emitted separately when needed via GuardFieldLength
- // instruction after GuardFieldClass.
- if (!field().needs_length_check()) {
- // Uninitialized field can be handled inline. Check if the
- // field is still uninitialized.
- __ lhu(CMPRES1, field_cid_operand);
- __ BranchNotEqual(CMPRES1, Immediate(kIllegalCid), fail);
-
- if (value_cid == kDynamicCid) {
- __ sh(value_cid_reg, field_cid_operand);
- __ sh(value_cid_reg, field_nullability_operand);
- } else {
- __ LoadImmediate(TMP, value_cid);
- __ sh(TMP, field_cid_operand);
- __ sh(TMP, field_nullability_operand);
- }
-
- if (deopt == NULL) {
- ASSERT(!compiler->is_optimizing());
- __ b(&ok);
- }
- }
-
- if (deopt == NULL) {
- ASSERT(!compiler->is_optimizing());
- __ Bind(fail);
-
- __ lhu(CMPRES1, FieldAddress(field_reg, Field::guarded_cid_offset()));
- __ BranchEqual(CMPRES1, Immediate(kDynamicCid), &ok);
-
- __ addiu(SP, SP, Immediate(-2 * kWordSize));
- __ sw(field_reg, Address(SP, 1 * kWordSize));
- __ sw(value_reg, Address(SP, 0 * kWordSize));
- __ CallRuntime(kUpdateFieldCidRuntimeEntry, 2);
- __ Drop(2); // Drop the field and the value.
- }
- } else {
- ASSERT(compiler->is_optimizing());
- ASSERT(deopt != NULL);
-
- // Field guard class has been initialized and is known.
- if (value_cid == kDynamicCid) {
- // Value's class id is not known.
- __ andi(CMPRES1, value_reg, Immediate(kSmiTagMask));
-
- if (field_cid != kSmiCid) {
- __ beq(CMPRES1, ZR, fail);
- __ LoadClassId(value_cid_reg, value_reg);
- __ LoadImmediate(TMP, field_cid);
- __ subu(CMPRES1, value_cid_reg, TMP);
- }
-
- if (field().is_nullable() && (field_cid != kNullCid)) {
- __ beq(CMPRES1, ZR, &ok);
- if (field_cid != kSmiCid) {
- __ LoadImmediate(TMP, kNullCid);
- __ subu(CMPRES1, value_cid_reg, TMP);
- } else {
- __ LoadObject(TMP, Object::null_object());
- __ subu(CMPRES1, value_reg, TMP);
- }
- }
-
- __ bne(CMPRES1, ZR, fail);
- } else {
- // Both the value's and the field's class ids are known.
- ASSERT((value_cid != field_cid) && (value_cid != nullability));
- __ b(fail);
- }
- }
- __ Bind(&ok);
-}
-
-
-LocationSummary* GuardFieldLengthInstr::MakeLocationSummary(Zone* zone,
- bool opt) const {
- const intptr_t kNumInputs = 1;
-
- if (!opt || (field().guarded_list_length() == Field::kUnknownFixedLength)) {
- const intptr_t kNumTemps = 1;
- LocationSummary* summary = new (zone)
- LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
- summary->set_in(0, Location::RequiresRegister());
- // We need a temporary for the field object.
- summary->set_temp(0, Location::RequiresRegister());
- return summary;
- }
- LocationSummary* summary =
- new (zone) LocationSummary(zone, kNumInputs, 0, LocationSummary::kNoCall);
- summary->set_in(0, Location::RequiresRegister());
- return summary;
-}
-
-
-void GuardFieldLengthInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- if (field().guarded_list_length() == Field::kNoFixedLength) {
- if (Compiler::IsBackgroundCompilation()) {
- // Field state changed while compiling.
- Compiler::AbortBackgroundCompilation(
- deopt_id(),
- "GuardFieldLengthInstr: field state changed while compiling");
- }
- ASSERT(!compiler->is_optimizing());
- return; // Nothing to emit.
- }
-
- Label* deopt =
- compiler->is_optimizing()
- ? compiler->AddDeoptStub(deopt_id(), ICData::kDeoptGuardField)
- : NULL;
-
- const Register value_reg = locs()->in(0).reg();
-
- if (!compiler->is_optimizing() ||
- (field().guarded_list_length() == Field::kUnknownFixedLength)) {
- const Register field_reg = locs()->temp(0).reg();
-
- Label ok;
-
- __ LoadObject(field_reg, Field::ZoneHandle(field().Original()));
-
- __ lb(CMPRES1,
- FieldAddress(field_reg,
- Field::guarded_list_length_in_object_offset_offset()));
- __ blez(CMPRES1, &ok);
-
- __ lw(CMPRES2,
- FieldAddress(field_reg, Field::guarded_list_length_offset()));
-
- // Load the length from the value. GuardFieldClass already verified that
- // the value's class matches the guarded class id of the field.
- // CMPRES1 contains the offset already corrected by -kHeapObjectTag, which
- // is why we can use Address instead of FieldAddress.
- __ addu(TMP, value_reg, CMPRES1);
- __ lw(TMP, Address(TMP));
-
- if (deopt == NULL) {
- __ beq(CMPRES2, TMP, &ok);
-
- __ addiu(SP, SP, Immediate(-2 * kWordSize));
- __ sw(field_reg, Address(SP, 1 * kWordSize));
- __ sw(value_reg, Address(SP, 0 * kWordSize));
- __ CallRuntime(kUpdateFieldCidRuntimeEntry, 2);
- __ Drop(2); // Drop the field and the value.
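The runtime entry used on this path, kUpdateFieldCidRuntimeEntry, re-derives the field's guarded state from the freshly stored value. Roughly, and only as a loose model (this is not the VM's actual Field implementation), the transition it maintains looks like:

    #include <cstdint>

    enum SimpleCid : int32_t { kUnknown = -1, kDynamic = -2 };  // placeholder ids

    // A field's guarded cid starts unknown, latches to the first cid stored,
    // and degrades to "dynamic" once conflicting cids are observed; after
    // that no class guard needs to be emitted for the field.
    struct GuardedFieldModel {
      int32_t guarded_cid = kUnknown;
      void RecordStore(int32_t value_cid) {
        if (guarded_cid == kUnknown) {
          guarded_cid = value_cid;   // first observation wins
        } else if (guarded_cid != value_cid) {
          guarded_cid = kDynamic;    // conflict: stop guarding
        }
      }
    };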
- } else { - __ bne(CMPRES2, TMP, deopt); - } - - __ Bind(&ok); - } else { - ASSERT(compiler->is_optimizing()); - ASSERT(field().guarded_list_length() >= 0); - ASSERT(field().guarded_list_length_in_object_offset() != - Field::kUnknownLengthOffset); - - __ lw(CMPRES1, - FieldAddress(value_reg, - field().guarded_list_length_in_object_offset())); - __ LoadImmediate(TMP, Smi::RawValue(field().guarded_list_length())); - __ bne(CMPRES1, TMP, deopt); - } -} - - -class BoxAllocationSlowPath : public SlowPathCode { - public: - BoxAllocationSlowPath(Instruction* instruction, - const Class& cls, - Register result) - : instruction_(instruction), cls_(cls), result_(result) {} - - virtual void EmitNativeCode(FlowGraphCompiler* compiler) { - if (Assembler::EmittingComments()) { - __ Comment("%s slow path allocation of %s", instruction_->DebugName(), - String::Handle(cls_.ScrubbedName()).ToCString()); - } - __ Bind(entry_label()); - const Code& stub = Code::ZoneHandle( - compiler->zone(), StubCode::GetAllocationStubForClass(cls_)); - const StubEntry stub_entry(stub); - - LocationSummary* locs = instruction_->locs(); - locs->live_registers()->Remove(Location::RegisterLocation(result_)); - - compiler->SaveLiveRegisters(locs); - compiler->GenerateCall(TokenPosition::kNoSource, // No token position. - stub_entry, RawPcDescriptors::kOther, locs); - compiler->AddStubCallTarget(stub); - if (result_ != V0) { - __ mov(result_, V0); - } - compiler->RestoreLiveRegisters(locs); - __ b(exit_label()); - } - - static void Allocate(FlowGraphCompiler* compiler, - Instruction* instruction, - const Class& cls, - Register result, - Register temp) { - if (compiler->intrinsic_mode()) { - __ TryAllocate(cls, compiler->intrinsic_slow_path_label(), result, temp); - } else { - BoxAllocationSlowPath* slow_path = - new BoxAllocationSlowPath(instruction, cls, result); - compiler->AddSlowPathCode(slow_path); - - __ TryAllocate(cls, slow_path->entry_label(), result, temp); - __ Bind(slow_path->exit_label()); - } - } - - private: - Instruction* instruction_; - const Class& cls_; - const Register result_; -}; - - -LocationSummary* StoreInstanceFieldInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - const intptr_t kNumInputs = 2; - const intptr_t kNumTemps = - (IsUnboxedStore() && opt) ? 2 : ((IsPotentialUnboxedStore()) ? 3 : 0); - LocationSummary* summary = new (zone) - LocationSummary(zone, kNumInputs, kNumTemps, - ((IsUnboxedStore() && opt && is_initialization()) || - IsPotentialUnboxedStore()) - ? LocationSummary::kCallOnSlowPath - : LocationSummary::kNoCall); - - summary->set_in(0, Location::RequiresRegister()); - if (IsUnboxedStore() && opt) { - summary->set_in(1, Location::RequiresFpuRegister()); - summary->set_temp(0, Location::RequiresRegister()); - summary->set_temp(1, Location::RequiresRegister()); - } else if (IsPotentialUnboxedStore()) { - summary->set_in(1, ShouldEmitStoreBarrier() ? Location::WritableRegister() - : Location::RequiresRegister()); - summary->set_temp(0, Location::RequiresRegister()); - summary->set_temp(1, Location::RequiresRegister()); - summary->set_temp(2, opt ? Location::RequiresFpuRegister() - : Location::FpuRegisterLocation(D1)); - } else { - summary->set_in(1, ShouldEmitStoreBarrier() - ? 
Location::WritableRegister() - : Location::RegisterOrConstant(value())); - } - return summary; -} - - -static void EnsureMutableBox(FlowGraphCompiler* compiler, - StoreInstanceFieldInstr* instruction, - Register box_reg, - const Class& cls, - Register instance_reg, - intptr_t offset, - Register temp) { - Label done; - __ lw(box_reg, FieldAddress(instance_reg, offset)); - __ BranchNotEqual(box_reg, Object::null_object(), &done); - BoxAllocationSlowPath::Allocate(compiler, instruction, cls, box_reg, temp); - __ mov(temp, box_reg); - __ StoreIntoObjectOffset(instance_reg, offset, temp); - __ Bind(&done); -} - - -void StoreInstanceFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - ASSERT(sizeof(classid_t) == kInt16Size); - Label skip_store; - - Register instance_reg = locs()->in(0).reg(); - - if (IsUnboxedStore() && compiler->is_optimizing()) { - DRegister value = locs()->in(1).fpu_reg(); - Register temp = locs()->temp(0).reg(); - Register temp2 = locs()->temp(1).reg(); - const intptr_t cid = field().UnboxedFieldCid(); - - if (is_initialization()) { - const Class* cls = NULL; - switch (cid) { - case kDoubleCid: - cls = &compiler->double_class(); - break; - default: - UNREACHABLE(); - } - - BoxAllocationSlowPath::Allocate(compiler, this, *cls, temp, temp2); - __ mov(temp2, temp); - __ StoreIntoObjectOffset(instance_reg, offset_in_bytes_, temp2); - } else { - __ lw(temp, FieldAddress(instance_reg, offset_in_bytes_)); - } - switch (cid) { - case kDoubleCid: - __ StoreDToOffset(value, temp, Double::value_offset() - kHeapObjectTag); - break; - default: - UNREACHABLE(); - } - return; - } - - if (IsPotentialUnboxedStore()) { - Register value_reg = locs()->in(1).reg(); - Register temp = locs()->temp(0).reg(); - Register temp2 = locs()->temp(1).reg(); - DRegister fpu_temp = locs()->temp(2).fpu_reg(); - - if (ShouldEmitStoreBarrier()) { - // Value input is a writable register and should be manually preserved - // across allocation slow-path. - locs()->live_registers()->Add(locs()->in(1), kTagged); - } - - Label store_pointer; - Label store_double; - - __ LoadObject(temp, Field::ZoneHandle(Z, field().Original())); - - __ lhu(temp2, FieldAddress(temp, Field::is_nullable_offset())); - __ BranchEqual(temp2, Immediate(kNullCid), &store_pointer); - - __ lbu(temp2, FieldAddress(temp, Field::kind_bits_offset())); - __ andi(CMPRES1, temp2, Immediate(1 << Field::kUnboxingCandidateBit)); - __ beq(CMPRES1, ZR, &store_pointer); - - __ lhu(temp2, FieldAddress(temp, Field::guarded_cid_offset())); - __ BranchEqual(temp2, Immediate(kDoubleCid), &store_double); - - // Fall through. 
- __ b(&store_pointer); - - if (!compiler->is_optimizing()) { - locs()->live_registers()->Add(locs()->in(0)); - locs()->live_registers()->Add(locs()->in(1)); - } - - { - __ Bind(&store_double); - EnsureMutableBox(compiler, this, temp, compiler->double_class(), - instance_reg, offset_in_bytes_, temp2); - __ LoadDFromOffset(fpu_temp, value_reg, - Double::value_offset() - kHeapObjectTag); - __ StoreDToOffset(fpu_temp, temp, - Double::value_offset() - kHeapObjectTag); - __ b(&skip_store); - } - - __ Bind(&store_pointer); - } - - if (ShouldEmitStoreBarrier()) { - Register value_reg = locs()->in(1).reg(); - __ StoreIntoObjectOffset(instance_reg, offset_in_bytes_, value_reg, - CanValueBeSmi()); - } else { - if (locs()->in(1).IsConstant()) { - __ StoreIntoObjectNoBarrierOffset(instance_reg, offset_in_bytes_, - locs()->in(1).constant()); - } else { - Register value_reg = locs()->in(1).reg(); - __ StoreIntoObjectNoBarrierOffset(instance_reg, offset_in_bytes_, - value_reg); - } - } - __ Bind(&skip_store); -} - - -LocationSummary* LoadStaticFieldInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - const intptr_t kNumInputs = 1; - const intptr_t kNumTemps = 0; - LocationSummary* summary = new (zone) - LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); - summary->set_in(0, Location::RequiresRegister()); - summary->set_out(0, Location::RequiresRegister()); - return summary; -} - - -// When the parser is building an implicit static getter for optimization, -// it can generate a function body where deoptimization ids do not line up -// with the unoptimized code. -// -// This is safe only so long as LoadStaticFieldInstr cannot deoptimize. -void LoadStaticFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - __ Comment("LoadStaticFieldInstr"); - Register field = locs()->in(0).reg(); - Register result = locs()->out(0).reg(); - __ LoadFromOffset(result, field, - Field::static_value_offset() - kHeapObjectTag); -} - - -LocationSummary* StoreStaticFieldInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - LocationSummary* locs = - new (zone) LocationSummary(zone, 1, 1, LocationSummary::kNoCall); - locs->set_in(0, value()->NeedsStoreBuffer() ? Location::WritableRegister() - : Location::RequiresRegister()); - locs->set_temp(0, Location::RequiresRegister()); - return locs; -} - - -void StoreStaticFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - __ Comment("StoreStaticFieldInstr"); - Register value = locs()->in(0).reg(); - Register temp = locs()->temp(0).reg(); - - __ LoadObject(temp, Field::ZoneHandle(Z, field().Original())); - if (this->value()->NeedsStoreBuffer()) { - __ StoreIntoObject(temp, FieldAddress(temp, Field::static_value_offset()), - value, CanValueBeSmi()); - } else { - __ StoreIntoObjectNoBarrier( - temp, FieldAddress(temp, Field::static_value_offset()), value); - } -} - - -LocationSummary* InstanceOfInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - const intptr_t kNumInputs = 3; - const intptr_t kNumTemps = 0; - LocationSummary* summary = new (zone) - LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall); - summary->set_in(0, Location::RegisterLocation(A0)); // Instance. - summary->set_in(1, Location::RegisterLocation(A1)); // Instant. type args. - summary->set_in(2, Location::RegisterLocation(A2)); // Function type args. - summary->set_out(0, Location::RegisterLocation(V0)); - return summary; -} - - -void InstanceOfInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - ASSERT(locs()->in(0).reg() == A0); // Value. 
- ASSERT(locs()->in(1).reg() == A1); // Instantiator type arguments. - ASSERT(locs()->in(2).reg() == A2); // Function type arguments. - - __ Comment("InstanceOfInstr"); - compiler->GenerateInstanceOf(token_pos(), deopt_id(), type(), locs()); - ASSERT(locs()->out(0).reg() == V0); -} - - -LocationSummary* CreateArrayInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - const intptr_t kNumInputs = 2; - const intptr_t kNumTemps = 0; - LocationSummary* locs = new (zone) - LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall); - locs->set_in(0, Location::RegisterLocation(A0)); - locs->set_in(1, Location::RegisterLocation(A1)); - locs->set_out(0, Location::RegisterLocation(V0)); - return locs; -} - - -// Inlines array allocation for known constant values. -static void InlineArrayAllocation(FlowGraphCompiler* compiler, - intptr_t num_elements, - Label* slow_path, - Label* done) { - const int kInlineArraySize = 12; // Same as kInlineInstanceSize. - const Register kLengthReg = A1; - const Register kElemTypeReg = A0; - const intptr_t instance_size = Array::InstanceSize(num_elements); - - __ TryAllocateArray(kArrayCid, instance_size, slow_path, - V0, // instance - T1, // end address - T2, T3); - // V0: new object start as a tagged pointer. - // T1: new object end address. - - // Store the type argument field. - __ StoreIntoObjectNoBarrier( - V0, FieldAddress(V0, Array::type_arguments_offset()), kElemTypeReg); - - // Set the length field. - __ StoreIntoObjectNoBarrier(V0, FieldAddress(V0, Array::length_offset()), - kLengthReg); - - // Initialize all array elements to raw_null. - // V0: new object start as a tagged pointer. - // T1: new object end address. - // T2: iterator which initially points to the start of the variable - // data area to be initialized. - // T7: null. - if (num_elements > 0) { - const intptr_t array_size = instance_size - sizeof(RawArray); - __ LoadObject(T7, Object::null_object()); - __ AddImmediate(T2, V0, sizeof(RawArray) - kHeapObjectTag); - if (array_size < (kInlineArraySize * kWordSize)) { - intptr_t current_offset = 0; - while (current_offset < array_size) { - __ sw(T7, Address(T2, current_offset)); - current_offset += kWordSize; - } - } else { - Label init_loop; - __ Bind(&init_loop); - __ sw(T7, Address(T2, 0)); - __ addiu(T2, T2, Immediate(kWordSize)); - __ BranchUnsignedLess(T2, T1, &init_loop); - } - } - __ b(done); -} - - -void CreateArrayInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - __ Comment("CreateArrayInstr"); - const Register kLengthReg = A1; - const Register kElemTypeReg = A0; - const Register kResultReg = V0; - ASSERT(locs()->in(0).reg() == kElemTypeReg); - ASSERT(locs()->in(1).reg() == kLengthReg); - - Label slow_path, done; - if (compiler->is_optimizing() && !FLAG_precompiled_mode && - num_elements()->BindsToConstant() && - num_elements()->BoundConstant().IsSmi()) { - const intptr_t length = Smi::Cast(num_elements()->BoundConstant()).Value(); - if ((length >= 0) && (length <= Array::kMaxElements)) { - Label slow_path, done; - InlineArrayAllocation(compiler, length, &slow_path, &done); - __ Bind(&slow_path); - __ PushObject(Object::null_object()); // Make room for the result. - __ Push(kLengthReg); // length. 
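For context on InlineArrayAllocation above: the element fill is emitted as straight-line stores when the payload is smaller than kInlineArraySize words, and as a store/increment/branch loop otherwise. A rough C++ rendering of that decision (names are placeholders; in portable C++ both shapes are loops, the threshold is what the generated code branches on):

    #include <cstddef>
    #include <cstdint>

    static void FillWithNull(uintptr_t* start, size_t num_words,
                             uintptr_t null_word) {
      const size_t kInlineArraySize = 12;  // words, same constant as above
      if (num_words < kInlineArraySize) {
        // Emitted fully unrolled in the generated code.
        for (size_t i = 0; i < num_words; ++i) start[i] = null_word;
      } else {
        // Emitted as a sw/addiu/branch loop in the generated code.
        for (uintptr_t* p = start; p < start + num_words; ++p) *p = null_word;
      }
    }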
- __ Push(kElemTypeReg); - compiler->GenerateRuntimeCall(token_pos(), deopt_id(), - kAllocateArrayRuntimeEntry, 2, locs()); - __ Drop(2); - __ Pop(kResultReg); - __ Bind(&done); - return; - } - } - - __ Bind(&slow_path); - const Code& stub = Code::ZoneHandle(compiler->zone(), - StubCode::AllocateArray_entry()->code()); - compiler->AddStubCallTarget(stub); - compiler->GenerateCallWithDeopt(token_pos(), deopt_id(), - *StubCode::AllocateArray_entry(), - RawPcDescriptors::kOther, locs()); - __ Bind(&done); - ASSERT(locs()->out(0).reg() == kResultReg); -} - - -LocationSummary* LoadFieldInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - const intptr_t kNumInputs = 1; - const intptr_t kNumTemps = - (IsUnboxedLoad() && opt) ? 1 : ((IsPotentialUnboxedLoad()) ? 2 : 0); - LocationSummary* locs = new (zone) LocationSummary( - zone, kNumInputs, kNumTemps, (opt && !IsPotentialUnboxedLoad()) - ? LocationSummary::kNoCall - : LocationSummary::kCallOnSlowPath); - - locs->set_in(0, Location::RequiresRegister()); - - if (IsUnboxedLoad() && opt) { - locs->set_temp(0, Location::RequiresRegister()); - } else if (IsPotentialUnboxedLoad()) { - locs->set_temp(0, opt ? Location::RequiresFpuRegister() - : Location::FpuRegisterLocation(D1)); - locs->set_temp(1, Location::RequiresRegister()); - } - locs->set_out(0, Location::RequiresRegister()); - return locs; -} - - -void LoadFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - ASSERT(sizeof(classid_t) == kInt16Size); - - Register instance_reg = locs()->in(0).reg(); - if (IsUnboxedLoad() && compiler->is_optimizing()) { - DRegister result = locs()->out(0).fpu_reg(); - Register temp = locs()->temp(0).reg(); - __ LoadFieldFromOffset(temp, instance_reg, offset_in_bytes()); - intptr_t cid = field()->UnboxedFieldCid(); - switch (cid) { - case kDoubleCid: - __ LoadDFromOffset(result, temp, - Double::value_offset() - kHeapObjectTag); - break; - default: - UNREACHABLE(); - } - return; - } - - Label done; - Register result_reg = locs()->out(0).reg(); - if (IsPotentialUnboxedLoad()) { - Register temp = locs()->temp(1).reg(); - DRegister value = locs()->temp(0).fpu_reg(); - - Label load_pointer; - Label load_double; - - __ LoadObject(result_reg, Field::ZoneHandle(field()->Original())); - - FieldAddress field_cid_operand(result_reg, Field::guarded_cid_offset()); - FieldAddress field_nullability_operand(result_reg, - Field::is_nullable_offset()); - - __ lhu(temp, field_nullability_operand); - __ BranchEqual(temp, Immediate(kNullCid), &load_pointer); - - __ lhu(temp, field_cid_operand); - __ BranchEqual(temp, Immediate(kDoubleCid), &load_double); - - // Fall through. 
- __ b(&load_pointer); - - if (!compiler->is_optimizing()) { - locs()->live_registers()->Add(locs()->in(0)); - } - - { - __ Bind(&load_double); - BoxAllocationSlowPath::Allocate(compiler, this, compiler->double_class(), - result_reg, temp); - __ lw(temp, FieldAddress(instance_reg, offset_in_bytes())); - __ LoadDFromOffset(value, temp, Double::value_offset() - kHeapObjectTag); - __ StoreDToOffset(value, result_reg, - Double::value_offset() - kHeapObjectTag); - __ b(&done); - } - - __ Bind(&load_pointer); - } - __ LoadFieldFromOffset(result_reg, instance_reg, offset_in_bytes()); - __ Bind(&done); -} - - -LocationSummary* InstantiateTypeInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - const intptr_t kNumInputs = 2; - const intptr_t kNumTemps = 0; - LocationSummary* locs = new (zone) - LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall); - locs->set_in(0, Location::RegisterLocation(T0)); // Instant. type args. - locs->set_in(1, Location::RegisterLocation(T1)); // Function type args. - locs->set_out(0, Location::RegisterLocation(T0)); - return locs; -} - - -void InstantiateTypeInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - __ Comment("InstantiateTypeInstr"); - Register instantiator_type_args_reg = locs()->in(0).reg(); - Register function_type_args_reg = locs()->in(1).reg(); - Register result_reg = locs()->out(0).reg(); - - // 'instantiator_type_args_reg' is a TypeArguments object (or null). - // 'function_type_args_reg' is a TypeArguments object (or null). - // A runtime call to instantiate the type is required. - __ addiu(SP, SP, Immediate(-4 * kWordSize)); - __ LoadObject(TMP, Object::null_object()); - __ sw(TMP, Address(SP, 3 * kWordSize)); // Make room for the result. - __ LoadObject(TMP, type()); - __ sw(TMP, Address(SP, 2 * kWordSize)); - __ sw(instantiator_type_args_reg, Address(SP, 1 * kWordSize)); - __ sw(function_type_args_reg, Address(SP, 0 * kWordSize)); - - compiler->GenerateRuntimeCall(token_pos(), deopt_id(), - kInstantiateTypeRuntimeEntry, 3, locs()); - // Pop instantiated type. - __ lw(result_reg, Address(SP, 3 * kWordSize)); - - // Drop instantiator and uninstantiated type. - __ addiu(SP, SP, Immediate(4 * kWordSize)); -} - - -LocationSummary* InstantiateTypeArgumentsInstr::MakeLocationSummary( - Zone* zone, - bool opt) const { - const intptr_t kNumInputs = 2; - const intptr_t kNumTemps = 0; - LocationSummary* locs = new (zone) - LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall); - locs->set_in(0, Location::RegisterLocation(T0)); // Instant. type args. - locs->set_in(1, Location::RegisterLocation(T1)); // Function type args. - locs->set_out(0, Location::RegisterLocation(T0)); - return locs; -} - - -void InstantiateTypeArgumentsInstr::EmitNativeCode( - FlowGraphCompiler* compiler) { - __ Comment("InstantiateTypeArgumentsInstr"); - Register instantiator_type_args_reg = locs()->in(0).reg(); - Register function_type_args_reg = locs()->in(1).reg(); - Register result_reg = locs()->out(0).reg(); - ASSERT(instantiator_type_args_reg == T0); - ASSERT(instantiator_type_args_reg == result_reg); - - // 'instantiator_type_args_reg' is a TypeArguments object (or null). - // 'function_type_args_reg' is a TypeArguments object (or null). 
- ASSERT(!type_arguments().IsUninstantiatedIdentity() && - !type_arguments().CanShareInstantiatorTypeArguments( - instantiator_class())); - // If both the instantiator and function type arguments are null and if the - // type argument vector instantiated from null becomes a vector of dynamic, - // then use null as the type arguments. - Label type_arguments_instantiated; - const intptr_t len = type_arguments().Length(); - if (type_arguments().IsRawWhenInstantiatedFromRaw(len)) { - Label non_null_type_args; - __ BranchNotEqual(instantiator_type_args_reg, Object::null_object(), - &non_null_type_args); - __ BranchEqual(function_type_args_reg, Object::null_object(), - &type_arguments_instantiated); - __ Bind(&non_null_type_args); - } - - // Lookup cache before calling runtime. - // TODO(regis): Consider moving this into a shared stub to reduce - // generated code size. - __ LoadObject(T2, type_arguments()); - __ lw(T2, FieldAddress(T2, TypeArguments::instantiations_offset())); - __ AddImmediate(T2, Array::data_offset() - kHeapObjectTag); - // The instantiations cache is initialized with Object::zero_array() and is - // therefore guaranteed to contain kNoInstantiator. No length check needed. - Label loop, next, found, slow_case; - __ Bind(&loop); - __ lw(T3, Address(T2, 0 * kWordSize)); // Cached instantiator type args. - __ bne(T3, T0, &next); - __ lw(T4, Address(T2, 1 * kWordSize)); // Cached function type args. - __ beq(T4, T1, &found); - __ Bind(&next); - __ BranchNotEqual(T3, Immediate(Smi::RawValue(StubCode::kNoInstantiator)), - &loop); - __ delay_slot()->addiu( - T2, T2, Immediate(StubCode::kInstantiationSizeInWords * kWordSize)); - __ b(&slow_case); - __ Bind(&found); - __ lw(T0, Address(T2, 2 * kWordSize)); // Cached instantiated args. - __ b(&type_arguments_instantiated); - - __ Bind(&slow_case); - // Instantiate non-null type arguments. - // A runtime call to instantiate the type arguments is required. - __ addiu(SP, SP, Immediate(-4 * kWordSize)); - __ LoadObject(TMP, Object::null_object()); - __ sw(TMP, Address(SP, 3 * kWordSize)); // Make room for the result. - __ LoadObject(TMP, type_arguments()); - __ sw(TMP, Address(SP, 2 * kWordSize)); - __ sw(instantiator_type_args_reg, Address(SP, 1 * kWordSize)); - __ sw(function_type_args_reg, Address(SP, 0 * kWordSize)); - - compiler->GenerateRuntimeCall(token_pos(), deopt_id(), - kInstantiateTypeArgumentsRuntimeEntry, 3, - locs()); - // Pop instantiated type arguments. - __ lw(result_reg, Address(SP, 3 * kWordSize)); - // Drop 2 type argument vectors and uninstantiated type arguments. 
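The cache scan above walks a flat array of (instantiator, function, result) triples that ends in a sentinel entry. A simplified C++ sketch of the same linear probe (types and names are placeholders, not the VM's):

    #include <cstdint>

    struct CacheEntry {
      intptr_t instantiator;  // cached instantiator type arguments
      intptr_t function;      // cached function type arguments
      intptr_t result;        // cached instantiated vector
    };
    const intptr_t kNoInstantiator = -1;  // sentinel value

    static intptr_t LookupInstantiation(const CacheEntry* cache,
                                        intptr_t instantiator,
                                        intptr_t function) {
      for (const CacheEntry* e = cache; e->instantiator != kNoInstantiator; ++e) {
        if (e->instantiator == instantiator && e->function == function) {
          return e->result;  // hit: reuse the cached vector
        }
      }
      return kNoInstantiator;  // miss: caller falls back to the runtime
    }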
- __ addiu(SP, SP, Immediate(4 * kWordSize));
- __ Bind(&type_arguments_instantiated);
-}
-
-
-LocationSummary* AllocateUninitializedContextInstr::MakeLocationSummary(
- Zone* zone,
- bool opt) const {
- ASSERT(opt);
- const intptr_t kNumInputs = 0;
- const intptr_t kNumTemps = 3;
- LocationSummary* locs = new (zone) LocationSummary(
- zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
- locs->set_temp(0, Location::RegisterLocation(T1));
- locs->set_temp(1, Location::RegisterLocation(T2));
- locs->set_temp(2, Location::RegisterLocation(T3));
- locs->set_out(0, Location::RegisterLocation(V0));
- return locs;
-}
-
-
-class AllocateContextSlowPath : public SlowPathCode {
- public:
- explicit AllocateContextSlowPath(
- AllocateUninitializedContextInstr* instruction)
- : instruction_(instruction) {}
-
- virtual void EmitNativeCode(FlowGraphCompiler* compiler) {
- __ Comment("AllocateContextSlowPath");
- __ Bind(entry_label());
-
- LocationSummary* locs = instruction_->locs();
- locs->live_registers()->Remove(locs->out(0));
-
- compiler->SaveLiveRegisters(locs);
-
- __ LoadImmediate(T1, instruction_->num_context_variables());
- const Code& stub = Code::ZoneHandle(
- compiler->zone(), StubCode::AllocateContext_entry()->code());
- compiler->AddStubCallTarget(stub);
- compiler->GenerateCall(instruction_->token_pos(),
- *StubCode::AllocateContext_entry(),
- RawPcDescriptors::kOther, locs);
- ASSERT(instruction_->locs()->out(0).reg() == V0);
- compiler->RestoreLiveRegisters(instruction_->locs());
- __ b(exit_label());
- }
-
- private:
- AllocateUninitializedContextInstr* instruction_;
-};
-
-
-void AllocateUninitializedContextInstr::EmitNativeCode(
- FlowGraphCompiler* compiler) {
- Register temp0 = locs()->temp(0).reg();
- Register temp1 = locs()->temp(1).reg();
- Register temp2 = locs()->temp(2).reg();
- Register result = locs()->out(0).reg();
- // Try to allocate the object.
- AllocateContextSlowPath* slow_path = new AllocateContextSlowPath(this);
- compiler->AddSlowPathCode(slow_path);
- intptr_t instance_size = Context::InstanceSize(num_context_variables());
-
- __ TryAllocateArray(kContextCid, instance_size, slow_path->entry_label(),
- result, // instance
- temp0, temp1, temp2);
-
- // Set up the number of context variables field.
- __ LoadImmediate(temp0, num_context_variables()); - __ sw(temp0, FieldAddress(result, Context::num_variables_offset())); - - __ Bind(slow_path->exit_label()); -} - - -LocationSummary* AllocateContextInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - const intptr_t kNumInputs = 0; - const intptr_t kNumTemps = 1; - LocationSummary* locs = new (zone) - LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall); - locs->set_temp(0, Location::RegisterLocation(T1)); - locs->set_out(0, Location::RegisterLocation(V0)); - return locs; -} - - -void AllocateContextInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - ASSERT(locs()->temp(0).reg() == T1); - ASSERT(locs()->out(0).reg() == V0); - - __ Comment("AllocateContextInstr"); - __ LoadImmediate(T1, num_context_variables()); - compiler->GenerateCall(token_pos(), *StubCode::AllocateContext_entry(), - RawPcDescriptors::kOther, locs()); -} - - -LocationSummary* InitStaticFieldInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - const intptr_t kNumInputs = 1; - const intptr_t kNumTemps = 1; - LocationSummary* locs = new (zone) - LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall); - locs->set_in(0, Location::RegisterLocation(T0)); - locs->set_temp(0, Location::RegisterLocation(T1)); - return locs; -} - - -void InitStaticFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - Register field = locs()->in(0).reg(); - Register temp = locs()->temp(0).reg(); - - Label call_runtime, no_call; - __ Comment("InitStaticFieldInstr"); - - __ lw(temp, FieldAddress(field, Field::static_value_offset())); - __ BranchEqual(temp, Object::sentinel(), &call_runtime); - __ BranchNotEqual(temp, Object::transition_sentinel(), &no_call); - - __ Bind(&call_runtime); - __ addiu(SP, SP, Immediate(-2 * kWordSize)); - __ LoadObject(TMP, Object::null_object()); - __ sw(TMP, Address(SP, 1 * kWordSize)); // Make room for (unused) result. - __ sw(field, Address(SP, 0 * kWordSize)); - - compiler->GenerateRuntimeCall(token_pos(), deopt_id(), - kInitStaticFieldRuntimeEntry, 1, locs()); - - __ addiu(SP, SP, Immediate(2 * kWordSize)); // Purge argument and result. - - __ Bind(&no_call); -} - - -LocationSummary* CloneContextInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - const intptr_t kNumInputs = 1; - const intptr_t kNumTemps = 0; - LocationSummary* locs = new (zone) - LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall); - locs->set_in(0, Location::RegisterLocation(T0)); - locs->set_out(0, Location::RegisterLocation(T0)); - return locs; -} - - -void CloneContextInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - Register context_value = locs()->in(0).reg(); - Register result = locs()->out(0).reg(); - - __ Comment("CloneContextInstr"); - - __ addiu(SP, SP, Immediate(-2 * kWordSize)); - __ LoadObject(TMP, Object::null_object()); // Make room for the result. - __ sw(TMP, Address(SP, 1 * kWordSize)); - __ sw(context_value, Address(SP, 0 * kWordSize)); - - compiler->GenerateRuntimeCall(token_pos(), deopt_id(), - kCloneContextRuntimeEntry, 1, locs()); - __ lw(result, Address(SP, 1 * kWordSize)); // Get result (cloned context). 
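The sequences around here (InstantiateType, InstantiateTypeArguments, InitStaticField, CloneContext) all share one runtime-call convention: reserve a stack slot for the result, push the arguments, call, read the slot back, then pop everything at once. A schematic C++ model (illustrative only; the real protocol is emitted as machine code and the result slot holds the null object):

    #include <cstdint>
    #include <vector>

    static intptr_t CallRuntime1(std::vector<intptr_t>* stack,
                                 intptr_t (*entry)(intptr_t),
                                 intptr_t arg) {
      stack->push_back(0);    // room for the result
      stack->push_back(arg);  // the single argument
      (*stack)[stack->size() - 2] = entry(stack->back());  // runtime fills slot
      const intptr_t result = (*stack)[stack->size() - 2];
      stack->resize(stack->size() - 2);  // drop argument and result together
      return result;
    }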
- __ addiu(SP, SP, Immediate(2 * kWordSize)); -} - - -LocationSummary* CatchBlockEntryInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - UNREACHABLE(); - return NULL; -} - - -void CatchBlockEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - __ Bind(compiler->GetJumpLabel(this)); - compiler->AddExceptionHandler(catch_try_index(), try_index(), - compiler->assembler()->CodeSize(), - handler_token_pos(), is_generated(), - catch_handler_types_, needs_stacktrace()); - // On lazy deoptimization we patch the optimized code here to enter the - // deoptimization stub. - const intptr_t deopt_id = Thread::ToDeoptAfter(GetDeoptId()); - if (compiler->is_optimizing()) { - compiler->AddDeoptIndexAtCall(deopt_id); - } else { - compiler->AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id, - TokenPosition::kNoSource); - } - if (HasParallelMove()) { - compiler->parallel_move_resolver()->EmitNativeCode(parallel_move()); - } - // Restore SP from FP as we are coming from a throw and the code for - // popping arguments has not been run. - const intptr_t fp_sp_dist = - (kFirstLocalSlotFromFp + 1 - compiler->StackSize()) * kWordSize; - ASSERT(fp_sp_dist <= 0); - __ AddImmediate(SP, FP, fp_sp_dist); - - // Auxiliary variables introduced by the try catch can be captured if we are - // inside a function with yield/resume points. In this case we first need - // to restore the context to match the context at entry into the closure. - if (should_restore_closure_context()) { - const ParsedFunction& parsed_function = compiler->parsed_function(); - ASSERT(parsed_function.function().IsClosureFunction()); - LocalScope* scope = parsed_function.node_sequence()->scope(); - - LocalVariable* closure_parameter = scope->VariableAt(0); - ASSERT(!closure_parameter->is_captured()); - __ LoadFromOffset(CTX, FP, closure_parameter->index() * kWordSize); - __ LoadFieldFromOffset(CTX, CTX, Closure::context_offset()); - - const intptr_t context_index = - parsed_function.current_context_var()->index(); - __ StoreToOffset(CTX, FP, context_index * kWordSize); - } - - // Initialize exception and stack trace variables. - if (exception_var().is_captured()) { - ASSERT(stacktrace_var().is_captured()); - __ StoreIntoObjectOffset(CTX, - Context::variable_offset(exception_var().index()), - kExceptionObjectReg); - __ StoreIntoObjectOffset(CTX, - Context::variable_offset(stacktrace_var().index()), - kStackTraceObjectReg); - } else { - // Restore stack and initialize the two exception variables: - // exception and stack trace variables. 
- __ StoreToOffset(kExceptionObjectReg, FP, - exception_var().index() * kWordSize); - __ StoreToOffset(kStackTraceObjectReg, FP, - stacktrace_var().index() * kWordSize); - } -} - - -LocationSummary* CheckStackOverflowInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - const intptr_t kNumInputs = 0; - const intptr_t kNumTemps = 1; - LocationSummary* summary = new (zone) LocationSummary( - zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath); - summary->set_temp(0, Location::RequiresRegister()); - return summary; -} - - -class CheckStackOverflowSlowPath : public SlowPathCode { - public: - explicit CheckStackOverflowSlowPath(CheckStackOverflowInstr* instruction) - : instruction_(instruction) {} - - virtual void EmitNativeCode(FlowGraphCompiler* compiler) { - if (compiler->isolate()->use_osr() && osr_entry_label()->IsLinked()) { - Register value = instruction_->locs()->temp(0).reg(); - __ Comment("CheckStackOverflowSlowPathOsr"); - __ Bind(osr_entry_label()); - __ LoadImmediate(value, Thread::kOsrRequest); - __ sw(value, Address(THR, Thread::stack_overflow_flags_offset())); - } - __ Comment("CheckStackOverflowSlowPath"); - __ Bind(entry_label()); - compiler->SaveLiveRegisters(instruction_->locs()); - // pending_deoptimization_env_ is needed to generate a runtime call that - // may throw an exception. - ASSERT(compiler->pending_deoptimization_env_ == NULL); - Environment* env = compiler->SlowPathEnvironmentFor(instruction_); - compiler->pending_deoptimization_env_ = env; - compiler->GenerateRuntimeCall( - instruction_->token_pos(), instruction_->deopt_id(), - kStackOverflowRuntimeEntry, 0, instruction_->locs()); - - if (compiler->isolate()->use_osr() && !compiler->is_optimizing() && - instruction_->in_loop()) { - // In unoptimized code, record loop stack checks as possible OSR entries. - compiler->AddCurrentDescriptor(RawPcDescriptors::kOsrEntry, - instruction_->deopt_id(), - TokenPosition::kNoSource); - } - compiler->pending_deoptimization_env_ = NULL; - compiler->RestoreLiveRegisters(instruction_->locs()); - __ b(exit_label()); - } - - Label* osr_entry_label() { - ASSERT(Isolate::Current()->use_osr()); - return &osr_entry_label_; - } - - private: - CheckStackOverflowInstr* instruction_; - Label osr_entry_label_; -}; - - -void CheckStackOverflowInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - __ Comment("CheckStackOverflowInstr"); - CheckStackOverflowSlowPath* slow_path = new CheckStackOverflowSlowPath(this); - compiler->AddSlowPathCode(slow_path); - - __ lw(CMPRES1, Address(THR, Thread::stack_limit_offset())); - __ BranchUnsignedLessEqual(SP, CMPRES1, slow_path->entry_label()); - if (compiler->CanOSRFunction() && in_loop()) { - Register temp = locs()->temp(0).reg(); - // In unoptimized code check the usage counter to trigger OSR at loop - // stack checks. Use progressively higher thresholds for more deeply - // nested loops to attempt to hit outer loops with OSR when possible. 
- __ LoadObject(temp, compiler->parsed_function().function());
- intptr_t threshold =
- FLAG_optimization_counter_threshold * (loop_depth() + 1);
- __ lw(temp, FieldAddress(temp, Function::usage_counter_offset()));
- __ BranchSignedGreaterEqual(temp, Immediate(threshold),
- slow_path->osr_entry_label());
- }
- if (compiler->ForceSlowPathForStackOverflow()) {
- __ b(slow_path->entry_label());
- }
- __ Bind(slow_path->exit_label());
-}
-
-
-static void EmitSmiShiftLeft(FlowGraphCompiler* compiler,
- BinarySmiOpInstr* shift_left) {
- const LocationSummary& locs = *shift_left->locs();
- Register left = locs.in(0).reg();
- Register result = locs.out(0).reg();
- Label* deopt = shift_left->CanDeoptimize()
- ? compiler->AddDeoptStub(shift_left->deopt_id(),
- ICData::kDeoptBinarySmiOp)
- : NULL;
-
- __ Comment("EmitSmiShiftLeft");
-
- if (locs.in(1).IsConstant()) {
- const Object& constant = locs.in(1).constant();
- ASSERT(constant.IsSmi());
- // Immediate shift operation takes 5 bits for the count.
- const intptr_t kCountLimit = 0x1F;
- const intptr_t value = Smi::Cast(constant).Value();
- ASSERT((0 < value) && (value < kCountLimit));
- if (shift_left->can_overflow()) {
- // Check for overflow (preserve left).
- __ sll(TMP, left, value);
- __ sra(CMPRES1, TMP, value);
- __ bne(CMPRES1, left, deopt); // Overflow.
- }
- // Shift for result now that we know there is no overflow.
- __ sll(result, left, value);
- return;
- }
-
- // Right (locs.in(1)) is not constant.
- Register right = locs.in(1).reg();
- Range* right_range = shift_left->right_range();
- if (shift_left->left()->BindsToConstant() && shift_left->can_overflow()) {
- // TODO(srdjan): Implement code below for is_truncating().
- // If left is constant, we know the maximal allowed size for right.
- const Object& obj = shift_left->left()->BoundConstant();
- if (obj.IsSmi()) {
- const intptr_t left_int = Smi::Cast(obj).Value();
- if (left_int == 0) {
- __ bltz(right, deopt);
- __ mov(result, ZR);
- return;
- }
- const intptr_t max_right = kSmiBits - Utils::HighestBit(left_int);
- const bool right_needs_check =
- !RangeUtils::IsWithin(right_range, 0, max_right - 1);
- if (right_needs_check) {
- const Immediate& max_right_imm =
- Immediate(reinterpret_cast<int32_t>(Smi::New(max_right)));
- __ BranchUnsignedGreaterEqual(right, max_right_imm, deopt);
- }
- __ SmiUntag(TMP, right);
- __ sllv(result, left, TMP);
- }
- return;
- }
-
- const bool right_needs_check =
- !RangeUtils::IsWithin(right_range, 0, (Smi::kBits - 1));
- if (!shift_left->can_overflow()) {
- if (right_needs_check) {
- if (!RangeUtils::IsPositive(right_range)) {
- ASSERT(shift_left->CanDeoptimize());
- __ bltz(right, deopt);
- }
- Label done, is_not_zero;
-
- __ sltiu(CMPRES1, right,
- Immediate(reinterpret_cast<int32_t>(Smi::New(Smi::kBits))));
- __ movz(result, ZR, CMPRES1); // result = right >= kBits ? 0 : result.
- __ sra(TMP, right, kSmiTagSize);
- __ sllv(TMP, left, TMP);
- // result = right < kBits ? left << right : result.
- __ movn(result, TMP, CMPRES1);
- } else {
- __ sra(TMP, right, kSmiTagSize);
- __ sllv(result, left, TMP);
- }
- } else {
- if (right_needs_check) {
- const Immediate& bits_imm =
- Immediate(reinterpret_cast<int32_t>(Smi::New(Smi::kBits)));
- ASSERT(shift_left->CanDeoptimize());
- __ BranchUnsignedGreaterEqual(right, bits_imm, deopt);
- }
- // Left is not a constant.
- Register temp = locs.temp(0).reg();
- // Check if the count is too large to handle inline.
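EmitSmiShiftLeft detects overflow the same way in each branch: shift left, shift back arithmetically, and compare against the original operand; if significant bits (or the sign) were lost, the round trip differs. In portable C++ (a sketch that assumes a shift count in 0..31, which the masked MIPS shifts guarantee, and an arithmetic right shift for signed values, as on MIPS):

    #include <cstdint>

    static bool SmiShlOverflows(int32_t left, int32_t count) {
      const int32_t shifted =
          static_cast<int32_t>(static_cast<uint32_t>(left) << count);  // no UB
      return (shifted >> count) != left;  // arithmetic shift back and compare
    }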
- __ SmiUntag(temp, right);
- // Overflow test (preserve left, right, and temp).
- __ sllv(CMPRES1, left, temp);
- __ srav(CMPRES1, CMPRES1, temp);
- __ bne(CMPRES1, left, deopt); // Overflow.
- // Shift for result now that we know there is no overflow.
- __ sllv(result, left, temp);
- }
-}
-
-
-class CheckedSmiSlowPath : public SlowPathCode {
- public:
- CheckedSmiSlowPath(CheckedSmiOpInstr* instruction, intptr_t try_index)
- : instruction_(instruction), try_index_(try_index) {}
-
- virtual void EmitNativeCode(FlowGraphCompiler* compiler) {
- if (Assembler::EmittingComments()) {
- __ Comment("slow path smi operation");
- }
- __ Bind(entry_label());
- LocationSummary* locs = instruction_->locs();
- Register result = locs->out(0).reg();
- locs->live_registers()->Remove(Location::RegisterLocation(result));
-
- compiler->SaveLiveRegisters(locs);
- if (instruction_->env() != NULL) {
- Environment* env = compiler->SlowPathEnvironmentFor(instruction_);
- compiler->pending_deoptimization_env_ = env;
- }
- __ Push(locs->in(0).reg());
- __ Push(locs->in(1).reg());
- const String& selector =
- String::Handle(instruction_->call()->ic_data()->target_name());
- const Array& argument_names =
- Array::Handle(instruction_->call()->ic_data()->arguments_descriptor());
- compiler->EmitMegamorphicInstanceCall(
- selector, argument_names, instruction_->call()->ArgumentCount(),
- instruction_->call()->deopt_id(), instruction_->call()->token_pos(),
- locs, try_index_,
- /* slow_path_argument_count = */ 2);
- __ mov(result, V0);
- compiler->RestoreLiveRegisters(locs);
- __ b(exit_label());
- compiler->pending_deoptimization_env_ = NULL;
- }
-
- private:
- CheckedSmiOpInstr* instruction_;
- intptr_t try_index_;
-};
-
-
-LocationSummary* CheckedSmiOpInstr::MakeLocationSummary(Zone* zone,
- bool opt) const {
- const intptr_t kNumInputs = 2;
- const intptr_t kNumTemps = 0;
- LocationSummary* summary = new (zone) LocationSummary(
- zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
- summary->set_in(0, Location::RequiresRegister());
- summary->set_in(1, Location::RequiresRegister());
- summary->set_out(0, Location::RequiresRegister());
- return summary;
-}
-
-
-void CheckedSmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- CheckedSmiSlowPath* slow_path =
- new CheckedSmiSlowPath(this, compiler->CurrentTryIndex());
- compiler->AddSlowPathCode(slow_path);
- // Test operands if necessary.
- Register left = locs()->in(0).reg(); - Register right = locs()->in(1).reg(); - Register result = locs()->out(0).reg(); - intptr_t left_cid = this->left()->Type()->ToCid(); - intptr_t right_cid = this->right()->Type()->ToCid(); - bool combined_smi_check = false; - if (this->left()->definition() == this->right()->definition()) { - __ andi(CMPRES1, left, Immediate(kSmiTagMask)); - } else if (left_cid == kSmiCid) { - __ andi(CMPRES1, right, Immediate(kSmiTagMask)); - } else if (right_cid == kSmiCid) { - __ andi(CMPRES1, left, Immediate(kSmiTagMask)); - } else { - combined_smi_check = true; - __ or_(result, left, right); - __ andi(CMPRES1, result, Immediate(kSmiTagMask)); - } - __ bne(CMPRES1, ZR, slow_path->entry_label()); - switch (op_kind()) { - case Token::kADD: - __ AdduDetectOverflow(result, left, right, CMPRES1); - __ bltz(CMPRES1, slow_path->entry_label()); - break; - case Token::kSUB: - __ SubuDetectOverflow(result, left, right, CMPRES1); - __ bltz(CMPRES1, slow_path->entry_label()); - break; - case Token::kMUL: - __ sra(TMP, left, kSmiTagSize); - __ mult(TMP, right); - __ mflo(result); - __ mfhi(CMPRES2); - __ sra(CMPRES1, result, 31); - __ bne(CMPRES1, CMPRES2, slow_path->entry_label()); - break; - case Token::kBIT_OR: - // Operation part of combined smi check. - if (!combined_smi_check) { - __ or_(result, left, right); - } - break; - case Token::kBIT_AND: - __ and_(result, left, right); - break; - case Token::kBIT_XOR: - __ xor_(result, left, right); - break; - case Token::kSHL: - ASSERT(result != left); - ASSERT(result != right); - __ BranchUnsignedGreater(right, Immediate(Smi::RawValue(Smi::kBits)), - slow_path->entry_label()); - // Check for overflow by shifting left and shifting back arithmetically. - // If the result is different from the original, there was overflow. - __ delay_slot()->SmiUntag(TMP, right); - __ sllv(result, left, TMP); - __ srav(CMPRES1, result, TMP); - __ bne(CMPRES1, left, slow_path->entry_label()); - break; - case Token::kSHR: - __ BranchUnsignedGreater(right, Immediate(Smi::RawValue(Smi::kBits)), - slow_path->entry_label()); - __ delay_slot()->SmiUntag(result, right); - __ SmiUntag(TMP, left); - __ srav(result, TMP, result); - __ SmiTag(result); - break; - default: - UNIMPLEMENTED(); - } - __ Bind(slow_path->exit_label()); -} - - -class CheckedSmiComparisonSlowPath : public SlowPathCode { - public: - CheckedSmiComparisonSlowPath(CheckedSmiComparisonInstr* instruction, - intptr_t try_index, - BranchLabels labels, - bool merged) - : instruction_(instruction), - try_index_(try_index), - labels_(labels), - merged_(merged) {} - - virtual void EmitNativeCode(FlowGraphCompiler* compiler) { - if (Assembler::EmittingComments()) { - __ Comment("slow path smi operation"); - } - __ Bind(entry_label()); - LocationSummary* locs = instruction_->locs(); - Register result = merged_ ? 
locs->temp(0).reg() : locs->out(0).reg(); - locs->live_registers()->Remove(Location::RegisterLocation(result)); - - compiler->SaveLiveRegisters(locs); - if (instruction_->env() != NULL) { - Environment* env = compiler->SlowPathEnvironmentFor(instruction_); - compiler->pending_deoptimization_env_ = env; - } - __ Push(locs->in(0).reg()); - __ Push(locs->in(1).reg()); - String& selector = - String::Handle(instruction_->call()->ic_data()->target_name()); - Array& argument_names = - Array::Handle(instruction_->call()->ic_data()->arguments_descriptor()); - compiler->EmitMegamorphicInstanceCall( - selector, argument_names, instruction_->call()->ArgumentCount(), - instruction_->call()->deopt_id(), instruction_->call()->token_pos(), - locs, try_index_, - /* slow_path_argument_count = */ 2); - __ mov(result, V0); - compiler->RestoreLiveRegisters(locs); - compiler->pending_deoptimization_env_ = NULL; - if (merged_) { - __ BranchEqual(result, Bool::True(), instruction_->is_negated() - ? labels_.false_label - : labels_.true_label); - __ b(instruction_->is_negated() ? labels_.true_label - : labels_.false_label); - } else { - __ b(exit_label()); - } - } - - private: - CheckedSmiComparisonInstr* instruction_; - intptr_t try_index_; - BranchLabels labels_; - bool merged_; -}; - - -LocationSummary* CheckedSmiComparisonInstr::MakeLocationSummary( - Zone* zone, - bool opt) const { - const intptr_t kNumInputs = 2; - const intptr_t kNumTemps = 1; - LocationSummary* summary = new (zone) LocationSummary( - zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath); - summary->set_in(0, Location::RequiresRegister()); - summary->set_in(1, Location::RequiresRegister()); - summary->set_temp(0, Location::RequiresRegister()); - summary->set_out(0, Location::RequiresRegister()); - return summary; -} - - -Condition CheckedSmiComparisonInstr::EmitComparisonCode( - FlowGraphCompiler* compiler, - BranchLabels labels) { - return EmitSmiComparisonOp(compiler, *locs(), kind()); -} - - -#define EMIT_SMI_CHECK \ - Register left = locs()->in(0).reg(); \ - Register right = locs()->in(1).reg(); \ - Register temp = locs()->temp(0).reg(); \ - intptr_t left_cid = this->left()->Type()->ToCid(); \ - intptr_t right_cid = this->right()->Type()->ToCid(); \ - if (this->left()->definition() == this->right()->definition()) { \ - __ andi(CMPRES1, left, Immediate(kSmiTagMask)); \ - } else if (left_cid == kSmiCid) { \ - __ andi(CMPRES1, right, Immediate(kSmiTagMask)); \ - } else if (right_cid == kSmiCid) { \ - __ andi(CMPRES1, left, Immediate(kSmiTagMask)); \ - } else { \ - __ or_(temp, left, right); \ - __ andi(CMPRES1, temp, Immediate(kSmiTagMask)); \ - } \ - __ bne(CMPRES1, ZR, slow_path->entry_label()); - - -void CheckedSmiComparisonInstr::EmitBranchCode(FlowGraphCompiler* compiler, - BranchInstr* branch) { - BranchLabels labels = compiler->CreateBranchLabels(branch); - CheckedSmiComparisonSlowPath* slow_path = new CheckedSmiComparisonSlowPath( - this, compiler->CurrentTryIndex(), labels, - /* merged = */ true); - compiler->AddSlowPathCode(slow_path); - EMIT_SMI_CHECK; - Condition true_condition = EmitComparisonCode(compiler, labels); - ASSERT(true_condition.IsValid()); - EmitBranchOnCondition(compiler, true_condition, labels); - __ Bind(slow_path->exit_label()); -} - - -void CheckedSmiComparisonInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - Label true_label, false_label, done; - BranchLabels labels = {&true_label, &false_label, &false_label}; - CheckedSmiComparisonSlowPath* slow_path = new CheckedSmiComparisonSlowPath( - this, 
compiler->CurrentTryIndex(), labels,
- /* merged = */ false);
- compiler->AddSlowPathCode(slow_path);
- EMIT_SMI_CHECK;
- Condition true_condition = EmitComparisonCode(compiler, labels);
- ASSERT(true_condition.IsValid());
- EmitBranchOnCondition(compiler, true_condition, labels);
- Register result = locs()->out(0).reg();
- __ Bind(&false_label);
- __ LoadObject(result, Bool::False());
- __ b(&done);
- __ Bind(&true_label);
- __ LoadObject(result, Bool::True());
- __ Bind(&done);
- __ Bind(slow_path->exit_label());
-}
-
-
-LocationSummary* BinarySmiOpInstr::MakeLocationSummary(Zone* zone,
- bool opt) const {
- const intptr_t kNumInputs = 2;
- const intptr_t kNumTemps =
- ((op_kind() == Token::kADD) || (op_kind() == Token::kMOD) ||
- (op_kind() == Token::kTRUNCDIV) ||
- (((op_kind() == Token::kSHL) && can_overflow()) ||
- (op_kind() == Token::kSHR)))
- ? 1
- : 0;
- LocationSummary* summary = new (zone)
- LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
- if (op_kind() == Token::kTRUNCDIV) {
- summary->set_in(0, Location::RequiresRegister());
- if (RightIsPowerOfTwoConstant()) {
- ConstantInstr* right_constant = right()->definition()->AsConstant();
- summary->set_in(1, Location::Constant(right_constant));
- } else {
- summary->set_in(1, Location::RequiresRegister());
- }
- summary->set_temp(0, Location::RequiresRegister());
- summary->set_out(0, Location::RequiresRegister());
- return summary;
- }
- if (op_kind() == Token::kMOD) {
- summary->set_in(0, Location::RequiresRegister());
- summary->set_in(1, Location::RequiresRegister());
- summary->set_temp(0, Location::RequiresRegister());
- summary->set_out(0, Location::RequiresRegister());
- return summary;
- }
- summary->set_in(0, Location::RequiresRegister());
- summary->set_in(1, Location::RegisterOrSmiConstant(right()));
- if (((op_kind() == Token::kSHL) && can_overflow()) ||
- (op_kind() == Token::kSHR)) {
- summary->set_temp(0, Location::RequiresRegister());
- } else if (op_kind() == Token::kADD) {
- // Need an extra temp for the overflow detection code.
- summary->set_temp(0, Location::RequiresRegister());
- }
- // We make use of 3-operand instructions by not requiring the result
- // register to be identical to the first input register, as on Intel.
- summary->set_out(0, Location::RequiresRegister());
- return summary;
-}
-
-
-void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- __ Comment("BinarySmiOpInstr");
- if (op_kind() == Token::kSHL) {
- EmitSmiShiftLeft(compiler, this);
- return;
- }
-
- Register left = locs()->in(0).reg();
- Register result = locs()->out(0).reg();
- Label* deopt = NULL;
- if (CanDeoptimize()) {
- deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinarySmiOp);
- }
-
- if (locs()->in(1).IsConstant()) {
- const Object& constant = locs()->in(1).constant();
- ASSERT(constant.IsSmi());
- const int32_t imm = reinterpret_cast<int32_t>(constant.raw());
- switch (op_kind()) {
- case Token::kADD: {
- if (deopt == NULL) {
- __ AddImmediate(result, left, imm);
- } else {
- Register temp = locs()->temp(0).reg();
- __ AddImmediateDetectOverflow(result, left, imm, CMPRES1, temp);
- __ bltz(CMPRES1, deopt);
- }
- break;
- }
- case Token::kSUB: {
- __ Comment("kSUB imm");
- if (deopt == NULL) {
- __ AddImmediate(result, left, -imm);
- } else {
- __ SubImmediateDetectOverflow(result, left, imm, CMPRES1);
- __ bltz(CMPRES1, deopt);
- }
- break;
- }
- case Token::kMUL: {
- // Keep left value tagged and untag right value.
- const intptr_t value = Smi::Cast(constant).Value();
- __ LoadImmediate(TMP, value);
- __ mult(left, TMP);
- __ mflo(result);
- if (deopt != NULL) {
- __ mfhi(CMPRES2);
- __ sra(CMPRES1, result, 31);
- __ bne(CMPRES1, CMPRES2, deopt);
- }
- break;
- }
- case Token::kTRUNCDIV: {
- const intptr_t value = Smi::Cast(constant).Value();
- ASSERT(Utils::IsPowerOfTwo(Utils::Abs(value)));
- const intptr_t shift_count =
- Utils::ShiftForPowerOfTwo(Utils::Abs(value)) + kSmiTagSize;
- ASSERT(kSmiTagSize == 1);
- __ sra(TMP, left, 31);
- ASSERT(shift_count > 1); // 1, -1 case handled above.
- Register temp = locs()->temp(0).reg();
- __ srl(TMP, TMP, 32 - shift_count);
- __ addu(temp, left, TMP);
- ASSERT(shift_count > 0);
- __ sra(result, temp, shift_count);
- if (value < 0) {
- __ subu(result, ZR, result);
- }
- __ SmiTag(result);
- break;
- }
- case Token::kBIT_AND: {
- // No overflow check.
- __ AndImmediate(result, left, imm);
- break;
- }
- case Token::kBIT_OR: {
- // No overflow check.
- __ OrImmediate(result, left, imm);
- break;
- }
- case Token::kBIT_XOR: {
- // No overflow check.
- __ XorImmediate(result, left, imm);
- break;
- }
- case Token::kSHR: {
- // The sra operation masks the count to 5 bits.
- const intptr_t kCountLimit = 0x1F;
- const intptr_t value = Smi::Cast(constant).Value();
- __ Comment("kSHR");
- __ sra(result, left, Utils::Minimum(value + kSmiTagSize, kCountLimit));
- __ SmiTag(result);
- break;
- }
-
- default:
- UNREACHABLE();
- break;
- }
- return;
- }
-
- Register right = locs()->in(1).reg();
- switch (op_kind()) {
- case Token::kADD: {
- if (deopt == NULL) {
- __ addu(result, left, right);
- } else {
- Register temp = locs()->temp(0).reg();
- __ AdduDetectOverflow(result, left, right, CMPRES1, temp);
- __ bltz(CMPRES1, deopt);
- }
- break;
- }
- case Token::kSUB: {
- __ Comment("kSUB");
- if (deopt == NULL) {
- __ subu(result, left, right);
- } else {
- __ SubuDetectOverflow(result, left, right, CMPRES1);
- __ bltz(CMPRES1, deopt);
- }
- break;
- }
- case Token::kMUL: {
- __ Comment("kMUL");
- __ sra(TMP, left, kSmiTagSize);
- __ mult(TMP, right);
- __ mflo(result);
- if (deopt != NULL) {
- __ mfhi(CMPRES2);
- __ sra(CMPRES1, result, 31);
- __ bne(CMPRES1, CMPRES2, deopt);
- }
- break;
- }
- case Token::kBIT_AND: {
- // No overflow check.
- __ and_(result, left, right);
- break;
- }
- case Token::kBIT_OR: {
- // No overflow check.
- __ or_(result, left, right);
- break;
- }
- case Token::kBIT_XOR: {
- // No overflow check.
- __ xor_(result, left, right);
- break;
- }
- case Token::kTRUNCDIV: {
- if (RangeUtils::CanBeZero(right_range())) {
- // Handle divide by zero in the runtime.
- __ beq(right, ZR, deopt);
- }
- Register temp = locs()->temp(0).reg();
- __ SmiUntag(temp, left);
- __ SmiUntag(TMP, right);
- __ div(temp, TMP);
- __ mflo(result);
- // Check the corner case of dividing 'MIN_SMI' by -1, in which
- // case we cannot tag the result.
- __ BranchEqual(result, Immediate(0x40000000), deopt);
- __ SmiTag(result);
- break;
- }
- case Token::kMOD: {
- if (RangeUtils::CanBeZero(right_range())) {
- // Handle divide by zero in the runtime.
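The kTRUNCDIV constant case above uses the classic power-of-two division trick: bias a negative dividend by (divisor - 1) so that the arithmetic shift truncates toward zero, as Dart's ~/ requires. Ignoring smi tagging, a C++ sketch:

    #include <cstdint>
    #include <cstdlib>

    static int32_t TruncDivPow2(int32_t left, int32_t divisor) {
      int32_t shift = 0;
      for (int32_t d = std::abs(divisor); d > 1; d >>= 1) ++shift;
      // left >> 31 is all ones for negative dividends (arithmetic shift),
      // so the bias is divisor - 1 for them and 0 otherwise.
      const int32_t bias = (left >> 31) & (std::abs(divisor) - 1);
      const int32_t result = (left + bias) >> shift;
      return divisor < 0 ? -result : result;
    }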
- __ beq(right, ZR, deopt); - } - Register temp = locs()->temp(0).reg(); - __ SmiUntag(temp, left); - __ SmiUntag(TMP, right); - __ div(temp, TMP); - __ mfhi(result); - // res = left % right; - // if (res < 0) { - // if (right < 0) { - // res = res - right; - // } else { - // res = res + right; - // } - // } - Label done; - __ bgez(result, &done); - if (RangeUtils::Overlaps(right_range(), -1, 1)) { - Label subtract; - __ bltz(right, &subtract); - __ addu(result, result, TMP); - __ b(&done); - __ Bind(&subtract); - __ subu(result, result, TMP); - } else if (right_range()->IsPositive()) { - // Right is positive. - __ addu(result, result, TMP); - } else { - // Right is negative. - __ subu(result, result, TMP); - } - __ Bind(&done); - __ SmiTag(result); - break; - } - case Token::kSHR: { - Register temp = locs()->temp(0).reg(); - if (CanDeoptimize()) { - __ bltz(right, deopt); - } - __ SmiUntag(temp, right); - // sra operation masks the count to 5 bits. - const intptr_t kCountLimit = 0x1F; - if (!RangeUtils::OnlyLessThanOrEqualTo(right_range(), kCountLimit)) { - Label ok; - __ BranchSignedLessEqual(temp, Immediate(kCountLimit), &ok); - __ LoadImmediate(temp, kCountLimit); - __ Bind(&ok); - } - - __ SmiUntag(CMPRES1, left); - __ srav(result, CMPRES1, temp); - __ SmiTag(result); - break; - } - case Token::kDIV: { - // Dispatches to 'Double./'. - // TODO(srdjan): Implement as conversion to double and double division. - UNREACHABLE(); - break; - } - case Token::kOR: - case Token::kAND: { - // Flow graph builder has dissected this operation to guarantee correct - // behavior (short-circuit evaluation). - UNREACHABLE(); - break; - } - default: - UNREACHABLE(); - break; - } -} - - -LocationSummary* CheckEitherNonSmiInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - intptr_t left_cid = left()->Type()->ToCid(); - intptr_t right_cid = right()->Type()->ToCid(); - ASSERT((left_cid != kDoubleCid) && (right_cid != kDoubleCid)); - const intptr_t kNumInputs = 2; - const intptr_t kNumTemps = 0; - LocationSummary* summary = new (zone) - LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); - summary->set_in(0, Location::RequiresRegister()); - summary->set_in(1, Location::RequiresRegister()); - return summary; -} - - -void CheckEitherNonSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - Label* deopt = - compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryDoubleOp, - licm_hoisted_ ? 
ICData::kHoisted : 0); - intptr_t left_cid = left()->Type()->ToCid(); - intptr_t right_cid = right()->Type()->ToCid(); - Register left = locs()->in(0).reg(); - Register right = locs()->in(1).reg(); - if (this->left()->definition() == this->right()->definition()) { - __ andi(CMPRES1, left, Immediate(kSmiTagMask)); - } else if (left_cid == kSmiCid) { - __ andi(CMPRES1, right, Immediate(kSmiTagMask)); - } else if (right_cid == kSmiCid) { - __ andi(CMPRES1, left, Immediate(kSmiTagMask)); - } else { - __ or_(TMP, left, right); - __ andi(CMPRES1, TMP, Immediate(kSmiTagMask)); - } - __ beq(CMPRES1, ZR, deopt); -} - - -LocationSummary* BoxInstr::MakeLocationSummary(Zone* zone, bool opt) const { - const intptr_t kNumInputs = 1; - const intptr_t kNumTemps = 1; - LocationSummary* summary = new (zone) LocationSummary( - zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath); - summary->set_in(0, Location::RequiresFpuRegister()); - summary->set_temp(0, Location::RequiresRegister()); - summary->set_out(0, Location::RequiresRegister()); - return summary; -} - - -void BoxInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - ASSERT(from_representation() == kUnboxedDouble); - - Register out_reg = locs()->out(0).reg(); - DRegister value = locs()->in(0).fpu_reg(); - - BoxAllocationSlowPath::Allocate(compiler, this, compiler->double_class(), - out_reg, locs()->temp(0).reg()); - __ StoreDToOffset(value, out_reg, Double::value_offset() - kHeapObjectTag); -} - - -LocationSummary* UnboxInstr::MakeLocationSummary(Zone* zone, bool opt) const { - const intptr_t kNumInputs = 1; - const intptr_t kNumTemps = 0; - LocationSummary* summary = new (zone) - LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); - summary->set_in(0, Location::RequiresRegister()); - if (representation() == kUnboxedMint) { - summary->set_out(0, Location::Pair(Location::RequiresRegister(), - Location::RequiresRegister())); - } else { - summary->set_out(0, Location::RequiresFpuRegister()); - } - return summary; -} - - -void UnboxInstr::EmitLoadFromBox(FlowGraphCompiler* compiler) { - const Register box = locs()->in(0).reg(); - - switch (representation()) { - case kUnboxedMint: { - PairLocation* result = locs()->out(0).AsPairLocation(); - __ LoadFromOffset(result->At(0).reg(), box, - ValueOffset() - kHeapObjectTag); - __ LoadFromOffset(result->At(1).reg(), box, - ValueOffset() - kHeapObjectTag + kWordSize); - break; - } - - case kUnboxedDouble: { - const DRegister result = locs()->out(0).fpu_reg(); - __ LoadDFromOffset(result, box, Double::value_offset() - kHeapObjectTag); - break; - } - - case kUnboxedFloat32x4: - case kUnboxedFloat64x2: - case kUnboxedInt32x4: { - UNIMPLEMENTED(); - break; - } - - default: - UNREACHABLE(); - break; - } -} - - -void UnboxInstr::EmitSmiConversion(FlowGraphCompiler* compiler) { - const Register box = locs()->in(0).reg(); - - switch (representation()) { - case kUnboxedMint: { - PairLocation* result = locs()->out(0).AsPairLocation(); - __ SmiUntag(result->At(0).reg(), box); - __ sra(result->At(1).reg(), result->At(0).reg(), 31); - break; - } - - case kUnboxedDouble: { - const DRegister result = locs()->out(0).fpu_reg(); - __ SmiUntag(TMP, box); - __ mtc1(TMP, STMP1); - __ cvtdw(result, STMP1); - break; - } - - default: - UNREACHABLE(); - break; - } -} - - -void UnboxInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - const intptr_t value_cid = value()->Type()->ToCid(); - const intptr_t box_cid = BoxCid(); - - if (value_cid == box_cid) { - EmitLoadFromBox(compiler); - } else if 
(CanConvertSmi() && (value_cid == kSmiCid)) { - EmitSmiConversion(compiler); - } else { - const Register box = locs()->in(0).reg(); - Label* deopt = - compiler->AddDeoptStub(GetDeoptId(), ICData::kDeoptCheckClass); - Label is_smi; - - if ((value()->Type()->ToNullableCid() == box_cid) && - value()->Type()->is_nullable()) { - __ BranchEqual(box, Object::null_object(), deopt); - } else { - __ andi(CMPRES1, box, Immediate(kSmiTagMask)); - __ beq(CMPRES1, ZR, CanConvertSmi() ? &is_smi : deopt); - __ LoadClassId(CMPRES1, box); - __ BranchNotEqual(CMPRES1, Immediate(box_cid), deopt); - } - - EmitLoadFromBox(compiler); - - if (is_smi.IsLinked()) { - Label done; - __ b(&done); - __ Bind(&is_smi); - EmitSmiConversion(compiler); - __ Bind(&done); - } - } -} - - -LocationSummary* BoxInteger32Instr::MakeLocationSummary(Zone* zone, - bool opt) const { - ASSERT((from_representation() == kUnboxedInt32) || - (from_representation() == kUnboxedUint32)); - const intptr_t kNumInputs = 1; - const intptr_t kNumTemps = 1; - LocationSummary* summary = new (zone) LocationSummary( - zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath); - summary->set_in(0, Location::RequiresRegister()); - summary->set_temp(0, Location::RequiresRegister()); - summary->set_out(0, Location::RequiresRegister()); - return summary; -} - - -void BoxInteger32Instr::EmitNativeCode(FlowGraphCompiler* compiler) { - Register value = locs()->in(0).reg(); - Register out = locs()->out(0).reg(); - ASSERT(value != out); - - __ SmiTag(out, value); - if (!ValueFitsSmi()) { - Register temp = locs()->temp(0).reg(); - Label done; - if (from_representation() == kUnboxedInt32) { - __ SmiUntag(CMPRES1, out); - __ BranchEqual(CMPRES1, value, &done); - } else { - ASSERT(from_representation() == kUnboxedUint32); - __ AndImmediate(CMPRES1, value, 0xC0000000); - __ BranchEqual(CMPRES1, ZR, &done); - } - BoxAllocationSlowPath::Allocate(compiler, this, compiler->mint_class(), out, - temp); - Register hi; - if (from_representation() == kUnboxedInt32) { - hi = temp; - __ sra(hi, value, kBitsPerWord - 1); - } else { - ASSERT(from_representation() == kUnboxedUint32); - hi = ZR; - } - __ StoreToOffset(value, out, Mint::value_offset() - kHeapObjectTag); - __ StoreToOffset(hi, out, - Mint::value_offset() - kHeapObjectTag + kWordSize); - __ Bind(&done); - } -} - - -LocationSummary* BoxInt64Instr::MakeLocationSummary(Zone* zone, - bool opt) const { - const intptr_t kNumInputs = 1; - const intptr_t kNumTemps = ValueFitsSmi() ? 0 : 1; - LocationSummary* summary = new (zone) - LocationSummary(zone, kNumInputs, kNumTemps, - ValueFitsSmi() ? 
LocationSummary::kNoCall
-                                        : LocationSummary::kCallOnSlowPath);
-  summary->set_in(0, Location::Pair(Location::RequiresRegister(),
-                                    Location::RequiresRegister()));
-  if (!ValueFitsSmi()) {
-    summary->set_temp(0, Location::RequiresRegister());
-  }
-  summary->set_out(0, Location::RequiresRegister());
-  return summary;
-}
-
-
-void BoxInt64Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
-  if (ValueFitsSmi()) {
-    PairLocation* value_pair = locs()->in(0).AsPairLocation();
-    Register value_lo = value_pair->At(0).reg();
-    Register out_reg = locs()->out(0).reg();
-    __ SmiTag(out_reg, value_lo);
-    return;
-  }
-
-  PairLocation* value_pair = locs()->in(0).AsPairLocation();
-  Register value_lo = value_pair->At(0).reg();
-  Register value_hi = value_pair->At(1).reg();
-  Register tmp = locs()->temp(0).reg();
-  Register out_reg = locs()->out(0).reg();
-
-  Label not_smi, done;
-  __ SmiTag(out_reg, value_lo);
-  __ SmiUntag(tmp, out_reg);
-  __ bne(tmp, value_lo, &not_smi);
-  __ delay_slot()->sra(tmp, out_reg, 31);
-  __ beq(tmp, value_hi, &done);
-
-  __ Bind(&not_smi);
-  BoxAllocationSlowPath::Allocate(compiler, this, compiler->mint_class(),
-                                  out_reg, tmp);
-  __ StoreToOffset(value_lo, out_reg, Mint::value_offset() - kHeapObjectTag);
-  __ StoreToOffset(value_hi, out_reg,
-                   Mint::value_offset() - kHeapObjectTag + kWordSize);
-  __ Bind(&done);
-}
-
-
-LocationSummary* UnboxInteger32Instr::MakeLocationSummary(Zone* zone,
-                                                          bool opt) const {
-  ASSERT((representation() == kUnboxedInt32) ||
-         (representation() == kUnboxedUint32));
-  const intptr_t kNumInputs = 1;
-  const intptr_t kNumTemps = 0;
-  LocationSummary* summary = new (zone)
-      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
-  summary->set_in(0, Location::RequiresRegister());
-  summary->set_out(0, Location::RequiresRegister());
-  return summary;
-}
-
-
-static void LoadInt32FromMint(FlowGraphCompiler* compiler,
-                              Register mint,
-                              Register result,
-                              Label* deopt) {
-  __ LoadFieldFromOffset(result, mint, Mint::value_offset());
-  if (deopt != NULL) {
-    __ LoadFieldFromOffset(CMPRES1, mint, Mint::value_offset() + kWordSize);
-    __ sra(CMPRES2, result, kBitsPerWord - 1);
-    __ BranchNotEqual(CMPRES1, CMPRES2, deopt);
-  }
-}
-
-
-void UnboxInteger32Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
-  const intptr_t value_cid = value()->Type()->ToCid();
-  const Register value = locs()->in(0).reg();
-  const Register out = locs()->out(0).reg();
-  Label* deopt =
-      CanDeoptimize()
-          ? compiler->AddDeoptStub(GetDeoptId(), ICData::kDeoptUnboxInteger)
-          : NULL;
-  Label* out_of_range = !is_truncating() ?
deopt : NULL; - ASSERT(value != out); - - if (value_cid == kSmiCid) { - __ SmiUntag(out, value); - } else if (value_cid == kMintCid) { - LoadInt32FromMint(compiler, value, out, out_of_range); - } else if (!CanDeoptimize()) { - Label done; - __ SmiUntag(out, value); - __ andi(CMPRES1, value, Immediate(kSmiTagMask)); - __ beq(CMPRES1, ZR, &done); - LoadInt32FromMint(compiler, value, out, NULL); - __ Bind(&done); - } else { - Label done; - __ SmiUntag(out, value); - __ andi(CMPRES1, value, Immediate(kSmiTagMask)); - __ beq(CMPRES1, ZR, &done); - __ LoadClassId(CMPRES1, value); - __ BranchNotEqual(CMPRES1, Immediate(kMintCid), deopt); - LoadInt32FromMint(compiler, value, out, out_of_range); - __ Bind(&done); - } -} - - -LocationSummary* BinaryDoubleOpInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - const intptr_t kNumInputs = 2; - const intptr_t kNumTemps = 0; - LocationSummary* summary = new (zone) - LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); - summary->set_in(0, Location::RequiresFpuRegister()); - summary->set_in(1, Location::RequiresFpuRegister()); - summary->set_out(0, Location::RequiresFpuRegister()); - return summary; -} - - -void BinaryDoubleOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - DRegister left = locs()->in(0).fpu_reg(); - DRegister right = locs()->in(1).fpu_reg(); - DRegister result = locs()->out(0).fpu_reg(); - switch (op_kind()) { - case Token::kADD: - __ addd(result, left, right); - break; - case Token::kSUB: - __ subd(result, left, right); - break; - case Token::kMUL: - __ muld(result, left, right); - break; - case Token::kDIV: - __ divd(result, left, right); - break; - default: - UNREACHABLE(); - } -} - - -LocationSummary* DoubleTestOpInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - const intptr_t kNumInputs = 1; - const intptr_t kNumTemps = 0; - LocationSummary* summary = new (zone) - LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); - summary->set_in(0, Location::RequiresFpuRegister()); - summary->set_out(0, Location::RequiresRegister()); - return summary; -} - - -Condition DoubleTestOpInstr::EmitComparisonCode(FlowGraphCompiler* compiler, - BranchLabels labels) { - const DRegister value = locs()->in(0).fpu_reg(); - const bool is_negated = kind() != Token::kEQ; - if (op_kind() == MethodRecognizer::kDouble_getIsNaN) { - __ cund(value, value); - if (labels.fall_through == labels.true_label) { - if (is_negated) { - __ bc1t(labels.false_label); - } else { - __ bc1f(labels.false_label); - } - } else if (labels.fall_through == labels.false_label) { - if (is_negated) { - __ bc1f(labels.true_label); - } else { - __ bc1t(labels.true_label); - } - } else { - if (is_negated) { - __ bc1t(labels.false_label); - } else { - __ bc1f(labels.false_label); - } - __ b(labels.true_label); - } - return Condition(ZR, ZR, INVALID_RELATION); // Unused. - } else { - ASSERT(op_kind() == MethodRecognizer::kDouble_getIsInfinite); - __ mfc1(CMPRES1, EvenFRegisterOf(value)); - // If the low word isn't zero, then it isn't infinity. - __ bne(CMPRES1, ZR, is_negated ? labels.true_label : labels.false_label); - __ mfc1(CMPRES1, OddFRegisterOf(value)); - // Mask off the sign bit. - __ AndImmediate(CMPRES1, CMPRES1, 0x7FFFFFFF); - // Compare with +infinity. - __ LoadImmediate(CMPRES2, 0x7FF00000); - return Condition(CMPRES1, CMPRES2, is_negated ? 
NE : EQ); - } -} - -LocationSummary* BinaryFloat32x4OpInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - UNIMPLEMENTED(); - return NULL; -} - - -void BinaryFloat32x4OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - UNIMPLEMENTED(); -} - - -LocationSummary* BinaryFloat64x2OpInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - UNIMPLEMENTED(); - return NULL; -} - - -void BinaryFloat64x2OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - UNIMPLEMENTED(); -} - - -LocationSummary* Simd32x4ShuffleInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - UNIMPLEMENTED(); - return NULL; -} - - -void Simd32x4ShuffleInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - UNIMPLEMENTED(); -} - - -LocationSummary* Simd32x4ShuffleMixInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - UNIMPLEMENTED(); - return NULL; -} - - -void Simd32x4ShuffleMixInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - UNIMPLEMENTED(); -} - - -LocationSummary* Float32x4ConstructorInstr::MakeLocationSummary( - Zone* zone, - bool opt) const { - UNIMPLEMENTED(); - return NULL; -} - - -void Float32x4ConstructorInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - UNIMPLEMENTED(); -} - - -LocationSummary* Float32x4ZeroInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - UNIMPLEMENTED(); - return NULL; -} - - -void Float32x4ZeroInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - UNIMPLEMENTED(); -} - - -LocationSummary* Float32x4SplatInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - UNIMPLEMENTED(); - return NULL; -} - - -void Float32x4SplatInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - UNIMPLEMENTED(); -} - - -LocationSummary* Float32x4ComparisonInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - UNIMPLEMENTED(); - return NULL; -} - - -void Float32x4ComparisonInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - UNIMPLEMENTED(); -} - - -LocationSummary* Float32x4MinMaxInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - UNIMPLEMENTED(); - return NULL; -} - - -void Float32x4MinMaxInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - UNIMPLEMENTED(); -} - - -LocationSummary* Float32x4SqrtInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - UNIMPLEMENTED(); - return NULL; -} - - -void Float32x4SqrtInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - UNIMPLEMENTED(); -} - - -LocationSummary* Float32x4ScaleInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - UNIMPLEMENTED(); - return NULL; -} - - -void Float32x4ScaleInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - UNIMPLEMENTED(); -} - - -LocationSummary* Float32x4ZeroArgInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - UNIMPLEMENTED(); - return NULL; -} - - -void Float32x4ZeroArgInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - UNIMPLEMENTED(); -} - - -LocationSummary* Float32x4ClampInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - UNIMPLEMENTED(); - return NULL; -} - - -void Float32x4ClampInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - UNIMPLEMENTED(); -} - - -LocationSummary* Float32x4WithInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - UNIMPLEMENTED(); - return NULL; -} - - -void Float32x4WithInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - UNIMPLEMENTED(); -} - - -LocationSummary* Float32x4ToInt32x4Instr::MakeLocationSummary(Zone* zone, - bool opt) const { - UNIMPLEMENTED(); - return NULL; -} - - -void Float32x4ToInt32x4Instr::EmitNativeCode(FlowGraphCompiler* compiler) { - UNIMPLEMENTED(); -} - - 
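Review note: DoubleTestOpInstr::EmitComparisonCode above uses two classic IEEE-754 identities: `c.un.d value, value` is true only for NaN (a NaN compares unordered with itself), and +/-infinity is the unique pattern with a zero low word and, after masking the sign bit, a high word of 0x7FF00000. A portable restatement (helper names are ours):

```cpp
#include <cstdint>
#include <cstring>

bool IsNaNLike(double d) {
  return d != d;  // only NaN is unordered with itself (c.un.d)
}

bool IsInfiniteLike(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof bits);
  const uint32_t lo = static_cast<uint32_t>(bits);
  const uint32_t hi = static_cast<uint32_t>(bits >> 32) & 0x7FFFFFFF;
  return lo == 0 && hi == 0x7FF00000;  // mask sign, compare with +infinity
}
```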
-LocationSummary* Simd64x2ShuffleInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - UNIMPLEMENTED(); - return NULL; -} - - -void Simd64x2ShuffleInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - UNIMPLEMENTED(); -} - - -LocationSummary* Float64x2ZeroInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - UNIMPLEMENTED(); - return NULL; -} - - -void Float64x2ZeroInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - UNIMPLEMENTED(); -} - - -LocationSummary* Float64x2SplatInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - UNIMPLEMENTED(); - return NULL; -} - - -void Float64x2SplatInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - UNIMPLEMENTED(); -} - - -LocationSummary* Float64x2ConstructorInstr::MakeLocationSummary( - Zone* zone, - bool opt) const { - UNIMPLEMENTED(); - return NULL; -} - - -void Float64x2ConstructorInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - UNIMPLEMENTED(); -} - - -LocationSummary* Float64x2ToFloat32x4Instr::MakeLocationSummary( - Zone* zone, - bool opt) const { - UNIMPLEMENTED(); - return NULL; -} - - -void Float64x2ToFloat32x4Instr::EmitNativeCode(FlowGraphCompiler* compiler) { - UNIMPLEMENTED(); -} - - -LocationSummary* Float32x4ToFloat64x2Instr::MakeLocationSummary( - Zone* zone, - bool opt) const { - UNIMPLEMENTED(); - return NULL; -} - - -void Float32x4ToFloat64x2Instr::EmitNativeCode(FlowGraphCompiler* compiler) { - UNIMPLEMENTED(); -} - - -LocationSummary* Float64x2ZeroArgInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - UNIMPLEMENTED(); - return NULL; -} - - -void Float64x2ZeroArgInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - UNIMPLEMENTED(); -} - - -LocationSummary* Float64x2OneArgInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - UNIMPLEMENTED(); - return NULL; -} - - -void Float64x2OneArgInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - UNIMPLEMENTED(); -} - - -LocationSummary* Int32x4ConstructorInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - UNIMPLEMENTED(); - return NULL; -} - - -void Int32x4ConstructorInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - UNIMPLEMENTED(); -} - - -LocationSummary* Int32x4BoolConstructorInstr::MakeLocationSummary( - Zone* zone, - bool opt) const { - UNIMPLEMENTED(); - return NULL; -} - - -void Int32x4BoolConstructorInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - UNIMPLEMENTED(); -} - - -LocationSummary* Int32x4GetFlagInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - UNIMPLEMENTED(); - return NULL; -} - - -void Int32x4GetFlagInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - UNIMPLEMENTED(); -} - - -LocationSummary* Simd32x4GetSignMaskInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - UNIMPLEMENTED(); - return NULL; -} - - -void Simd32x4GetSignMaskInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - UNIMPLEMENTED(); -} - - -LocationSummary* Int32x4SelectInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - UNIMPLEMENTED(); - return NULL; -} - - -void Int32x4SelectInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - UNIMPLEMENTED(); -} - - -LocationSummary* Int32x4SetFlagInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - UNIMPLEMENTED(); - return NULL; -} - - -void Int32x4SetFlagInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - UNIMPLEMENTED(); -} - - -LocationSummary* Int32x4ToFloat32x4Instr::MakeLocationSummary(Zone* zone, - bool opt) const { - UNIMPLEMENTED(); - return NULL; -} - - -void Int32x4ToFloat32x4Instr::EmitNativeCode(FlowGraphCompiler* compiler) { - UNIMPLEMENTED(); -} 
- - -LocationSummary* BinaryInt32x4OpInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - UNIMPLEMENTED(); - return NULL; -} - - -void BinaryInt32x4OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - UNIMPLEMENTED(); -} - - -LocationSummary* MathUnaryInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - ASSERT((kind() == MathUnaryInstr::kSqrt) || - (kind() == MathUnaryInstr::kDoubleSquare)); - const intptr_t kNumInputs = 1; - const intptr_t kNumTemps = 0; - LocationSummary* summary = new (zone) - LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); - summary->set_in(0, Location::RequiresFpuRegister()); - summary->set_out(0, Location::RequiresFpuRegister()); - return summary; -} - - -void MathUnaryInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - if (kind() == MathUnaryInstr::kSqrt) { - __ sqrtd(locs()->out(0).fpu_reg(), locs()->in(0).fpu_reg()); - } else if (kind() == MathUnaryInstr::kDoubleSquare) { - DRegister val = locs()->in(0).fpu_reg(); - DRegister result = locs()->out(0).fpu_reg(); - __ muld(result, val, val); - } else { - UNREACHABLE(); - } -} - - -LocationSummary* CaseInsensitiveCompareUC16Instr::MakeLocationSummary( - Zone* zone, - bool opt) const { - const intptr_t kNumTemps = 0; - LocationSummary* summary = new (zone) - LocationSummary(zone, InputCount(), kNumTemps, LocationSummary::kCall); - summary->set_in(0, Location::RegisterLocation(A0)); - summary->set_in(1, Location::RegisterLocation(A1)); - summary->set_in(2, Location::RegisterLocation(A2)); - summary->set_in(3, Location::RegisterLocation(A3)); - summary->set_out(0, Location::RegisterLocation(V0)); - return summary; -} - - -void CaseInsensitiveCompareUC16Instr::EmitNativeCode( - FlowGraphCompiler* compiler) { - // Call the function. - __ CallRuntime(TargetFunction(), TargetFunction().argument_count()); -} - - -LocationSummary* MathMinMaxInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - if (result_cid() == kDoubleCid) { - const intptr_t kNumInputs = 2; - const intptr_t kNumTemps = 1; - LocationSummary* summary = new (zone) - LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); - summary->set_in(0, Location::RequiresFpuRegister()); - summary->set_in(1, Location::RequiresFpuRegister()); - // Reuse the left register so that code can be made shorter. - summary->set_out(0, Location::SameAsFirstInput()); - summary->set_temp(0, Location::RequiresRegister()); - return summary; - } - ASSERT(result_cid() == kSmiCid); - const intptr_t kNumInputs = 2; - const intptr_t kNumTemps = 0; - LocationSummary* summary = new (zone) - LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); - summary->set_in(0, Location::RequiresRegister()); - summary->set_in(1, Location::RequiresRegister()); - // Reuse the left register so that code can be made shorter. 
- summary->set_out(0, Location::SameAsFirstInput()); - return summary; -} - - -void MathMinMaxInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - ASSERT((op_kind() == MethodRecognizer::kMathMin) || - (op_kind() == MethodRecognizer::kMathMax)); - const intptr_t is_min = (op_kind() == MethodRecognizer::kMathMin); - if (result_cid() == kDoubleCid) { - Label done, returns_nan, are_equal; - DRegister left = locs()->in(0).fpu_reg(); - DRegister right = locs()->in(1).fpu_reg(); - DRegister result = locs()->out(0).fpu_reg(); - Register temp = locs()->temp(0).reg(); - __ cund(left, right); - __ bc1t(&returns_nan); - __ ceqd(left, right); - __ bc1t(&are_equal); - if (is_min) { - __ coltd(left, right); - } else { - __ coltd(right, left); - } - // TODO(zra): Add conditional moves. - ASSERT(left == result); - __ bc1t(&done); - __ movd(result, right); - __ b(&done); - - __ Bind(&returns_nan); - __ LoadImmediate(result, NAN); - __ b(&done); - - __ Bind(&are_equal); - Label left_is_negative; - // Check for negative zero: -0.0 is equal 0.0 but min or max must return - // -0.0 or 0.0 respectively. - // Check for negative left value (get the sign bit): - // - min -> left is negative ? left : right. - // - max -> left is negative ? right : left - // Check the sign bit. - __ mfc1(temp, OddFRegisterOf(left)); // Moves bits 32...63 of left to temp. - if (is_min) { - ASSERT(left == result); - __ bltz(temp, &done); // Left is negative. - } else { - __ bgez(temp, &done); // Left is positive. - } - __ movd(result, right); - __ Bind(&done); - return; - } - - Label done; - ASSERT(result_cid() == kSmiCid); - Register left = locs()->in(0).reg(); - Register right = locs()->in(1).reg(); - Register result = locs()->out(0).reg(); - ASSERT(result == left); - if (is_min) { - __ BranchSignedLessEqual(left, right, &done); - } else { - __ BranchSignedGreaterEqual(left, right, &done); - } - __ mov(result, right); - __ Bind(&done); -} - - -LocationSummary* UnarySmiOpInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - const intptr_t kNumInputs = 1; - const intptr_t kNumTemps = 0; - LocationSummary* summary = new (zone) - LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); - summary->set_in(0, Location::RequiresRegister()); - // We make use of 3-operand instructions by not requiring result register - // to be identical to first input register as on Intel. - summary->set_out(0, Location::RequiresRegister()); - return summary; -} - - -void UnarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - Register value = locs()->in(0).reg(); - Register result = locs()->out(0).reg(); - switch (op_kind()) { - case Token::kNEGATE: { - Label* deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnaryOp); - __ SubuDetectOverflow(result, ZR, value, CMPRES1); - __ bltz(CMPRES1, deopt); - break; - } - case Token::kBIT_NOT: - __ nor(result, value, ZR); - __ addiu(result, result, Immediate(-1)); // Remove inverted smi-tag. 
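Review note: the kBIT_NOT case just above avoids untagging entirely. With smis stored as `n << 1` (tag bit 0), `nor` flips the tag bit to 1 and the `addiu -1` clears it again, because `~(n << 1) - 1 == (~n) << 1`. A checkable sketch of the identity (the helper name is ours):

```cpp
#include <cassert>
#include <cstdint>

int32_t SmiBitNot(int32_t tagged) {
  assert((tagged & 1) == 0);  // expects a tagged smi
  return ~tagged - 1;         // nor(result, value, ZR); addiu(result, -1)
}
// SmiBitNot(5 << 1) == (~5) << 1, i.e. tagged(-6) == -12.
```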
- break; - default: - UNREACHABLE(); - } -} - - -LocationSummary* UnaryDoubleOpInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - const intptr_t kNumInputs = 1; - const intptr_t kNumTemps = 0; - LocationSummary* summary = new (zone) - LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); - summary->set_in(0, Location::RequiresFpuRegister()); - summary->set_out(0, Location::RequiresFpuRegister()); - return summary; -} - - -void UnaryDoubleOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - FpuRegister result = locs()->out(0).fpu_reg(); - FpuRegister value = locs()->in(0).fpu_reg(); - __ negd(result, value); -} - - -LocationSummary* Int32ToDoubleInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - const intptr_t kNumInputs = 1; - const intptr_t kNumTemps = 0; - LocationSummary* result = new (zone) - LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); - result->set_in(0, Location::RequiresRegister()); - result->set_out(0, Location::RequiresFpuRegister()); - return result; -} - - -void Int32ToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - Register value = locs()->in(0).reg(); - FpuRegister result = locs()->out(0).fpu_reg(); - __ mtc1(value, STMP1); - __ cvtdw(result, STMP1); -} - - -LocationSummary* SmiToDoubleInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - const intptr_t kNumInputs = 1; - const intptr_t kNumTemps = 0; - LocationSummary* result = new (zone) - LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); - result->set_in(0, Location::RequiresRegister()); - result->set_out(0, Location::RequiresFpuRegister()); - return result; -} - - -void SmiToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - Register value = locs()->in(0).reg(); - FpuRegister result = locs()->out(0).fpu_reg(); - __ SmiUntag(TMP, value); - __ mtc1(TMP, STMP1); - __ cvtdw(result, STMP1); -} - - -LocationSummary* MintToDoubleInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - UNIMPLEMENTED(); - return NULL; -} - - -void MintToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - UNIMPLEMENTED(); -} - - -LocationSummary* DoubleToIntegerInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - const intptr_t kNumInputs = 1; - const intptr_t kNumTemps = 0; - LocationSummary* result = new (zone) - LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall); - result->set_in(0, Location::RegisterLocation(T1)); - result->set_out(0, Location::RegisterLocation(V0)); - return result; -} - - -void DoubleToIntegerInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - Register result = locs()->out(0).reg(); - Register value_obj = locs()->in(0).reg(); - ASSERT(result == V0); - ASSERT(result != value_obj); - __ LoadDFromOffset(DTMP, value_obj, Double::value_offset() - kHeapObjectTag); - __ truncwd(STMP1, DTMP); - __ mfc1(result, STMP1); - - // Overflow is signaled with minint. - Label do_call, done; - // Check for overflow and that it fits into Smi. 
- __ LoadImmediate(TMP, 0xC0000000); - __ subu(CMPRES1, result, TMP); - __ bltz(CMPRES1, &do_call); - __ SmiTag(result); - __ b(&done); - __ Bind(&do_call); - __ Push(value_obj); - ASSERT(instance_call()->HasICData()); - const ICData& ic_data = *instance_call()->ic_data(); - ASSERT(ic_data.NumberOfChecksIs(1)); - const Function& target = Function::ZoneHandle(ic_data.GetTargetAt(0)); - const int kTypeArgsLen = 0; - const int kNumberOfArguments = 1; - const Array& kNoArgumentNames = Object::null_array(); - ArgumentsInfo args_info(kTypeArgsLen, kNumberOfArguments, kNoArgumentNames); - compiler->GenerateStaticCall(deopt_id(), instance_call()->token_pos(), target, - args_info, locs(), ICData::Handle()); - __ Bind(&done); -} - - -LocationSummary* DoubleToSmiInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - const intptr_t kNumInputs = 1; - const intptr_t kNumTemps = 0; - LocationSummary* result = new (zone) - LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); - result->set_in(0, Location::RequiresFpuRegister()); - result->set_out(0, Location::RequiresRegister()); - return result; -} - - -void DoubleToSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - Label* deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptDoubleToSmi); - Register result = locs()->out(0).reg(); - DRegister value = locs()->in(0).fpu_reg(); - __ truncwd(STMP1, value); - __ mfc1(result, STMP1); - - // Check for overflow and that it fits into Smi. - __ LoadImmediate(TMP, 0xC0000000); - __ subu(CMPRES1, result, TMP); - __ bltz(CMPRES1, deopt); - __ SmiTag(result); -} - - -LocationSummary* DoubleToDoubleInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - UNIMPLEMENTED(); - return NULL; -} - - -void DoubleToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - UNIMPLEMENTED(); -} - - -LocationSummary* DoubleToFloatInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - const intptr_t kNumInputs = 1; - const intptr_t kNumTemps = 0; - LocationSummary* result = new (zone) - LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); - result->set_in(0, Location::RequiresFpuRegister()); - result->set_out(0, Location::SameAsFirstInput()); - return result; -} - - -void DoubleToFloatInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - DRegister value = locs()->in(0).fpu_reg(); - FRegister result = EvenFRegisterOf(locs()->out(0).fpu_reg()); - __ cvtsd(result, value); -} - - -LocationSummary* FloatToDoubleInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - const intptr_t kNumInputs = 1; - const intptr_t kNumTemps = 0; - LocationSummary* result = new (zone) - LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); - result->set_in(0, Location::RequiresFpuRegister()); - result->set_out(0, Location::SameAsFirstInput()); - return result; -} - - -void FloatToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - FRegister value = EvenFRegisterOf(locs()->in(0).fpu_reg()); - DRegister result = locs()->out(0).fpu_reg(); - __ cvtds(result, value); -} - - -LocationSummary* InvokeMathCFunctionInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - // Calling convention on MIPS uses D6 and D7 to pass the first two - // double arguments. 
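Review note: DoubleToIntegerInstr and DoubleToSmiInstr above share the same range check: `subu(CMPRES1, result, 0xC0000000)` computes `result + 0x40000000` modulo 2^32, whose sign bit is set exactly when `result` lies outside the smi range [-2^30, 2^30), so a single `bltz` rejects overflow in both directions. A host-side sketch (the name is ours):

```cpp
#include <cstdint>

bool FitsInSmi32(int32_t value) {
  // value - 0xC0000000 == value + 0x40000000 (mod 2^32); negative iff
  // value < -2^30 or value >= 2^30.
  return static_cast<int32_t>(static_cast<uint32_t>(value) + 0x40000000u) >= 0;
}
// FitsInSmi32(0x3FFFFFFF) and FitsInSmi32(-0x40000000) hold;
// FitsInSmi32(0x40000000) and FitsInSmi32(-0x40000001) do not.
```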
- ASSERT((InputCount() == 1) || (InputCount() == 2)); - const intptr_t kNumTemps = 0; - LocationSummary* result = new (zone) - LocationSummary(zone, InputCount(), kNumTemps, LocationSummary::kCall); - result->set_in(0, Location::FpuRegisterLocation(D6)); - if (InputCount() == 2) { - result->set_in(1, Location::FpuRegisterLocation(D7)); - } - result->set_out(0, Location::FpuRegisterLocation(D0)); - return result; -} - - -// Pseudo code: -// if (exponent == 0.0) return 1.0; -// // Speed up simple cases. -// if (exponent == 1.0) return base; -// if (exponent == 2.0) return base * base; -// if (exponent == 3.0) return base * base * base; -// if (base == 1.0) return 1.0; -// if (base.isNaN || exponent.isNaN) { -// return double.NAN; -// } -// if (base != -Infinity && exponent == 0.5) { -// if (base == 0.0) return 0.0; -// return sqrt(value); -// } -// TODO(srdjan): Move into a stub? -static void InvokeDoublePow(FlowGraphCompiler* compiler, - InvokeMathCFunctionInstr* instr) { - ASSERT(instr->recognized_kind() == MethodRecognizer::kMathDoublePow); - const intptr_t kInputCount = 2; - ASSERT(instr->InputCount() == kInputCount); - LocationSummary* locs = instr->locs(); - - DRegister base = locs->in(0).fpu_reg(); - DRegister exp = locs->in(1).fpu_reg(); - DRegister result = locs->out(0).fpu_reg(); - - Label check_base, skip_call; - __ LoadImmediate(DTMP, 0.0); - __ LoadImmediate(result, 1.0); - // exponent == 0.0 -> return 1.0; - __ cund(exp, exp); - __ bc1t(&check_base); // NaN -> check base. - __ ceqd(exp, DTMP); - __ bc1t(&skip_call); // exp is 0.0, result is 1.0. - - // exponent == 1.0 ? - __ ceqd(exp, result); - Label return_base; - __ bc1t(&return_base); - // exponent == 2.0 ? - __ LoadImmediate(DTMP, 2.0); - __ ceqd(exp, DTMP); - Label return_base_times_2; - __ bc1t(&return_base_times_2); - // exponent == 3.0 ? - __ LoadImmediate(DTMP, 3.0); - __ ceqd(exp, DTMP); - __ bc1f(&check_base); - - // base_times_3. - __ muld(result, base, base); - __ muld(result, result, base); - __ b(&skip_call); - - __ Bind(&return_base); - __ movd(result, base); - __ b(&skip_call); - - __ Bind(&return_base_times_2); - __ muld(result, base, base); - __ b(&skip_call); - - __ Bind(&check_base); - // Note: 'exp' could be NaN. - // base == 1.0 -> return 1.0; - __ cund(base, base); - Label return_nan; - __ bc1t(&return_nan); - __ ceqd(base, result); - __ bc1t(&skip_call); // base and result are 1.0. - - __ cund(exp, exp); - Label try_sqrt; - __ bc1f(&try_sqrt); // Neither 'exp' nor 'base' are NaN. - - __ Bind(&return_nan); - __ LoadImmediate(result, NAN); - __ b(&skip_call); - - __ Bind(&try_sqrt); - // Before calling pow, check if we could use sqrt instead of pow. - __ LoadImmediate(result, kNegInfinity); - // base == -Infinity -> call pow; - __ ceqd(base, result); - Label do_pow; - __ bc1t(&do_pow); - - // exponent == 0.5 ? - __ LoadImmediate(result, 0.5); - __ ceqd(exp, result); - __ bc1f(&do_pow); - - // base == 0 -> return 0; - __ LoadImmediate(DTMP, 0.0); - __ ceqd(base, DTMP); - Label return_zero; - __ bc1t(&return_zero); - - __ sqrtd(result, base); - __ b(&skip_call); - - __ Bind(&return_zero); - __ movd(result, DTMP); - __ b(&skip_call); - - __ Bind(&do_pow); - - // double values are passed and returned in vfp registers. - __ CallRuntime(instr->TargetFunction(), kInputCount); - __ Bind(&skip_call); -} - - -void InvokeMathCFunctionInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - // For pow-function return NaN if exponent is NaN. 
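Review note: the pseudo code above InvokeDoublePow restates well in portable C++; this sketch is behaviorally equivalent to the fast paths, not a transcription of the emitted MIPS, and the trailing std::pow stands in for the runtime call:

```cpp
#include <cmath>

double DoublePowLike(double base, double exp) {
  if (exp == 0.0) return 1.0;              // also covers exp == -0.0
  if (exp == 1.0) return base;
  if (exp == 2.0) return base * base;
  if (exp == 3.0) return base * base * base;
  if (std::isnan(base) || std::isnan(exp)) {
    return base == 1.0 ? 1.0 : NAN;        // pow(1.0, NaN) is 1.0
  }
  if (base != -INFINITY && exp == 0.5) {
    return base == 0.0 ? 0.0 : std::sqrt(base);
  }
  return std::pow(base, exp);              // the CallRuntime fallback
}
```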
- if (recognized_kind() == MethodRecognizer::kMathDoublePow) { - InvokeDoublePow(compiler, this); - return; - } - // double values are passed and returned in vfp registers. - __ CallRuntime(TargetFunction(), InputCount()); -} - - -LocationSummary* ExtractNthOutputInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - // Only use this instruction in optimized code. - ASSERT(opt); - const intptr_t kNumInputs = 1; - LocationSummary* summary = - new (zone) LocationSummary(zone, kNumInputs, 0, LocationSummary::kNoCall); - if (representation() == kUnboxedDouble) { - if (index() == 0) { - summary->set_in( - 0, Location::Pair(Location::RequiresFpuRegister(), Location::Any())); - } else { - ASSERT(index() == 1); - summary->set_in( - 0, Location::Pair(Location::Any(), Location::RequiresFpuRegister())); - } - summary->set_out(0, Location::RequiresFpuRegister()); - } else { - ASSERT(representation() == kTagged); - if (index() == 0) { - summary->set_in( - 0, Location::Pair(Location::RequiresRegister(), Location::Any())); - } else { - ASSERT(index() == 1); - summary->set_in( - 0, Location::Pair(Location::Any(), Location::RequiresRegister())); - } - summary->set_out(0, Location::RequiresRegister()); - } - return summary; -} - - -void ExtractNthOutputInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - ASSERT(locs()->in(0).IsPairLocation()); - PairLocation* pair = locs()->in(0).AsPairLocation(); - Location in_loc = pair->At(index()); - if (representation() == kUnboxedDouble) { - DRegister out = locs()->out(0).fpu_reg(); - DRegister in = in_loc.fpu_reg(); - __ movd(out, in); - } else { - ASSERT(representation() == kTagged); - Register out = locs()->out(0).reg(); - Register in = in_loc.reg(); - __ mov(out, in); - } -} - - -LocationSummary* TruncDivModInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - const intptr_t kNumInputs = 2; - const intptr_t kNumTemps = 1; - LocationSummary* summary = new (zone) - LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); - summary->set_in(0, Location::RequiresRegister()); - summary->set_in(1, Location::RequiresRegister()); - summary->set_temp(0, Location::RequiresRegister()); - // Output is a pair of registers. - summary->set_out(0, Location::Pair(Location::RequiresRegister(), - Location::RequiresRegister())); - return summary; -} - - -void TruncDivModInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - ASSERT(CanDeoptimize()); - Label* deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinarySmiOp); - Register left = locs()->in(0).reg(); - Register right = locs()->in(1).reg(); - Register temp = locs()->temp(0).reg(); - ASSERT(locs()->out(0).IsPairLocation()); - PairLocation* pair = locs()->out(0).AsPairLocation(); - Register result_div = pair->At(0).reg(); - Register result_mod = pair->At(1).reg(); - if (RangeUtils::CanBeZero(divisor_range())) { - // Handle divide by zero in runtime. - __ beq(right, ZR, deopt); - } - __ SmiUntag(temp, left); - __ SmiUntag(TMP, right); - __ div(temp, TMP); - __ mflo(result_div); - __ mfhi(result_mod); - // Check the corner case of dividing the 'MIN_SMI' with -1, in which - // case we cannot tag the result. 
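Review note: the `BranchEqual(result_div, Immediate(0x40000000), deopt)` emitted just below guards the single quotient that cannot be tagged: smis span [-2^30, 2^30), and only MIN_SMI / -1 lands on 2^30 itself. A compile-time check of that arithmetic:

```cpp
#include <cstdint>

constexpr int32_t kMinSmi = -(INT32_C(1) << 30);
constexpr int32_t kMaxSmi = (INT32_C(1) << 30) - 1;
// -2^30 / -1 == 2^30 == 0x40000000, one past the largest smi.
static_assert(kMinSmi / -1 == 0x40000000, "quotient is exactly 0x40000000");
static_assert(kMinSmi / -1 > kMaxSmi, "MIN_SMI / -1 escapes the smi range");
```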
- __ BranchEqual(result_div, Immediate(0x40000000), deopt); - // res = left % right; - // if (res < 0) { - // if (right < 0) { - // res = res - right; - // } else { - // res = res + right; - // } - // } - Label done; - __ bgez(result_mod, &done); - if (RangeUtils::Overlaps(divisor_range(), -1, 1)) { - Label subtract; - __ bltz(right, &subtract); - __ addu(result_mod, result_mod, TMP); - __ b(&done); - __ Bind(&subtract); - __ subu(result_mod, result_mod, TMP); - } else if (divisor_range()->IsPositive()) { - // Right is positive. - __ addu(result_mod, result_mod, TMP); - } else { - // Right is negative. - __ subu(result_mod, result_mod, TMP); - } - __ Bind(&done); - - __ SmiTag(result_div); - __ SmiTag(result_mod); -} - - -LocationSummary* PolymorphicInstanceCallInstr::MakeLocationSummary( - Zone* zone, - bool opt) const { - return MakeCallSummary(zone); -} - - -LocationSummary* BranchInstr::MakeLocationSummary(Zone* zone, bool opt) const { - comparison()->InitializeLocationSummary(zone, opt); - // Branches don't produce a result. - comparison()->locs()->set_out(0, Location::NoLocation()); - return comparison()->locs(); -} - - -void BranchInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - __ Comment("BranchInstr"); - comparison()->EmitBranchCode(compiler, this); -} - - -LocationSummary* CheckClassInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - const intptr_t kNumInputs = 1; - const bool need_mask_temp = IsBitTest(); - const intptr_t kNumTemps = !IsNullCheck() ? (need_mask_temp ? 2 : 1) : 0; - LocationSummary* summary = new (zone) - LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); - summary->set_in(0, Location::RequiresRegister()); - if (!IsNullCheck()) { - summary->set_temp(0, Location::RequiresRegister()); - if (need_mask_temp) { - summary->set_temp(1, Location::RequiresRegister()); - } - } - return summary; -} - - -void CheckClassInstr::EmitNullCheck(FlowGraphCompiler* compiler, Label* deopt) { - if (IsDeoptIfNull()) { - __ BranchEqual(locs()->in(0).reg(), Object::null_object(), deopt); - } else { - ASSERT(IsDeoptIfNotNull()); - __ BranchNotEqual(locs()->in(0).reg(), Object::null_object(), deopt); - } -} - - -void CheckClassInstr::EmitBitTest(FlowGraphCompiler* compiler, - intptr_t min, - intptr_t max, - intptr_t mask, - Label* deopt) { - Register biased_cid = locs()->temp(0).reg(); - __ LoadImmediate(TMP, min); - __ subu(biased_cid, biased_cid, TMP); - __ LoadImmediate(TMP, max - min); - __ BranchUnsignedGreater(biased_cid, TMP, deopt); - - Register bit_reg = locs()->temp(1).reg(); - __ LoadImmediate(bit_reg, 1); - __ sllv(bit_reg, bit_reg, biased_cid); - __ AndImmediate(bit_reg, bit_reg, mask); - __ beq(bit_reg, ZR, deopt); -} - - -int CheckClassInstr::EmitCheckCid(FlowGraphCompiler* compiler, - int bias, - intptr_t cid_start, - intptr_t cid_end, - bool is_last, - Label* is_ok, - Label* deopt, - bool use_near_jump) { - Register biased_cid = locs()->temp(0).reg(); - if (cid_start == cid_end) { - __ LoadImmediate(TMP, cid_start - bias); - if (is_last) { - __ bne(biased_cid, TMP, deopt); - } else { - __ beq(biased_cid, TMP, is_ok); - } - } else { - // For class ID ranges use a subtract followed by an unsigned - // comparison to check both ends of the ranges with one comparison. - __ AddImmediate(biased_cid, biased_cid, bias - cid_start); - bias = cid_start; - // TODO(erikcorry): We should use sltiu instead of the temporary TMP if - // the range is small enough. 
- __ LoadImmediate(TMP, cid_end - cid_start); - // Reverse comparison so we get 1 if biased_cid > tmp ie cid is out of - // range. - __ sltu(TMP, TMP, biased_cid); - if (is_last) { - __ bne(TMP, ZR, deopt); - } else { - __ beq(TMP, ZR, is_ok); - } - } - return bias; -} - - -LocationSummary* CheckSmiInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - const intptr_t kNumInputs = 1; - const intptr_t kNumTemps = 0; - LocationSummary* summary = new (zone) - LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); - summary->set_in(0, Location::RequiresRegister()); - return summary; -} - - -void CheckSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - __ Comment("CheckSmiInstr"); - Register value = locs()->in(0).reg(); - Label* deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptCheckSmi, - licm_hoisted_ ? ICData::kHoisted : 0); - __ BranchIfNotSmi(value, deopt); -} - - -LocationSummary* CheckClassIdInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - const intptr_t kNumInputs = 1; - const intptr_t kNumTemps = 0; - LocationSummary* summary = new (zone) - LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); - summary->set_in(0, cids_.IsSingleCid() ? Location::RequiresRegister() - : Location::WritableRegister()); - - return summary; -} - - -void CheckClassIdInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - Register value = locs()->in(0).reg(); - Label* deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptCheckClass); - if (cids_.IsSingleCid()) { - __ BranchNotEqual(value, Immediate(Smi::RawValue(cids_.cid_start)), deopt); - } else { - __ AddImmediate(value, value, -Smi::RawValue(cids_.cid_start)); - // TODO(erikcorry): We should use sltiu instead of the temporary TMP if - // the range is small enough. - __ LoadImmediate(TMP, cids_.Extent()); - // Reverse comparison so we get 1 if biased_cid > tmp ie cid is out of - // range. 
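Review note: EmitCheckCid above (and CheckClassIdInstr just below) test a class-id range with one unsigned comparison: after biasing by cid_start, any cid below the range wraps around to a huge unsigned value, so the reversed `sltu` against the extent flags both ends at once. The idiom, host-side (the name is ours):

```cpp
#include <cstdint>

bool CidInRange(intptr_t cid, intptr_t cid_start, intptr_t cid_end) {
  // One unsigned compare covers cid < cid_start (wraps around) and
  // cid > cid_end.
  return static_cast<uintptr_t>(cid - cid_start) <=
         static_cast<uintptr_t>(cid_end - cid_start);
}
```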
-    __ sltu(TMP, TMP, value);
-    __ bne(TMP, ZR, deopt);
-  }
-}
-
-
-LocationSummary* GenericCheckBoundInstr::MakeLocationSummary(Zone* zone,
-                                                             bool opt) const {
-  const intptr_t kNumInputs = 2;
-  const intptr_t kNumTemps = 0;
-  LocationSummary* locs = new (zone) LocationSummary(
-      zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
-  locs->set_in(kLengthPos, Location::RequiresRegister());
-  locs->set_in(kIndexPos, Location::RequiresRegister());
-  return locs;
-}
-
-
-class RangeErrorSlowPath : public SlowPathCode {
- public:
-  RangeErrorSlowPath(GenericCheckBoundInstr* instruction, intptr_t try_index)
-      : instruction_(instruction), try_index_(try_index) {}
-
-  virtual void EmitNativeCode(FlowGraphCompiler* compiler) {
-    if (Assembler::EmittingComments()) {
-      __ Comment("slow path check bound operation");
-    }
-    __ Bind(entry_label());
-    LocationSummary* locs = instruction_->locs();
-    compiler->SaveLiveRegisters(locs);
-    __ Push(locs->in(0).reg());
-    __ Push(locs->in(1).reg());
-    __ CallRuntime(kRangeErrorRuntimeEntry, 2);
-    compiler->AddDescriptor(
-        RawPcDescriptors::kOther, compiler->assembler()->CodeSize(),
-        instruction_->deopt_id(), instruction_->token_pos(), try_index_);
-    Environment* env = compiler->SlowPathEnvironmentFor(instruction_);
-    compiler->EmitCatchEntryState(env, try_index_);
-    __ break_(0);
-  }
-
- private:
-  GenericCheckBoundInstr* instruction_;
-  intptr_t try_index_;
-};
-
-
-void GenericCheckBoundInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
-  RangeErrorSlowPath* slow_path =
-      new RangeErrorSlowPath(this, compiler->CurrentTryIndex());
-  compiler->AddSlowPathCode(slow_path);
-
-  Location length_loc = locs()->in(kLengthPos);
-  Location index_loc = locs()->in(kIndexPos);
-  Register length = length_loc.reg();
-  Register index = index_loc.reg();
-  const intptr_t index_cid = this->index()->Type()->ToCid();
-  if (index_cid != kSmiCid) {
-    __ BranchIfNotSmi(index, slow_path->entry_label());
-  }
-  __ BranchUnsignedGreaterEqual(index, length, slow_path->entry_label());
-}
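Review note: GenericCheckBoundInstr::EmitNativeCode above compares the *tagged* smis directly: tagging is a left shift, which preserves order, and a negative index becomes a huge unsigned value, so the single `BranchUnsignedGreaterEqual` rejects both negative and too-large indices. Sketch (the name is ours):

```cpp
#include <cstdint>

bool TaggedIndexOutOfBounds(int32_t tagged_index, int32_t tagged_length) {
  // Unsigned view: negative indices wrap above any non-negative length.
  return static_cast<uint32_t>(tagged_index) >=
         static_cast<uint32_t>(tagged_length);
}
// TaggedIndexOutOfBounds(-1 * 2, 10 * 2) -> true  (index -1)
// TaggedIndexOutOfBounds(9 * 2, 10 * 2)  -> false (index 9 of 10)
```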
-
-
-LocationSummary* CheckArrayBoundInstr::MakeLocationSummary(Zone* zone,
-                                                           bool opt) const {
-  const intptr_t kNumInputs = 2;
-  const intptr_t kNumTemps = 0;
-  LocationSummary* locs = new (zone)
-      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
-  locs->set_in(kLengthPos, Location::RegisterOrSmiConstant(length()));
-  locs->set_in(kIndexPos, Location::RegisterOrSmiConstant(index()));
-  return locs;
-}
-
-
-void CheckArrayBoundInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
-  uint32_t flags = generalized_ ? ICData::kGeneralized : 0;
-  flags |= licm_hoisted_ ? ICData::kHoisted : 0;
-  Label* deopt =
-      compiler->AddDeoptStub(deopt_id(), ICData::kDeoptCheckArrayBound, flags);
-
-  Location length_loc = locs()->in(kLengthPos);
-  Location index_loc = locs()->in(kIndexPos);
-
-  if (length_loc.IsConstant() && index_loc.IsConstant()) {
-    ASSERT((Smi::Cast(length_loc.constant()).Value() <=
-            Smi::Cast(index_loc.constant()).Value()) ||
-           (Smi::Cast(index_loc.constant()).Value() < 0));
-    // Unconditionally deoptimize for constant bounds checks because they
-    // only occur when the index is out-of-bounds.
-    __ b(deopt);
-    return;
-  }
-
-  const intptr_t index_cid = index()->Type()->ToCid();
-  if (index_loc.IsConstant()) {
-    Register length = length_loc.reg();
-    const Smi& index = Smi::Cast(index_loc.constant());
-    __ BranchUnsignedLessEqual(
-        length, Immediate(reinterpret_cast<int32_t>(index.raw())), deopt);
-  } else if (length_loc.IsConstant()) {
-    const Smi& length = Smi::Cast(length_loc.constant());
-    Register index = index_loc.reg();
-    if (index_cid != kSmiCid) {
-      __ BranchIfNotSmi(index, deopt);
-    }
-    if (length.Value() == Smi::kMaxValue) {
-      __ BranchSignedLess(index, Immediate(0), deopt);
-    } else {
-      __ BranchUnsignedGreaterEqual(
-          index, Immediate(reinterpret_cast<int32_t>(length.raw())), deopt);
-    }
-  } else {
-    Register length = length_loc.reg();
-    Register index = index_loc.reg();
-    if (index_cid != kSmiCid) {
-      __ BranchIfNotSmi(index, deopt);
-    }
-    __ BranchUnsignedGreaterEqual(index, length, deopt);
-  }
-}
-
-LocationSummary* BinaryMintOpInstr::MakeLocationSummary(Zone* zone,
-                                                        bool opt) const {
-  const intptr_t kNumInputs = 2;
-  const intptr_t kNumTemps = 0;
-  LocationSummary* summary = new (zone)
-      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
-  summary->set_in(0, Location::Pair(Location::RequiresRegister(),
-                                    Location::RequiresRegister()));
-  summary->set_in(1, Location::Pair(Location::RequiresRegister(),
-                                    Location::RequiresRegister()));
-  summary->set_out(0, Location::Pair(Location::RequiresRegister(),
-                                     Location::RequiresRegister()));
-  return summary;
-}
-
-
-void BinaryMintOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
-  PairLocation* left_pair = locs()->in(0).AsPairLocation();
-  Register left_lo = left_pair->At(0).reg();
-  Register left_hi = left_pair->At(1).reg();
-  PairLocation* right_pair = locs()->in(1).AsPairLocation();
-  Register right_lo = right_pair->At(0).reg();
-  Register right_hi = right_pair->At(1).reg();
-  PairLocation* out_pair = locs()->out(0).AsPairLocation();
-  Register out_lo = out_pair->At(0).reg();
-  Register out_hi = out_pair->At(1).reg();
-
-  Label* deopt = NULL;
-  if (CanDeoptimize()) {
-    deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryMintOp);
-  }
-  switch (op_kind()) {
-    case Token::kBIT_AND: {
-      __ and_(out_lo, left_lo, right_lo);
-      __ and_(out_hi, left_hi, right_hi);
-      break;
-    }
-    case Token::kBIT_OR: {
-      __ or_(out_lo, left_lo, right_lo);
-      __ or_(out_hi, left_hi, right_hi);
-      break;
-    }
-    case Token::kBIT_XOR: {
-      __ xor_(out_lo, left_lo, right_lo);
-      __ xor_(out_hi, left_hi, right_hi);
-      break;
-    }
-    case Token::kADD:
-    case Token::kSUB: {
-      if (op_kind() == Token::kADD) {
-        __ addu(out_lo, left_lo, right_lo);
-        __ sltu(TMP, out_lo, left_lo);  // TMP = carry of left_lo + right_lo.
-        __ addu(out_hi, left_hi, right_hi);
-        __ addu(out_hi, out_hi, TMP);
-        if (can_overflow()) {
-          __ xor_(CMPRES1, out_hi, left_hi);
-          __ xor_(TMP, out_hi, right_hi);
-          __ and_(CMPRES1, TMP, CMPRES1);
-          __ bltz(CMPRES1, deopt);
-        }
-      } else {
-        __ subu(out_lo, left_lo, right_lo);
-        __ sltu(TMP, left_lo, out_lo);  // TMP = borrow of left_lo - right_lo.
-        __ subu(out_hi, left_hi, right_hi);
-        __ subu(out_hi, out_hi, TMP);
-        if (can_overflow()) {
-          __ xor_(CMPRES1, out_hi, left_hi);
-          __ xor_(TMP, left_hi, right_hi);
-          __ and_(CMPRES1, TMP, CMPRES1);
-          __ bltz(CMPRES1, deopt);
-        }
-      }
-      break;
-    }
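Review note: the kADD/kSUB overflow tests above use the sign-xor identity on the high words: addition overflows only when both operands share a sign that the result does not, and subtraction only when the operands' signs differ. Host-side form (names are ours; the precomputed sum is passed in so the sketch itself never overflows):

```cpp
#include <cstdint>

bool AddOverflowed64(int64_t a, int64_t b, int64_t sum) {
  return ((sum ^ a) & (sum ^ b)) < 0;  // xor_/xor_/and_/bltz for kADD
}

bool SubOverflowed64(int64_t a, int64_t b, int64_t diff) {
  return ((diff ^ a) & (a ^ b)) < 0;   // the kSUB variant above
}
```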
-    case Token::kMUL: {
-      // The product of two signed 32-bit integers fits in a signed 64-bit
-      // result without causing overflow.
-      // We deopt on larger inputs.
-      // TODO(regis): Range analysis may eliminate the deopt check.
-      __ sra(CMPRES1, left_lo, 31);
-      __ bne(CMPRES1, left_hi, deopt);
-      __ delay_slot()->sra(CMPRES2, right_lo, 31);
-      __ bne(CMPRES2, right_hi, deopt);
-      __ delay_slot()->mult(left_lo, right_lo);
-      __ mflo(out_lo);
-      __ mfhi(out_hi);
-      break;
-    }
-    default:
-      UNREACHABLE();
-  }
-}
-
-
-LocationSummary* ShiftMintOpInstr::MakeLocationSummary(Zone* zone,
-                                                       bool opt) const {
-  const intptr_t kNumInputs = 2;
-  const intptr_t kNumTemps = 0;
-  LocationSummary* summary = new (zone)
-      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
-  summary->set_in(0, Location::Pair(Location::RequiresRegister(),
-                                    Location::RequiresRegister()));
-  summary->set_in(1, Location::WritableRegisterOrSmiConstant(right()));
-  summary->set_out(0, Location::Pair(Location::RequiresRegister(),
-                                     Location::RequiresRegister()));
-  return summary;
-}
-
-
-void ShiftMintOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
-  PairLocation* left_pair = locs()->in(0).AsPairLocation();
-  Register left_lo = left_pair->At(0).reg();
-  Register left_hi = left_pair->At(1).reg();
-  PairLocation* out_pair = locs()->out(0).AsPairLocation();
-  Register out_lo = out_pair->At(0).reg();
-  Register out_hi = out_pair->At(1).reg();
-
-  Label* deopt = NULL;
-  if (CanDeoptimize()) {
-    deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryMintOp);
-  }
-  if (locs()->in(1).IsConstant()) {
-    // Code for a constant shift amount.
-    ASSERT(locs()->in(1).constant().IsSmi());
-    const int32_t shift =
-        reinterpret_cast<int32_t>(locs()->in(1).constant().raw()) >> 1;
-    switch (op_kind()) {
-      case Token::kSHR: {
-        if (shift < 32) {
-          __ sll(out_lo, left_hi, 32 - shift);
-          __ srl(TMP, left_lo, shift);
-          __ or_(out_lo, out_lo, TMP);
-          __ sra(out_hi, left_hi, shift);
-        } else {
-          if (shift == 32) {
-            __ mov(out_lo, left_hi);
-          } else if (shift < 64) {
-            __ sra(out_lo, left_hi, shift - 32);
-          } else {
-            __ sra(out_lo, left_hi, 31);
-          }
-          __ sra(out_hi, left_hi, 31);
-        }
-        break;
-      }
-      case Token::kSHL: {
-        ASSERT(shift < 64);
-        if (shift < 32) {
-          __ srl(out_hi, left_lo, 32 - shift);
-          __ sll(TMP, left_hi, shift);
-          __ or_(out_hi, out_hi, TMP);
-          __ sll(out_lo, left_lo, shift);
-        } else {
-          __ sll(out_hi, left_lo, shift - 32);
-          __ mov(out_lo, ZR);
-        }
-        // Check for overflow.
-        if (can_overflow()) {
-          // Compare high word from input with shifted high word from output.
-          // Overflow if they aren't equal.
-          // If shift > 32, also compare low word from input with high word from
-          // output shifted back shift - 32.
-          if (shift > 32) {
-            __ sra(TMP, out_hi, shift - 32);
-            __ bne(left_lo, TMP, deopt);
-            __ delay_slot()->sra(TMP, out_hi, 31);
-          } else if (shift == 32) {
-            __ sra(TMP, out_hi, 31);
-          } else {
-            __ sra(TMP, out_hi, shift);
-          }
-          __ bne(left_hi, TMP, deopt);
-        }
-        break;
-      }
-      default:
-        UNREACHABLE();
-    }
-  } else {
-    // Code for a variable shift amount.
-    Register shift = locs()->in(1).reg();
-
-    // Code below assumes shift amount is not 0 (cannot shift by 32 - 0).
-    Label non_zero_shift, done;
-    __ bne(shift, ZR, &non_zero_shift);
-    __ delay_slot()->mov(out_lo, left_lo);
-    __ b(&done);
-    __ delay_slot()->mov(out_hi, left_hi);
-    __ Bind(&non_zero_shift);
-
-    // Deopt if shift is larger than 63 or less than 0.
-    if (has_shift_count_check()) {
-      __ sltiu(CMPRES1, shift, Immediate(2 * (kMintShiftCountLimit + 1)));
-      __ beq(CMPRES1, ZR, deopt);
-      // Untag shift count.
-      __ delay_slot()->SmiUntag(shift);
-    } else {
-      // Untag shift count.
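Review note: the constant kSHR path above composes a 64-bit arithmetic shift from 32-bit pieces: for shift < 32 the low word picks up the bits leaving the high word, and for larger shifts the high word collapses to pure sign bits. A sketch assuming 0 < shift < 64 and the usual arithmetic `>>` on signed values (names are ours):

```cpp
#include <cstdint>

void AShr64(int32_t hi, uint32_t lo, int shift,  // assumes 0 < shift < 64
            int32_t* out_hi, uint32_t* out_lo) {
  if (shift < 32) {
    *out_lo = (static_cast<uint32_t>(hi) << (32 - shift)) | (lo >> shift);
    *out_hi = hi >> shift;                                // keeps the sign
  } else {
    *out_lo = static_cast<uint32_t>(hi >> (shift - 32));  // shift == 32: mov
    *out_hi = hi >> 31;                                   // sign bits only
  }
}
```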
- __ SmiUntag(shift); - } - - switch (op_kind()) { - case Token::kSHR: { - Label large_shift; - __ sltiu(CMPRES1, shift, Immediate(32)); - __ beq(CMPRES1, ZR, &large_shift); - - // 0 < shift < 32. - __ delay_slot()->ori(TMP, ZR, Immediate(32)); - __ subu(TMP, TMP, shift); // TMP = 32 - shift; 0 < TMP <= 31. - __ sllv(out_lo, left_hi, TMP); - __ srlv(TMP, left_lo, shift); - __ or_(out_lo, out_lo, TMP); - __ b(&done); - __ delay_slot()->srav(out_hi, left_hi, shift); - - // shift >= 32. - __ Bind(&large_shift); - __ sra(out_hi, left_hi, 31); - __ srav(out_lo, left_hi, shift); // Only 5 low bits of shift used. - - break; - } - case Token::kSHL: { - Label large_shift; - __ sltiu(CMPRES1, shift, Immediate(32)); - __ beq(CMPRES1, ZR, &large_shift); - - // 0 < shift < 32. - __ delay_slot()->ori(TMP, ZR, Immediate(32)); - __ subu(TMP, TMP, shift); // TMP = 32 - shift; 0 < TMP <= 31. - __ srlv(out_hi, left_lo, TMP); - __ sllv(TMP, left_hi, shift); - __ or_(out_hi, out_hi, TMP); - // Check for overflow. - if (can_overflow()) { - // Compare high word from input with shifted high word from output. - __ srav(TMP, out_hi, shift); - __ beq(TMP, left_hi, &done); - __ delay_slot()->sllv(out_lo, left_lo, shift); - __ b(deopt); - } else { - __ b(&done); - __ delay_slot()->sllv(out_lo, left_lo, shift); - } - - // shift >= 32. - __ Bind(&large_shift); - __ sllv(out_hi, left_lo, shift); // Only 5 low bits of shift used. - // Check for overflow. - if (can_overflow()) { - // Compare low word from input with shifted high word from output and - // high word from input to sign of output. - // Overflow if they aren't equal. - __ srav(TMP, out_hi, shift); - __ bne(TMP, left_lo, deopt); - __ delay_slot()->sra(TMP, out_hi, 31); - __ bne(TMP, left_hi, deopt); - __ delay_slot()->mov(out_lo, ZR); - } else { - __ mov(out_lo, ZR); - } - break; - } - default: - UNREACHABLE(); - } - __ Bind(&done); - } -} - - -LocationSummary* UnaryMintOpInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - const intptr_t kNumInputs = 1; - const intptr_t kNumTemps = 0; - LocationSummary* summary = new (zone) - LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); - summary->set_in(0, Location::Pair(Location::RequiresRegister(), - Location::RequiresRegister())); - summary->set_out(0, Location::Pair(Location::RequiresRegister(), - Location::RequiresRegister())); - return summary; -} - - -void UnaryMintOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - ASSERT(op_kind() == Token::kBIT_NOT); - PairLocation* left_pair = locs()->in(0).AsPairLocation(); - Register left_lo = left_pair->At(0).reg(); - Register left_hi = left_pair->At(1).reg(); - - PairLocation* out_pair = locs()->out(0).AsPairLocation(); - Register out_lo = out_pair->At(0).reg(); - Register out_hi = out_pair->At(1).reg(); - - __ nor(out_lo, ZR, left_lo); - __ nor(out_hi, ZR, left_hi); -} - - -CompileType BinaryUint32OpInstr::ComputeType() const { - return CompileType::Int(); -} - - -CompileType ShiftUint32OpInstr::ComputeType() const { - return CompileType::Int(); -} - - -CompileType UnaryUint32OpInstr::ComputeType() const { - return CompileType::Int(); -} - - -LocationSummary* BinaryUint32OpInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - const intptr_t kNumInputs = 2; - const intptr_t kNumTemps = 0; - LocationSummary* summary = new (zone) - LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); - summary->set_in(0, Location::RequiresRegister()); - summary->set_in(1, Location::RequiresRegister()); - summary->set_out(0, 
Location::RequiresRegister()); - return summary; -} - - -void BinaryUint32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - Register left = locs()->in(0).reg(); - Register right = locs()->in(1).reg(); - Register out = locs()->out(0).reg(); - ASSERT(out != left); - switch (op_kind()) { - case Token::kBIT_AND: - __ and_(out, left, right); - break; - case Token::kBIT_OR: - __ or_(out, left, right); - break; - case Token::kBIT_XOR: - __ xor_(out, left, right); - break; - case Token::kADD: - __ addu(out, left, right); - break; - case Token::kSUB: - __ subu(out, left, right); - break; - case Token::kMUL: - __ multu(left, right); - __ mflo(out); - break; - default: - UNREACHABLE(); - } -} - - -LocationSummary* ShiftUint32OpInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - const intptr_t kNumInputs = 2; - const intptr_t kNumTemps = 1; - LocationSummary* summary = new (zone) - LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); - summary->set_in(0, Location::RequiresRegister()); - summary->set_in(1, Location::RegisterOrSmiConstant(right())); - summary->set_temp(0, Location::RequiresRegister()); - summary->set_out(0, Location::RequiresRegister()); - return summary; -} - - -void ShiftUint32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - const intptr_t kShifterLimit = 31; - - Register left = locs()->in(0).reg(); - Register out = locs()->out(0).reg(); - Register temp = locs()->temp(0).reg(); - - ASSERT(left != out); - - Label* deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryMintOp); - - if (locs()->in(1).IsConstant()) { - // Shifter is constant. - - const Object& constant = locs()->in(1).constant(); - ASSERT(constant.IsSmi()); - const intptr_t shift_value = Smi::Cast(constant).Value(); - - // Do the shift: (shift_value > 0) && (shift_value <= kShifterLimit). - switch (op_kind()) { - case Token::kSHR: - __ srl(out, left, shift_value); - break; - case Token::kSHL: - __ sll(out, left, shift_value); - break; - default: - UNREACHABLE(); - } - return; - } - - // Non constant shift value. - Register shifter = locs()->in(1).reg(); - - __ SmiUntag(temp, shifter); - // If shift value is < 0, deoptimize. - __ bltz(temp, deopt); - __ delay_slot()->mov(out, left); - __ sltiu(CMPRES1, temp, Immediate(kShifterLimit + 1)); - __ movz(out, ZR, CMPRES1); // out = shift > kShifterLimit ? 0 : left. - // Do the shift % 32. 
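// [Editor's sketch] The variable-shift semantics established around this
// point, as host C++ with a hypothetical name: a negative count has already
// deopted, and the sltiu/movz pair above zeroes the input for counts past 31,
// so the srlv/sllv below (which only see the low 5 bits) stay correct.
#include <cstdint>
static uint32_t ShiftUint32(uint32_t value, int32_t count, bool is_shl) {
  if (count > 31) return 0;  // movz path: out becomes ZR.
  return is_shl ? (value << count) : (value >> count);
}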
- switch (op_kind()) { - case Token::kSHR: - __ srlv(out, out, temp); - break; - case Token::kSHL: - __ sllv(out, out, temp); - break; - default: - UNREACHABLE(); - } -} - - -LocationSummary* UnaryUint32OpInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - const intptr_t kNumInputs = 1; - const intptr_t kNumTemps = 0; - LocationSummary* summary = new (zone) - LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); - summary->set_in(0, Location::RequiresRegister()); - summary->set_out(0, Location::RequiresRegister()); - return summary; -} - - -void UnaryUint32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - Register left = locs()->in(0).reg(); - Register out = locs()->out(0).reg(); - ASSERT(left != out); - - ASSERT(op_kind() == Token::kBIT_NOT); - - __ nor(out, ZR, left); -} - - -DEFINE_UNIMPLEMENTED_INSTRUCTION(BinaryInt32OpInstr) - - -LocationSummary* UnboxedIntConverterInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - const intptr_t kNumInputs = 1; - const intptr_t kNumTemps = 0; - LocationSummary* summary = new (zone) - LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); - if (from() == kUnboxedMint) { - ASSERT((to() == kUnboxedUint32) || (to() == kUnboxedInt32)); - summary->set_in(0, Location::Pair(Location::RequiresRegister(), - Location::RequiresRegister())); - summary->set_out(0, Location::RequiresRegister()); - } else if (to() == kUnboxedMint) { - ASSERT((from() == kUnboxedUint32) || (from() == kUnboxedInt32)); - summary->set_in(0, Location::RequiresRegister()); - summary->set_out(0, Location::Pair(Location::RequiresRegister(), - Location::RequiresRegister())); - } else { - ASSERT((to() == kUnboxedUint32) || (to() == kUnboxedInt32)); - ASSERT((from() == kUnboxedUint32) || (from() == kUnboxedInt32)); - summary->set_in(0, Location::RequiresRegister()); - summary->set_out(0, Location::SameAsFirstInput()); - } - return summary; -} - - -void UnboxedIntConverterInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - if (from() == kUnboxedInt32 && to() == kUnboxedUint32) { - const Register out = locs()->out(0).reg(); - // Representations are bitwise equivalent. - ASSERT(out == locs()->in(0).reg()); - } else if (from() == kUnboxedUint32 && to() == kUnboxedInt32) { - const Register out = locs()->out(0).reg(); - // Representations are bitwise equivalent. - ASSERT(out == locs()->in(0).reg()); - if (CanDeoptimize()) { - Label* deopt = - compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnboxInteger); - __ BranchSignedLess(out, Immediate(0), deopt); - } - } else if (from() == kUnboxedMint) { - ASSERT(to() == kUnboxedUint32 || to() == kUnboxedInt32); - PairLocation* in_pair = locs()->in(0).AsPairLocation(); - Register in_lo = in_pair->At(0).reg(); - Register in_hi = in_pair->At(1).reg(); - Register out = locs()->out(0).reg(); - // Copy low word. - __ mov(out, in_lo); - if (CanDeoptimize()) { - Label* deopt = - compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnboxInteger); - ASSERT(to() == kUnboxedInt32); - __ sra(TMP, in_lo, 31); - __ bne(in_hi, TMP, deopt); - } - } else if (from() == kUnboxedUint32 || from() == kUnboxedInt32) { - ASSERT(to() == kUnboxedMint); - Register in = locs()->in(0).reg(); - PairLocation* out_pair = locs()->out(0).AsPairLocation(); - Register out_lo = out_pair->At(0).reg(); - Register out_hi = out_pair->At(1).reg(); - // Copy low word. 
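// [Editor's sketch] The two conversion checks this instruction performs, as
// host C++ with hypothetical names. Narrowing a mint succeeds only when the
// high word is the sign extension of the low word (the sra/bne deopt test);
// widening zero- or sign-extends ('>>' on signed int assumed arithmetic).
#include <cstdint>
static bool MintFitsInInt32(uint32_t in_lo, int32_t in_hi) {
  return in_hi == (static_cast<int32_t>(in_lo) >> 31);
}
static void WidenToMint(int32_t in, bool from_uint32,
                        uint32_t* out_lo, int32_t* out_hi) {
  *out_lo = static_cast<uint32_t>(in);      // Copy low word.
  *out_hi = from_uint32 ? 0 : (in >> 31);   // xor_ vs. sra path below.
}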
- __ mov(out_lo, in); - if (from() == kUnboxedUint32) { - __ xor_(out_hi, out_hi, out_hi); - } else { - ASSERT(from() == kUnboxedInt32); - __ sra(out_hi, in, 31); - } - } else { - UNREACHABLE(); - } -} - - -LocationSummary* ThrowInstr::MakeLocationSummary(Zone* zone, bool opt) const { - return new (zone) LocationSummary(zone, 0, 0, LocationSummary::kCall); -} - - -void ThrowInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - compiler->GenerateRuntimeCall(token_pos(), deopt_id(), kThrowRuntimeEntry, 1, - locs()); - __ break_(0); -} - - -LocationSummary* ReThrowInstr::MakeLocationSummary(Zone* zone, bool opt) const { - return new (zone) LocationSummary(zone, 0, 0, LocationSummary::kCall); -} - - -void ReThrowInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - compiler->SetNeedsStackTrace(catch_try_index()); - compiler->GenerateRuntimeCall(token_pos(), deopt_id(), kReThrowRuntimeEntry, - 2, locs()); - __ break_(0); -} - - -LocationSummary* StopInstr::MakeLocationSummary(Zone* zone, bool opt) const { - return new (zone) LocationSummary(zone, 0, 0, LocationSummary::kNoCall); -} - - -void StopInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - __ Stop(message()); -} - - -void GraphEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - if (!compiler->CanFallThroughTo(normal_entry())) { - __ b(compiler->GetJumpLabel(normal_entry())); - } -} - - -LocationSummary* GotoInstr::MakeLocationSummary(Zone* zone, bool opt) const { - return new (zone) LocationSummary(zone, 0, 0, LocationSummary::kNoCall); -} - - -void GotoInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - __ Comment("GotoInstr"); - if (!compiler->is_optimizing()) { - if (FLAG_reorder_basic_blocks) { - compiler->EmitEdgeCounter(block()->preorder_number()); - } - // Add a deoptimization descriptor for deoptimizing instructions that - // may be inserted before this instruction. - compiler->AddCurrentDescriptor(RawPcDescriptors::kDeopt, GetDeoptId(), - TokenPosition::kNoSource); - } - if (HasParallelMove()) { - compiler->parallel_move_resolver()->EmitNativeCode(parallel_move()); - } - - // We can fall through if the successor is the next block in the list. - // Otherwise, we need a jump. - if (!compiler->CanFallThroughTo(successor())) { - __ b(compiler->GetJumpLabel(successor())); - } -} - - -LocationSummary* IndirectGotoInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - const intptr_t kNumInputs = 1; - const intptr_t kNumTemps = 1; - - LocationSummary* summary = new (zone) - LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); - - summary->set_in(0, Location::RequiresRegister()); - summary->set_temp(0, Location::RequiresRegister()); - - return summary; -} - - -void IndirectGotoInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - Register target_reg = locs()->temp_slot(0)->reg(); - - __ GetNextPC(target_reg, TMP); - const intptr_t entry_offset = __ CodeSize() - 1 * Instr::kInstrSize; - __ AddImmediate(target_reg, target_reg, -entry_offset); - - // Add the offset. - Register offset_reg = locs()->in(0).reg(); - if (offset()->definition()->representation() == kTagged) { - __ SmiUntag(offset_reg); - } - __ addu(target_reg, target_reg, offset_reg); - - // Jump to the absolute address. 
- __ jr(target_reg); -} - - -LocationSummary* StrictCompareInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - const intptr_t kNumInputs = 2; - const intptr_t kNumTemps = 0; - if (needs_number_check()) { - LocationSummary* locs = new (zone) - LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall); - locs->set_in(0, Location::RegisterLocation(A0)); - locs->set_in(1, Location::RegisterLocation(A1)); - locs->set_out(0, Location::RegisterLocation(A0)); - return locs; - } - LocationSummary* locs = new (zone) - LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); - locs->set_in(0, Location::RegisterOrConstant(left())); - // Only one of the inputs can be a constant. Choose register if the first one - // is a constant. - locs->set_in(1, locs->in(0).IsConstant() - ? Location::RequiresRegister() - : Location::RegisterOrConstant(right())); - locs->set_out(0, Location::RequiresRegister()); - return locs; -} - - -Condition StrictCompareInstr::EmitComparisonCode(FlowGraphCompiler* compiler, - BranchLabels labels) { - Location left = locs()->in(0); - Location right = locs()->in(1); - ASSERT(!left.IsConstant() || !right.IsConstant()); - Condition true_condition; - if (left.IsConstant()) { - true_condition = compiler->EmitEqualityRegConstCompare( - right.reg(), left.constant(), needs_number_check(), token_pos(), - deopt_id_); - } else if (right.IsConstant()) { - true_condition = compiler->EmitEqualityRegConstCompare( - left.reg(), right.constant(), needs_number_check(), token_pos(), - deopt_id_); - } else { - true_condition = compiler->EmitEqualityRegRegCompare( - left.reg(), right.reg(), needs_number_check(), token_pos(), deopt_id_); - } - if (kind() != Token::kEQ_STRICT) { - ASSERT(kind() == Token::kNE_STRICT); - true_condition = NegateCondition(true_condition); - } - return true_condition; -} - - -LocationSummary* BooleanNegateInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - return LocationSummary::Make(zone, 1, Location::RequiresRegister(), - LocationSummary::kNoCall); -} - - -void BooleanNegateInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - Register value = locs()->in(0).reg(); - Register result = locs()->out(0).reg(); - - __ LoadObject(result, Bool::True()); - __ LoadObject(TMP, Bool::False()); - __ subu(CMPRES1, value, result); - __ movz(result, TMP, CMPRES1); // If value is True, move False into result. -} - - -LocationSummary* AllocateObjectInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - return MakeCallSummary(zone); -} - - -void AllocateObjectInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - __ Comment("AllocateObjectInstr"); - const Code& stub = Code::ZoneHandle( - compiler->zone(), StubCode::GetAllocationStubForClass(cls())); - const StubEntry stub_entry(stub); - compiler->GenerateCall(token_pos(), stub_entry, RawPcDescriptors::kOther, - locs()); - compiler->AddStubCallTarget(stub); - __ Drop(ArgumentCount()); // Discard arguments. 
-} - - -void DebugStepCheckInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - ASSERT(!compiler->is_optimizing()); - __ BranchLinkPatchable(*StubCode::DebugStepCheck_entry()); - compiler->AddCurrentDescriptor(stub_kind_, deopt_id_, token_pos()); - compiler->RecordSafepoint(locs()); -} - - -LocationSummary* GrowRegExpStackInstr::MakeLocationSummary(Zone* zone, - bool opt) const { - const intptr_t kNumInputs = 1; - const intptr_t kNumTemps = 0; - LocationSummary* locs = new (zone) - LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall); - locs->set_in(0, Location::RegisterLocation(T0)); - locs->set_out(0, Location::RegisterLocation(T0)); - return locs; -} - - -void GrowRegExpStackInstr::EmitNativeCode(FlowGraphCompiler* compiler) { - const Register typed_data = locs()->in(0).reg(); - const Register result = locs()->out(0).reg(); - __ Comment("GrowRegExpStackInstr"); - __ addiu(SP, SP, Immediate(-2 * kWordSize)); - __ LoadObject(TMP, Object::null_object()); - __ sw(TMP, Address(SP, 1 * kWordSize)); - __ sw(typed_data, Address(SP, 0 * kWordSize)); - compiler->GenerateRuntimeCall(TokenPosition::kNoSource, deopt_id(), - kGrowRegExpStackRuntimeEntry, 1, locs()); - __ lw(result, Address(SP, 1 * kWordSize)); - __ addiu(SP, SP, Immediate(2 * kWordSize)); -} - - -} // namespace dart - -#endif // defined TARGET_ARCH_MIPS diff --git a/runtime/vm/intrinsifier_mips.cc b/runtime/vm/intrinsifier_mips.cc deleted file mode 100644 index 1dca1af5810..00000000000 --- a/runtime/vm/intrinsifier_mips.cc +++ /dev/null @@ -1,2444 +0,0 @@ -// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file -// for details. All rights reserved. Use of this source code is governed by a -// BSD-style license that can be found in the LICENSE file. - -#include "vm/globals.h" // Needed here to get TARGET_ARCH_MIPS. -#if defined(TARGET_ARCH_MIPS) - -#include "vm/intrinsifier.h" - -#include "vm/assembler.h" -#include "vm/dart_entry.h" -#include "vm/flow_graph_compiler.h" -#include "vm/object.h" -#include "vm/object_store.h" -#include "vm/regexp_assembler.h" -#include "vm/symbols.h" -#include "vm/timeline.h" - -namespace dart { - -// When entering intrinsics code: -// S4: Arguments descriptor -// RA: Return address -// The S4 register can be destroyed only if there is no slow-path, i.e. -// if the intrinsified method always executes a return. -// The FP register should not be modified, because it is used by the profiler. -// The PP and THR registers (see constants_mips.h) must be preserved. - -#define __ assembler-> - - -intptr_t Intrinsifier::ParameterSlotFromSp() { - return -1; -} - - -static bool IsABIPreservedRegister(Register reg) { - return ((1 << reg) & kAbiPreservedCpuRegs) != 0; -} - -void Intrinsifier::IntrinsicCallPrologue(Assembler* assembler) { - ASSERT(IsABIPreservedRegister(CODE_REG)); - ASSERT(IsABIPreservedRegister(ARGS_DESC_REG)); - ASSERT(IsABIPreservedRegister(CALLEE_SAVED_TEMP)); - ASSERT(CALLEE_SAVED_TEMP != CODE_REG); - ASSERT(CALLEE_SAVED_TEMP != ARGS_DESC_REG); - - assembler->Comment("IntrinsicCallPrologue"); - assembler->mov(CALLEE_SAVED_TEMP, LRREG); -} - - -void Intrinsifier::IntrinsicCallEpilogue(Assembler* assembler) { - assembler->Comment("IntrinsicCallEpilogue"); - assembler->mov(LRREG, CALLEE_SAVED_TEMP); -} - - -// Intrinsify only for Smi value and index. Non-smi values need a store buffer -// update. Array length is always a Smi. 
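// [Editor's sketch] The Smi tagging scheme the intrinsics below keep testing
// (kSmiTag == 0, kSmiTagShift == 1, kSmiTagMask == 1), as host C++ with
// hypothetical names; values are assumed to fit in 31 bits so the tag shift
// cannot overflow.
#include <cstdint>
static bool IsSmi(int32_t raw) { return (raw & 1) == 0; }  // The andi test.
static int32_t SmiTag(int32_t value) { return value << 1; }
static int32_t SmiUntag(int32_t raw) { return raw >> 1; }  // sra by 1.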
-void Intrinsifier::ObjectArraySetIndexed(Assembler* assembler) {
- if (Isolate::Current()->type_checks()) {
- return;
- }
-
- Label fall_through;
- __ lw(T1, Address(SP, 1 * kWordSize)); // Index.
- __ andi(CMPRES1, T1, Immediate(kSmiTagMask));
- // Index not Smi.
- __ bne(CMPRES1, ZR, &fall_through);
-
- __ lw(T0, Address(SP, 2 * kWordSize)); // Array.
- // Range check.
- __ lw(T3, FieldAddress(T0, Array::length_offset())); // Array length.
- // Runtime throws exception.
- __ BranchUnsignedGreaterEqual(T1, T3, &fall_through);
-
- // Note that T1 is Smi, i.e., times 2.
- ASSERT(kSmiTagShift == 1);
- __ lw(T2, Address(SP, 0 * kWordSize)); // Value.
- __ sll(T1, T1, 1); // T1 is Smi.
- __ addu(T1, T0, T1);
- __ StoreIntoObject(T0, FieldAddress(T1, Array::data_offset()), T2);
- // Caller is responsible for preserving the value if necessary.
- __ Ret();
- __ Bind(&fall_through);
-}
-
-
-// Allocate a GrowableObjectArray using the backing array specified.
-// On stack: type argument (+1), data (+0).
-void Intrinsifier::GrowableArray_Allocate(Assembler* assembler) {
- // The newly allocated object is returned in V0.
- const intptr_t kTypeArgumentsOffset = 1 * kWordSize;
- const intptr_t kArrayOffset = 0 * kWordSize;
- Label fall_through;
-
- // Try allocating in new space.
- const Class& cls = Class::Handle(
- Isolate::Current()->object_store()->growable_object_array_class());
- __ TryAllocate(cls, &fall_through, V0, T1);
-
- // Store backing array object in growable array object.
- __ lw(T1, Address(SP, kArrayOffset)); // Data argument.
- // V0 is new, no barrier needed.
- __ StoreIntoObjectNoBarrier(
- V0, FieldAddress(V0, GrowableObjectArray::data_offset()), T1);
-
- // V0: new growable array object start as a tagged pointer.
- // Store the type argument field in the growable array object.
- __ lw(T1, Address(SP, kTypeArgumentsOffset)); // Type argument.
- __ StoreIntoObjectNoBarrier(
- V0, FieldAddress(V0, GrowableObjectArray::type_arguments_offset()), T1);
- // Set the length field in the growable array object to 0.
- __ Ret(); // Returns the newly allocated object in V0.
- __ delay_slot()->sw(ZR,
- FieldAddress(V0, GrowableObjectArray::length_offset()));
-
- __ Bind(&fall_through);
-}
-
-
-// Add an element to growable array if it doesn't need to grow, otherwise
-// call into regular code.
-// On stack: growable array (+1), value (+0).
-void Intrinsifier::GrowableArray_add(Assembler* assembler) {
- // In checked mode we need to type-check the incoming argument.
- if (Isolate::Current()->type_checks()) return;
- Label fall_through;
- __ lw(T0, Address(SP, 1 * kWordSize)); // Array.
- __ lw(T1, FieldAddress(T0, GrowableObjectArray::length_offset()));
- // T1: length.
- __ lw(T2, FieldAddress(T0, GrowableObjectArray::data_offset()));
- // T2: data.
- __ lw(T3, FieldAddress(T2, Array::length_offset()));
- // Compare length with capacity.
- // T3: capacity.
- __ beq(T1, T3, &fall_through); // Must grow data.
- const int32_t value_one = reinterpret_cast<int32_t>(Smi::New(1));
- // len = len + 1;
- __ addiu(T3, T1, Immediate(value_one));
- __ sw(T3, FieldAddress(T0, GrowableObjectArray::length_offset()));
- __ lw(T0, Address(SP, 0 * kWordSize)); // Value.
- ASSERT(kSmiTagShift == 1); - __ sll(T1, T1, 1); - __ addu(T1, T2, T1); - __ StoreIntoObject(T2, FieldAddress(T1, Array::data_offset()), T0); - __ LoadObject(T7, Object::null_object()); - __ Ret(); - __ delay_slot()->mov(V0, T7); - __ Bind(&fall_through); -} - - -#define TYPED_ARRAY_ALLOCATION(type_name, cid, max_len, scale_shift) \ - Label fall_through; \ - const intptr_t kArrayLengthStackOffset = 0 * kWordSize; \ - NOT_IN_PRODUCT(__ MaybeTraceAllocation(cid, T2, &fall_through)); \ - __ lw(T2, Address(SP, kArrayLengthStackOffset)); /* Array length. */ \ - /* Check that length is a positive Smi. */ \ - /* T2: requested array length argument. */ \ - __ andi(CMPRES1, T2, Immediate(kSmiTagMask)); \ - __ bne(CMPRES1, ZR, &fall_through); \ - __ BranchSignedLess(T2, Immediate(0), &fall_through); \ - __ SmiUntag(T2); \ - /* Check for maximum allowed length. */ \ - /* T2: untagged array length. */ \ - __ BranchSignedGreater(T2, Immediate(max_len), &fall_through); \ - __ sll(T2, T2, scale_shift); \ - const intptr_t fixed_size_plus_alignment_padding = \ - sizeof(Raw##type_name) + kObjectAlignment - 1; \ - __ AddImmediate(T2, fixed_size_plus_alignment_padding); \ - __ LoadImmediate(TMP, -kObjectAlignment); \ - __ and_(T2, T2, TMP); \ - Heap::Space space = Heap::kNew; \ - __ lw(T3, Address(THR, Thread::heap_offset())); \ - __ lw(V0, Address(T3, Heap::TopOffset(space))); \ - \ - /* T2: allocation size. */ \ - __ addu(T1, V0, T2); \ - /* Branch on unsigned overflow. */ \ - __ BranchUnsignedLess(T1, V0, &fall_through); \ - \ - /* Check if the allocation fits into the remaining space. */ \ - /* V0: potential new object start. */ \ - /* T1: potential next object start. */ \ - /* T2: allocation size. */ \ - /* T3: heap. */ \ - __ lw(T4, Address(T3, Heap::EndOffset(space))); \ - __ BranchUnsignedGreaterEqual(T1, T4, &fall_through); \ - \ - /* Successfully allocated the object(s), now update top to point to */ \ - /* next object start and initialize the object. */ \ - __ sw(T1, Address(T3, Heap::TopOffset(space))); \ - __ AddImmediate(V0, kHeapObjectTag); \ - NOT_IN_PRODUCT(__ UpdateAllocationStatsWithSize(cid, T2, T4, space)); \ - /* Initialize the tags. */ \ - /* V0: new object start as a tagged pointer. */ \ - /* T1: new object end address. */ \ - /* T2: allocation size. */ \ - { \ - Label size_tag_overflow, done; \ - __ BranchUnsignedGreater(T2, Immediate(RawObject::SizeTag::kMaxSizeTag), \ - &size_tag_overflow); \ - __ b(&done); \ - __ delay_slot()->sll(T2, T2, \ - RawObject::kSizeTagPos - kObjectAlignmentLog2); \ - \ - __ Bind(&size_tag_overflow); \ - __ mov(T2, ZR); \ - __ Bind(&done); \ - \ - /* Get the class index and insert it into the tags. */ \ - __ LoadImmediate(TMP, RawObject::ClassIdTag::encode(cid)); \ - __ or_(T2, T2, TMP); \ - __ sw(T2, FieldAddress(V0, type_name::tags_offset())); /* Tags. */ \ - } \ - /* Set the length field. */ \ - /* V0: new object start as a tagged pointer. */ \ - /* T1: new object end address. */ \ - __ lw(T2, Address(SP, kArrayLengthStackOffset)); /* Array length. */ \ - __ StoreIntoObjectNoBarrier( \ - V0, FieldAddress(V0, type_name::length_offset()), T2); \ - /* Initialize all array elements to 0. */ \ - /* V0: new object start as a tagged pointer. */ \ - /* T1: new object end address. */ \ - /* T2: iterator which initially points to the start of the variable */ \ - /* data area to be initialized. 
*/ \ - __ AddImmediate(T2, V0, sizeof(Raw##type_name) - 1); \ - Label done, init_loop; \ - __ Bind(&init_loop); \ - __ BranchUnsignedGreaterEqual(T2, T1, &done); \ - __ sw(ZR, Address(T2, 0)); \ - __ b(&init_loop); \ - __ delay_slot()->addiu(T2, T2, Immediate(kWordSize)); \ - __ Bind(&done); \ - \ - __ Ret(); \ - __ Bind(&fall_through); - - -static int GetScaleFactor(intptr_t size) { - switch (size) { - case 1: - return 0; - case 2: - return 1; - case 4: - return 2; - case 8: - return 3; - case 16: - return 4; - } - UNREACHABLE(); - return -1; -} - - -#define TYPED_DATA_ALLOCATOR(clazz) \ - void Intrinsifier::TypedData_##clazz##_factory(Assembler* assembler) { \ - intptr_t size = TypedData::ElementSizeInBytes(kTypedData##clazz##Cid); \ - intptr_t max_len = TypedData::MaxElements(kTypedData##clazz##Cid); \ - int shift = GetScaleFactor(size); \ - TYPED_ARRAY_ALLOCATION(TypedData, kTypedData##clazz##Cid, max_len, shift); \ - } -CLASS_LIST_TYPED_DATA(TYPED_DATA_ALLOCATOR) -#undef TYPED_DATA_ALLOCATOR - - -// Loads args from stack into T0 and T1 -// Tests if they are smis, jumps to label not_smi if not. -static void TestBothArgumentsSmis(Assembler* assembler, Label* not_smi) { - __ lw(T0, Address(SP, 0 * kWordSize)); - __ lw(T1, Address(SP, 1 * kWordSize)); - __ or_(CMPRES1, T0, T1); - __ andi(CMPRES1, CMPRES1, Immediate(kSmiTagMask)); - __ bne(CMPRES1, ZR, not_smi); - return; -} - - -void Intrinsifier::Integer_addFromInteger(Assembler* assembler) { - Label fall_through; - - TestBothArgumentsSmis(assembler, &fall_through); // Checks two Smis. - __ AdduDetectOverflow(V0, T0, T1, CMPRES1); // Add. - __ bltz(CMPRES1, &fall_through); // Fall through on overflow. - __ Ret(); // Nothing in branch delay slot. - __ Bind(&fall_through); -} - - -void Intrinsifier::Integer_add(Assembler* assembler) { - Integer_addFromInteger(assembler); -} - - -void Intrinsifier::Integer_subFromInteger(Assembler* assembler) { - Label fall_through; - - TestBothArgumentsSmis(assembler, &fall_through); - __ SubuDetectOverflow(V0, T0, T1, CMPRES1); // Subtract. - __ bltz(CMPRES1, &fall_through); // Fall through on overflow. - __ Ret(); - __ Bind(&fall_through); -} - - -void Intrinsifier::Integer_sub(Assembler* assembler) { - Label fall_through; - - TestBothArgumentsSmis(assembler, &fall_through); - __ SubuDetectOverflow(V0, T1, T0, CMPRES1); // Subtract. - __ bltz(CMPRES1, &fall_through); // Fall through on overflow. - __ Ret(); // Nothing in branch delay slot. - __ Bind(&fall_through); -} - - -void Intrinsifier::Integer_mulFromInteger(Assembler* assembler) { - Label fall_through; - - TestBothArgumentsSmis(assembler, &fall_through); // checks two smis - __ SmiUntag(T0); // untags T0. only want result shifted by one - - __ mult(T0, T1); // HI:LO <- T0 * T1. - __ mflo(V0); // V0 <- LO. - __ mfhi(T2); // T2 <- HI. - __ sra(T3, V0, 31); // T3 <- V0 >> 31. - __ bne(T2, T3, &fall_through); // Fall through on overflow. - __ Ret(); - __ Bind(&fall_through); -} - - -void Intrinsifier::Integer_mul(Assembler* assembler) { - Integer_mulFromInteger(assembler); -} - - -// Optimizations: -// - result is 0 if: -// - left is 0 -// - left equals right -// - result is left if -// - left > 0 && left < right -// T1: Tagged left (dividend). -// T0: Tagged right (divisor). -// Returns: -// V0: Untagged fallthrough result (remainder to be adjusted), or -// V0: Tagged return result (remainder). 
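// [Editor's sketch] The fast paths documented above plus the sign fix-up done
// by Integer_moduloFromInteger further below, as host C++ (hypothetical name;
// right != 0 is checked by the caller before this helper runs).
#include <cstdint>
static int32_t SmiModulo(int32_t left, int32_t right) {
  if (left == 0 || left == right) return 0;   // The return_zero paths.
  if (left > 0 && left < right) return left;  // Already reduced.
  int32_t res = left % right;                 // div/mfhi: truncated remainder.
  if (res < 0) {
    res = (right < 0) ? res - right : res + right;  // Make non-negative.
  }
  return res;
}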
-static void EmitRemainderOperation(Assembler* assembler) { - Label return_zero, modulo; - const Register left = T1; - const Register right = T0; - const Register result = V0; - - __ beq(left, ZR, &return_zero); - __ beq(left, right, &return_zero); - - __ bltz(left, &modulo); - // left is positive. - __ BranchSignedGreaterEqual(left, right, &modulo); - // left is less than right. return left. - __ Ret(); - __ delay_slot()->mov(result, left); - - __ Bind(&return_zero); - __ Ret(); - __ delay_slot()->mov(result, ZR); - - __ Bind(&modulo); - __ SmiUntag(right); - __ SmiUntag(left); - __ div(left, right); // Divide, remainder goes in HI. - __ mfhi(result); // result <- HI. - return; -} - - -// Implementation: -// res = left % right; -// if (res < 0) { -// if (right < 0) { -// res = res - right; -// } else { -// res = res + right; -// } -// } -void Intrinsifier::Integer_moduloFromInteger(Assembler* assembler) { - Label fall_through, subtract; - // Test arguments for smi. - __ lw(T1, Address(SP, 0 * kWordSize)); - __ lw(T0, Address(SP, 1 * kWordSize)); - __ or_(CMPRES1, T0, T1); - __ andi(CMPRES1, CMPRES1, Immediate(kSmiTagMask)); - __ bne(CMPRES1, ZR, &fall_through); - // T1: Tagged left (dividend). - // T0: Tagged right (divisor). - // Check if modulo by zero -> exception thrown in main function. - __ beq(T0, ZR, &fall_through); - EmitRemainderOperation(assembler); - // Untagged right in T0. Untagged remainder result in V0. - - Label done; - __ bgez(V0, &done); - __ bltz(T0, &subtract); - __ addu(V0, V0, T0); - __ Ret(); - __ delay_slot()->SmiTag(V0); - - __ Bind(&subtract); - __ subu(V0, V0, T0); - __ Ret(); - __ delay_slot()->SmiTag(V0); - - __ Bind(&done); - __ Ret(); - __ delay_slot()->SmiTag(V0); - - __ Bind(&fall_through); -} - - -void Intrinsifier::Integer_truncDivide(Assembler* assembler) { - Label fall_through; - - TestBothArgumentsSmis(assembler, &fall_through); - __ beq(T0, ZR, &fall_through); // If b is 0, fall through. - - __ SmiUntag(T0); - __ SmiUntag(T1); - __ div(T1, T0); // LO <- T1 / T0 - __ mflo(V0); // V0 <- LO - // Check the corner case of dividing the 'MIN_SMI' with -1, in which case we - // cannot tag the result. - __ BranchEqual(V0, Immediate(0x40000000), &fall_through); - __ Ret(); - __ delay_slot()->SmiTag(V0); - __ Bind(&fall_through); -} - - -void Intrinsifier::Integer_negate(Assembler* assembler) { - Label fall_through; - - __ lw(T0, Address(SP, +0 * kWordSize)); // Grabs first argument. - __ andi(CMPRES1, T0, Immediate(kSmiTagMask)); // Test for Smi. - __ bne(CMPRES1, ZR, &fall_through); // Fall through if not a Smi. - __ SubuDetectOverflow(V0, ZR, T0, CMPRES1); - __ bltz(CMPRES1, &fall_through); // There was overflow. - __ Ret(); - __ Bind(&fall_through); -} - - -void Intrinsifier::Integer_bitAndFromInteger(Assembler* assembler) { - Label fall_through; - - TestBothArgumentsSmis(assembler, &fall_through); // Checks two smis. - __ Ret(); - __ delay_slot()->and_(V0, T0, T1); - __ Bind(&fall_through); -} - - -void Intrinsifier::Integer_bitAnd(Assembler* assembler) { - Integer_bitAndFromInteger(assembler); -} - - -void Intrinsifier::Integer_bitOrFromInteger(Assembler* assembler) { - Label fall_through; - - TestBothArgumentsSmis(assembler, &fall_through); // Checks two smis. 
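// [Editor's sketch] Why Integer_truncDivide above bails out on exactly
// 0x40000000: 32-bit Smis span [-2^30, 2^30 - 1], so MIN_SMI / -1 yields
// 2^30, the one quotient of two Smis that cannot be retagged. Hypothetical
// name, host C++:
#include <cstdint>
static bool QuotientFitsInSmi(int32_t quotient) {
  return quotient != 0x40000000;  // The only out-of-range case.
}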
- __ Ret(); - __ delay_slot()->or_(V0, T0, T1); - __ Bind(&fall_through); -} - - -void Intrinsifier::Integer_bitOr(Assembler* assembler) { - Integer_bitOrFromInteger(assembler); -} - - -void Intrinsifier::Integer_bitXorFromInteger(Assembler* assembler) { - Label fall_through; - - TestBothArgumentsSmis(assembler, &fall_through); // Checks two smis. - __ Ret(); - __ delay_slot()->xor_(V0, T0, T1); - __ Bind(&fall_through); -} - - -void Intrinsifier::Integer_bitXor(Assembler* assembler) { - Integer_bitXorFromInteger(assembler); -} - - -void Intrinsifier::Integer_shl(Assembler* assembler) { - ASSERT(kSmiTagShift == 1); - ASSERT(kSmiTag == 0); - Label fall_through, overflow; - - TestBothArgumentsSmis(assembler, &fall_through); - __ BranchUnsignedGreater(T0, Immediate(Smi::RawValue(Smi::kBits)), - &fall_through); - __ SmiUntag(T0); - - // Check for overflow by shifting left and shifting back arithmetically. - // If the result is different from the original, there was overflow. - __ sllv(TMP, T1, T0); - __ srav(CMPRES1, TMP, T0); - __ bne(CMPRES1, T1, &overflow); - - // No overflow, result in V0. - __ Ret(); - __ delay_slot()->sllv(V0, T1, T0); - - __ Bind(&overflow); - // Arguments are Smi but the shift produced an overflow to Mint. - __ bltz(T1, &fall_through); - __ SmiUntag(T1); - - // Pull off high bits that will be shifted off of T1 by making a mask - // ((1 << T0) - 1), shifting it to the right, masking T1, then shifting back. - // high bits = (((1 << T0) - 1) << (32 - T0)) & T1) >> (32 - T0) - // lo bits = T1 << T0 - __ LoadImmediate(T3, 1); - __ sllv(T3, T3, T0); // T3 <- T3 << T0 - __ addiu(T3, T3, Immediate(-1)); // T3 <- T3 - 1 - __ subu(T4, ZR, T0); // T4 <- -T0 - __ addiu(T4, T4, Immediate(32)); // T4 <- 32 - T0 - __ sllv(T3, T3, T4); // T3 <- T3 << T4 - __ and_(T3, T3, T1); // T3 <- T3 & T1 - __ srlv(T3, T3, T4); // T3 <- T3 >> T4 - // Now T3 has the bits that fall off of T1 on a left shift. - __ sllv(T0, T1, T0); // T0 gets low bits. - - const Class& mint_class = - Class::Handle(Isolate::Current()->object_store()->mint_class()); - __ TryAllocate(mint_class, &fall_through, V0, T1); - - __ sw(T0, FieldAddress(V0, Mint::value_offset())); - __ Ret(); - __ delay_slot()->sw(T3, FieldAddress(V0, Mint::value_offset() + kWordSize)); - __ Bind(&fall_through); -} - - -static void Get64SmiOrMint(Assembler* assembler, - Register res_hi, - Register res_lo, - Register reg, - Label* not_smi_or_mint) { - Label not_smi, done; - __ andi(CMPRES1, reg, Immediate(kSmiTagMask)); - __ bne(CMPRES1, ZR, ¬_smi); - __ SmiUntag(reg); - - // Sign extend to 64 bit - __ mov(res_lo, reg); - __ b(&done); - __ delay_slot()->sra(res_hi, reg, 31); - - __ Bind(¬_smi); - __ LoadClassId(CMPRES1, reg); - __ BranchNotEqual(CMPRES1, Immediate(kMintCid), not_smi_or_mint); - - // Mint. - __ lw(res_lo, FieldAddress(reg, Mint::value_offset())); - __ lw(res_hi, FieldAddress(reg, Mint::value_offset() + kWordSize)); - __ Bind(&done); - return; -} - - -static void CompareIntegers(Assembler* assembler, RelationOperator rel_op) { - Label try_mint_smi, is_true, is_false, drop_two_fall_through, fall_through; - TestBothArgumentsSmis(assembler, &try_mint_smi); - // T0 contains the right argument. 
T1 contains left argument - - switch (rel_op) { - case LT: - __ BranchSignedLess(T1, T0, &is_true); - break; - case LE: - __ BranchSignedLessEqual(T1, T0, &is_true); - break; - case GT: - __ BranchSignedGreater(T1, T0, &is_true); - break; - case GE: - __ BranchSignedGreaterEqual(T1, T0, &is_true); - break; - default: - UNREACHABLE(); - break; - } - - __ Bind(&is_false); - __ LoadObject(V0, Bool::False()); - __ Ret(); - __ Bind(&is_true); - __ LoadObject(V0, Bool::True()); - __ Ret(); - - __ Bind(&try_mint_smi); - // Get left as 64 bit integer. - Get64SmiOrMint(assembler, T3, T2, T1, &fall_through); - // Get right as 64 bit integer. - Get64SmiOrMint(assembler, T5, T4, T0, &fall_through); - // T3: left high. - // T2: left low. - // T5: right high. - // T4: right low. - - // 64-bit comparison - switch (rel_op) { - case LT: - case LE: { - // Compare left hi, right high. - __ BranchSignedGreater(T3, T5, &is_false); - __ BranchSignedLess(T3, T5, &is_true); - // Compare left lo, right lo. - if (rel_op == LT) { - __ BranchUnsignedGreaterEqual(T2, T4, &is_false); - } else { - __ BranchUnsignedGreater(T2, T4, &is_false); - } - break; - } - case GT: - case GE: { - // Compare left hi, right high. - __ BranchSignedLess(T3, T5, &is_false); - __ BranchSignedGreater(T3, T5, &is_true); - // Compare left lo, right lo. - if (rel_op == GT) { - __ BranchUnsignedLessEqual(T2, T4, &is_false); - } else { - __ BranchUnsignedLess(T2, T4, &is_false); - } - break; - } - default: - UNREACHABLE(); - break; - } - // Else is true. - __ b(&is_true); - - __ Bind(&fall_through); -} - - -void Intrinsifier::Integer_greaterThanFromInt(Assembler* assembler) { - CompareIntegers(assembler, LT); -} - - -void Intrinsifier::Integer_lessThan(Assembler* assembler) { - CompareIntegers(assembler, LT); -} - - -void Intrinsifier::Integer_greaterThan(Assembler* assembler) { - CompareIntegers(assembler, GT); -} - - -void Intrinsifier::Integer_lessEqualThan(Assembler* assembler) { - CompareIntegers(assembler, LE); -} - - -void Intrinsifier::Integer_greaterEqualThan(Assembler* assembler) { - CompareIntegers(assembler, GE); -} - - -// This is called for Smi, Mint and Bigint receivers. The right argument -// can be Smi, Mint, Bigint or double. -void Intrinsifier::Integer_equalToInteger(Assembler* assembler) { - Label fall_through, true_label, check_for_mint; - // For integer receiver '===' check first. - __ lw(T0, Address(SP, 0 * kWordSize)); - __ lw(T1, Address(SP, 1 * kWordSize)); - __ beq(T0, T1, &true_label); - - __ or_(T2, T0, T1); - __ andi(CMPRES1, T2, Immediate(kSmiTagMask)); - // If T0 or T1 is not a smi do Mint checks. - __ bne(CMPRES1, ZR, &check_for_mint); - - // Both arguments are smi, '===' is good enough. - __ LoadObject(V0, Bool::False()); - __ Ret(); - __ Bind(&true_label); - __ LoadObject(V0, Bool::True()); - __ Ret(); - - // At least one of the arguments was not Smi. - Label receiver_not_smi; - __ Bind(&check_for_mint); - - __ andi(CMPRES1, T1, Immediate(kSmiTagMask)); - __ bne(CMPRES1, ZR, &receiver_not_smi); // Check receiver. - - // Left (receiver) is Smi, return false if right is not Double. - // Note that an instance of Mint or Bigint never contains a value that can be - // represented by Smi. - - __ LoadClassId(CMPRES1, T0); - __ BranchEqual(CMPRES1, Immediate(kDoubleCid), &fall_through); - __ LoadObject(V0, Bool::False()); // Smi == Mint -> false. - __ Ret(); - - __ Bind(&receiver_not_smi); - // T1:: receiver. 
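// [Editor's sketch] The 64-bit ordering used by the mint path of
// CompareIntegers above, as host C++ with a hypothetical name: signed high
// words decide, and equal high words fall back to an unsigned low-word
// compare, exactly as the Branch* pairs do.
#include <cstdint>
static bool SignedLess64(int32_t left_hi, uint32_t left_lo,
                         int32_t right_hi, uint32_t right_lo) {
  if (left_hi != right_hi) return left_hi < right_hi;
  return left_lo < right_lo;
}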
-
- __ LoadClassId(CMPRES1, T1);
- __ BranchNotEqual(CMPRES1, Immediate(kMintCid), &fall_through);
- // Receiver is Mint, return false if right is Smi.
- __ andi(CMPRES1, T0, Immediate(kSmiTagMask));
- __ bne(CMPRES1, ZR, &fall_through);
- __ LoadObject(V0, Bool::False());
- __ Ret();
- // TODO(srdjan): Implement Mint == Mint comparison.
-
- __ Bind(&fall_through);
-}
-
-
-void Intrinsifier::Integer_equal(Assembler* assembler) {
- Integer_equalToInteger(assembler);
-}
-
-
-void Intrinsifier::Integer_sar(Assembler* assembler) {
- Label fall_through;
-
- TestBothArgumentsSmis(assembler, &fall_through);
- // Shift amount in T0. Value to shift in T1.
-
- __ SmiUntag(T0);
- __ bltz(T0, &fall_through);
-
- __ LoadImmediate(T2, 0x1F);
- __ slt(CMPRES1, T2, T0); // CMPRES1 <- 0x1F < T0 ? 1 : 0
- __ movn(T0, T2, CMPRES1); // T0 <- 0x1F < T0 ? 0x1F : T0
-
- __ SmiUntag(T1);
- __ srav(V0, T1, T0);
- __ Ret();
- __ delay_slot()->SmiTag(V0);
- __ Bind(&fall_through);
-}
-
-
-void Intrinsifier::Smi_bitNegate(Assembler* assembler) {
- __ lw(T0, Address(SP, 0 * kWordSize));
- __ nor(V0, T0, ZR);
- __ Ret();
- __ delay_slot()->addiu(V0, V0, Immediate(-1)); // Remove inverted smi-tag.
-}
-
-
-void Intrinsifier::Smi_bitLength(Assembler* assembler) {
- __ lw(V0, Address(SP, 0 * kWordSize));
- __ SmiUntag(V0);
- // XOR with sign bit to complement bits if value is negative.
- __ sra(T0, V0, 31);
- __ xor_(V0, V0, T0);
- __ clz(V0, V0);
- __ LoadImmediate(T0, 32);
- __ subu(V0, T0, V0);
- __ Ret();
- __ delay_slot()->SmiTag(V0);
-}
-
-
-void Intrinsifier::Smi_bitAndFromSmi(Assembler* assembler) {
- Integer_bitAndFromInteger(assembler);
-}
-
-
-void Intrinsifier::Bigint_lsh(Assembler* assembler) {
- // static void _lsh(Uint32List x_digits, int x_used, int n,
- // Uint32List r_digits)
-
- // T2 = x_used, T3 = x_digits, x_used > 0, x_used is Smi.
- __ lw(T2, Address(SP, 2 * kWordSize));
- __ lw(T3, Address(SP, 3 * kWordSize));
- // T4 = r_digits, T5 = n, n is Smi, n % _DIGIT_BITS != 0.
- __ lw(T4, Address(SP, 0 * kWordSize));
- __ lw(T5, Address(SP, 1 * kWordSize));
- __ SmiUntag(T5);
- // T0 = n ~/ _DIGIT_BITS
- __ sra(T0, T5, 5);
- // T6 = &x_digits[0]
- __ addiu(T6, T3, Immediate(TypedData::data_offset() - kHeapObjectTag));
- // V0 = &x_digits[x_used]
- __ sll(T2, T2, 1);
- __ addu(V0, T6, T2);
- // V1 = &r_digits[1]
- __ addiu(V1, T4, Immediate(TypedData::data_offset() - kHeapObjectTag +
- Bigint::kBytesPerDigit));
- // V1 = &r_digits[x_used + n ~/ _DIGIT_BITS + 1]
- __ addu(V1, V1, T2);
- __ sll(T1, T0, 2);
- __ addu(V1, V1, T1);
- // T3 = n % _DIGIT_BITS
- __ andi(T3, T5, Immediate(31));
- // T2 = 32 - T3
- __ subu(T2, ZR, T3);
- __ addiu(T2, T2, Immediate(32));
- __ mov(T1, ZR);
- Label loop;
- __ Bind(&loop);
- __ addiu(V0, V0, Immediate(-Bigint::kBytesPerDigit));
- __ lw(T0, Address(V0, 0));
- __ srlv(AT, T0, T2);
- __ or_(T1, T1, AT);
- __ addiu(V1, V1, Immediate(-Bigint::kBytesPerDigit));
- __ sw(T1, Address(V1, 0));
- __ bne(V0, T6, &loop);
- __ delay_slot()->sllv(T1, T0, T3);
- __ sw(T1, Address(V1, -Bigint::kBytesPerDigit));
- // Returning Object::null() is not required, since this method is private.
- __ Ret();
-}
-
-
-void Intrinsifier::Bigint_rsh(Assembler* assembler) {
- // static void _rsh(Uint32List x_digits, int x_used, int n,
- // Uint32List r_digits)
-
- // T2 = x_used, T3 = x_digits, x_used > 0, x_used is Smi.
- __ lw(T2, Address(SP, 2 * kWordSize));
- __ lw(T3, Address(SP, 3 * kWordSize));
- // T4 = r_digits, T5 = n, n is Smi, n % _DIGIT_BITS != 0.
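// [Editor's sketch] What the Bigint_lsh digit loop above computes, as host
// C++ with hypothetical names. The documented precondition n % 32 != 0 is
// assumed; a zero bit shift would make the (32 - bit_shift) shift undefined.
#include <cstdint>
static void LeftShiftDigits(const uint32_t* x, intptr_t x_used,
                            intptr_t n, uint32_t* r) {
  const intptr_t digit_shift = n / 32;
  const intptr_t bit_shift = n % 32;  // Never zero, see above.
  uint32_t carry = 0;
  for (intptr_t i = x_used - 1; i >= 0; --i) {
    r[i + digit_shift + 1] = carry | (x[i] >> (32 - bit_shift));
    carry = x[i] << bit_shift;
  }
  r[digit_shift] = carry;  // The final sw before Ret().
}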
- __ lw(T4, Address(SP, 0 * kWordSize)); - __ lw(T5, Address(SP, 1 * kWordSize)); - __ SmiUntag(T5); - // T0 = n ~/ _DIGIT_BITS - __ sra(T0, T5, 5); - // V1 = &r_digits[0] - __ addiu(V1, T4, Immediate(TypedData::data_offset() - kHeapObjectTag)); - // V0 = &x_digits[n ~/ _DIGIT_BITS] - __ addiu(V0, T3, Immediate(TypedData::data_offset() - kHeapObjectTag)); - __ sll(T1, T0, 2); - __ addu(V0, V0, T1); - // T6 = &r_digits[x_used - n ~/ _DIGIT_BITS - 1] - __ sll(T2, T2, 1); - __ addu(T6, V1, T2); - __ subu(T6, T6, T1); - __ addiu(T6, T6, Immediate(-4)); - // T3 = n % _DIGIT_BITS - __ andi(T3, T5, Immediate(31)); - // T2 = 32 - T3 - __ subu(T2, ZR, T3); - __ addiu(T2, T2, Immediate(32)); - // T1 = x_digits[n ~/ _DIGIT_BITS] >> (n % _DIGIT_BITS) - __ lw(T1, Address(V0, 0)); - __ addiu(V0, V0, Immediate(Bigint::kBytesPerDigit)); - Label loop_exit; - __ beq(V1, T6, &loop_exit); - __ delay_slot()->srlv(T1, T1, T3); - Label loop; - __ Bind(&loop); - __ lw(T0, Address(V0, 0)); - __ addiu(V0, V0, Immediate(Bigint::kBytesPerDigit)); - __ sllv(AT, T0, T2); - __ or_(T1, T1, AT); - __ sw(T1, Address(V1, 0)); - __ addiu(V1, V1, Immediate(Bigint::kBytesPerDigit)); - __ bne(V1, T6, &loop); - __ delay_slot()->srlv(T1, T0, T3); - __ Bind(&loop_exit); - __ sw(T1, Address(V1, 0)); - // Returning Object::null() is not required, since this method is private. - __ Ret(); -} - - -void Intrinsifier::Bigint_absAdd(Assembler* assembler) { - // static void _absAdd(Uint32List digits, int used, - // Uint32List a_digits, int a_used, - // Uint32List r_digits) - - // T2 = used, T3 = digits - __ lw(T2, Address(SP, 3 * kWordSize)); - __ lw(T3, Address(SP, 4 * kWordSize)); - // T3 = &digits[0] - __ addiu(T3, T3, Immediate(TypedData::data_offset() - kHeapObjectTag)); - - // T4 = a_used, T5 = a_digits - __ lw(T4, Address(SP, 1 * kWordSize)); - __ lw(T5, Address(SP, 2 * kWordSize)); - // T5 = &a_digits[0] - __ addiu(T5, T5, Immediate(TypedData::data_offset() - kHeapObjectTag)); - - // T6 = r_digits - __ lw(T6, Address(SP, 0 * kWordSize)); - // T6 = &r_digits[0] - __ addiu(T6, T6, Immediate(TypedData::data_offset() - kHeapObjectTag)); - - // V0 = &digits[a_used >> 1], a_used is Smi. - __ sll(V0, T4, 1); - __ addu(V0, V0, T3); - - // V1 = &digits[used >> 1], used is Smi. - __ sll(V1, T2, 1); - __ addu(V1, V1, T3); - - // T2 = carry in = 0. - __ mov(T2, ZR); - Label add_loop; - __ Bind(&add_loop); - // Loop a_used times, a_used > 0. - __ lw(T0, Address(T3, 0)); // T0 = x. - __ addiu(T3, T3, Immediate(Bigint::kBytesPerDigit)); - __ lw(T1, Address(T5, 0)); // T1 = y. - __ addiu(T5, T5, Immediate(Bigint::kBytesPerDigit)); - __ addu(T1, T0, T1); // T1 = x + y. - __ sltu(T4, T1, T0); // T4 = carry out of x + y. - __ addu(T0, T1, T2); // T0 = x + y + carry in. - __ sltu(T2, T0, T1); // T2 = carry out of (x + y) + carry in. - __ or_(T2, T2, T4); // T2 = carry out of x + y + carry in. - __ sw(T0, Address(T6, 0)); - __ bne(T3, V0, &add_loop); - __ delay_slot()->addiu(T6, T6, Immediate(Bigint::kBytesPerDigit)); - - Label last_carry; - __ beq(T3, V1, &last_carry); - - Label carry_loop; - __ Bind(&carry_loop); - // Loop used - a_used times, used - a_used > 0. - __ lw(T0, Address(T3, 0)); // T0 = x. - __ addiu(T3, T3, Immediate(Bigint::kBytesPerDigit)); - __ addu(T1, T0, T2); // T1 = x + carry in. - __ sltu(T2, T1, T0); // T2 = carry out of x + carry in. 
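// [Editor's sketch] The addu/sltu/addu/sltu/or_ sequence in the add loop
// above is a 32-bit add-with-carry; host C++ equivalent (hypothetical name):
#include <cstdint>
static uint32_t AddWithCarry(uint32_t x, uint32_t y, uint32_t* carry) {
  uint32_t sum = x + y;
  uint32_t c = (sum < x);    // Carry out of x + y.
  uint32_t out = sum + *carry;
  *carry = c | (out < sum);  // Carry out of (x + y) + carry in.
  return out;
}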
- __ sw(T1, Address(T6, 0)); - __ bne(T3, V1, &carry_loop); - __ delay_slot()->addiu(T6, T6, Immediate(Bigint::kBytesPerDigit)); - - __ Bind(&last_carry); - __ sw(T2, Address(T6, 0)); - - // Returning Object::null() is not required, since this method is private. - __ Ret(); -} - - -void Intrinsifier::Bigint_absSub(Assembler* assembler) { - // static void _absSub(Uint32List digits, int used, - // Uint32List a_digits, int a_used, - // Uint32List r_digits) - - // T2 = used, T3 = digits - __ lw(T2, Address(SP, 3 * kWordSize)); - __ lw(T3, Address(SP, 4 * kWordSize)); - // T3 = &digits[0] - __ addiu(T3, T3, Immediate(TypedData::data_offset() - kHeapObjectTag)); - - // T4 = a_used, T5 = a_digits - __ lw(T4, Address(SP, 1 * kWordSize)); - __ lw(T5, Address(SP, 2 * kWordSize)); - // T5 = &a_digits[0] - __ addiu(T5, T5, Immediate(TypedData::data_offset() - kHeapObjectTag)); - - // T6 = r_digits - __ lw(T6, Address(SP, 0 * kWordSize)); - // T6 = &r_digits[0] - __ addiu(T6, T6, Immediate(TypedData::data_offset() - kHeapObjectTag)); - - // V0 = &digits[a_used >> 1], a_used is Smi. - __ sll(V0, T4, 1); - __ addu(V0, V0, T3); - - // V1 = &digits[used >> 1], used is Smi. - __ sll(V1, T2, 1); - __ addu(V1, V1, T3); - - // T2 = borrow in = 0. - __ mov(T2, ZR); - Label sub_loop; - __ Bind(&sub_loop); - // Loop a_used times, a_used > 0. - __ lw(T0, Address(T3, 0)); // T0 = x. - __ addiu(T3, T3, Immediate(Bigint::kBytesPerDigit)); - __ lw(T1, Address(T5, 0)); // T1 = y. - __ addiu(T5, T5, Immediate(Bigint::kBytesPerDigit)); - __ subu(T1, T0, T1); // T1 = x - y. - __ sltu(T4, T0, T1); // T4 = borrow out of x - y. - __ subu(T0, T1, T2); // T0 = x - y - borrow in. - __ sltu(T2, T1, T0); // T2 = borrow out of (x - y) - borrow in. - __ or_(T2, T2, T4); // T2 = borrow out of x - y - borrow in. - __ sw(T0, Address(T6, 0)); - __ bne(T3, V0, &sub_loop); - __ delay_slot()->addiu(T6, T6, Immediate(Bigint::kBytesPerDigit)); - - Label done; - __ beq(T3, V1, &done); - - Label borrow_loop; - __ Bind(&borrow_loop); - // Loop used - a_used times, used - a_used > 0. - __ lw(T0, Address(T3, 0)); // T0 = x. - __ addiu(T3, T3, Immediate(Bigint::kBytesPerDigit)); - __ subu(T1, T0, T2); // T1 = x - borrow in. - __ sltu(T2, T0, T1); // T2 = borrow out of x - borrow in. - __ sw(T1, Address(T6, 0)); - __ bne(T3, V1, &borrow_loop); - __ delay_slot()->addiu(T6, T6, Immediate(Bigint::kBytesPerDigit)); - - __ Bind(&done); - // Returning Object::null() is not required, since this method is private. - __ Ret(); -} - - -void Intrinsifier::Bigint_mulAdd(Assembler* assembler) { - // Pseudo code: - // static int _mulAdd(Uint32List x_digits, int xi, - // Uint32List m_digits, int i, - // Uint32List a_digits, int j, int n) { - // uint32_t x = x_digits[xi >> 1]; // xi is Smi. - // if (x == 0 || n == 0) { - // return 1; - // } - // uint32_t* mip = &m_digits[i >> 1]; // i is Smi. - // uint32_t* ajp = &a_digits[j >> 1]; // j is Smi. - // uint32_t c = 0; - // SmiUntag(n); - // do { - // uint32_t mi = *mip++; - // uint32_t aj = *ajp; - // uint64_t t = x*mi + aj + c; // 32-bit * 32-bit -> 64-bit. - // *ajp++ = low32(t); - // c = high32(t); - // } while (--n > 0); - // while (c != 0) { - // uint64_t t = *ajp + c; - // *ajp++ = low32(t); - // c = high32(t); // c == 0 or 1. - // } - // return 1; - // } - - Label done; - // T3 = x, no_op if x == 0 - __ lw(T0, Address(SP, 5 * kWordSize)); // T0 = xi as Smi. - __ lw(T1, Address(SP, 6 * kWordSize)); // T1 = x_digits. 
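// [Editor's sketch] The inner step of the _mulAdd pseudocode above, using the
// host's 64-bit arithmetic in place of the HI:LO register pair (hypothetical
// name):
#include <cstdint>
static uint32_t MulAddStep(uint32_t x, uint32_t mi, uint32_t aj, uint32_t* c) {
  uint64_t t = static_cast<uint64_t>(x) * mi + aj + *c;  // multu plus carries.
  *c = static_cast<uint32_t>(t >> 32);                   // high32(t).
  return static_cast<uint32_t>(t);                       // low32(t).
}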
- __ sll(T0, T0, 1); - __ addu(T1, T0, T1); - __ lw(T3, FieldAddress(T1, TypedData::data_offset())); - __ beq(T3, ZR, &done); - - // T6 = SmiUntag(n), no_op if n == 0 - __ lw(T6, Address(SP, 0 * kWordSize)); - __ SmiUntag(T6); - __ beq(T6, ZR, &done); - __ delay_slot()->addiu(T6, T6, Immediate(-1)); // ... while (n-- > 0). - - // T4 = mip = &m_digits[i >> 1] - __ lw(T0, Address(SP, 3 * kWordSize)); // T0 = i as Smi. - __ lw(T1, Address(SP, 4 * kWordSize)); // T1 = m_digits. - __ sll(T0, T0, 1); - __ addu(T1, T0, T1); - __ addiu(T4, T1, Immediate(TypedData::data_offset() - kHeapObjectTag)); - - // T5 = ajp = &a_digits[j >> 1] - __ lw(T0, Address(SP, 1 * kWordSize)); // T0 = j as Smi. - __ lw(T1, Address(SP, 2 * kWordSize)); // T1 = a_digits. - __ sll(T0, T0, 1); - __ addu(T1, T0, T1); - __ addiu(T5, T1, Immediate(TypedData::data_offset() - kHeapObjectTag)); - - // T1 = c = 0 - __ mov(T1, ZR); - - Label muladd_loop; - __ Bind(&muladd_loop); - // x: T3 - // mip: T4 - // ajp: T5 - // c: T1 - // n-1: T6 - - // uint32_t mi = *mip++ - __ lw(T2, Address(T4, 0)); - - // uint32_t aj = *ajp - __ lw(T0, Address(T5, 0)); - - // uint64_t t = x*mi + aj + c - __ multu(T2, T3); // HI:LO = x*mi. - __ addiu(T4, T4, Immediate(Bigint::kBytesPerDigit)); - __ mflo(V0); - __ mfhi(V1); - __ addu(V0, V0, T0); // V0 = low32(x*mi) + aj. - __ sltu(T7, V0, T0); // T7 = carry out of low32(x*mi) + aj. - __ addu(V1, V1, T7); // V1:V0 = x*mi + aj. - __ addu(T0, V0, T1); // T0 = low32(x*mi + aj) + c. - __ sltu(T7, T0, T1); // T7 = carry out of low32(x*mi + aj) + c. - __ addu(T1, V1, T7); // T1 = c = high32(x*mi + aj + c). - - // *ajp++ = low32(t) = T0 - __ sw(T0, Address(T5, 0)); - __ addiu(T5, T5, Immediate(Bigint::kBytesPerDigit)); - - // while (n-- > 0) - __ bgtz(T6, &muladd_loop); - __ delay_slot()->addiu(T6, T6, Immediate(-1)); // --n - - __ beq(T1, ZR, &done); - - // *ajp++ += c - __ lw(T0, Address(T5, 0)); - __ addu(T0, T0, T1); - __ sltu(T1, T0, T1); - __ sw(T0, Address(T5, 0)); - __ beq(T1, ZR, &done); - __ delay_slot()->addiu(T5, T5, Immediate(Bigint::kBytesPerDigit)); - - Label propagate_carry_loop; - __ Bind(&propagate_carry_loop); - __ lw(T0, Address(T5, 0)); - __ addiu(T0, T0, Immediate(1)); - __ sw(T0, Address(T5, 0)); - __ beq(T0, ZR, &propagate_carry_loop); - __ delay_slot()->addiu(T5, T5, Immediate(Bigint::kBytesPerDigit)); - - __ Bind(&done); - __ addiu(V0, ZR, Immediate(Smi::RawValue(1))); // One digit processed. - __ Ret(); -} - - -void Intrinsifier::Bigint_sqrAdd(Assembler* assembler) { - // Pseudo code: - // static int _sqrAdd(Uint32List x_digits, int i, - // Uint32List a_digits, int used) { - // uint32_t* xip = &x_digits[i >> 1]; // i is Smi. - // uint32_t x = *xip++; - // if (x == 0) return 1; - // uint32_t* ajp = &a_digits[i]; // j == 2*i, i is Smi. - // uint32_t aj = *ajp; - // uint64_t t = x*x + aj; - // *ajp++ = low32(t); - // uint64_t c = high32(t); - // int n = ((used - i) >> 1) - 1; // used and i are Smi. - // while (--n >= 0) { - // uint32_t xi = *xip++; - // uint32_t aj = *ajp; - // uint96_t t = 2*x*xi + aj + c; // 2-bit * 32-bit * 32-bit -> 65-bit. - // *ajp++ = low32(t); - // c = high64(t); // 33-bit. - // } - // uint32_t aj = *ajp; - // uint64_t t = aj + c; // 32-bit + 33-bit -> 34-bit. - // *ajp++ = low32(t); - // *ajp = high32(t); - // return 1; - // } - - // T4 = xip = &x_digits[i >> 1] - __ lw(T2, Address(SP, 2 * kWordSize)); // T2 = i as Smi. - __ lw(T3, Address(SP, 3 * kWordSize)); // T3 = x_digits. 
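// [Editor's sketch] The 2*x*xi + aj + c step from the _sqrAdd pseudocode
// above needs up to 65 bits, so a host C++ rendering has to track the bit
// pushed past 64 explicitly (hypothetical name; c holds the 33-bit carry):
#include <cstdint>
static uint32_t SqrAddStep(uint32_t x, uint32_t xi, uint32_t aj, uint64_t* c) {
  uint64_t p = static_cast<uint64_t>(x) * xi;  // x*xi, 64 bits.
  uint64_t top = p >> 63;                      // Bit 64 of 2*x*xi.
  uint64_t t = (p << 1) + aj;
  top += (t < (p << 1));                       // Carry into bit 64.
  uint64_t u = t + *c;
  top += (u < t);                              // Carry into bit 64 again.
  *c = (u >> 32) | (top << 32);                // high64(t): the new carry.
  return static_cast<uint32_t>(u);             // low32(t).
}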
- __ sll(T0, T2, 1); - __ addu(T3, T0, T3); - __ addiu(T4, T3, Immediate(TypedData::data_offset() - kHeapObjectTag)); - - // T3 = x = *xip++, return if x == 0 - Label x_zero; - __ lw(T3, Address(T4, 0)); - __ beq(T3, ZR, &x_zero); - __ delay_slot()->addiu(T4, T4, Immediate(Bigint::kBytesPerDigit)); - - // T5 = ajp = &a_digits[i] - __ lw(T1, Address(SP, 1 * kWordSize)); // a_digits - __ sll(T0, T2, 2); // j == 2*i, i is Smi. - __ addu(T1, T0, T1); - __ addiu(T5, T1, Immediate(TypedData::data_offset() - kHeapObjectTag)); - - // T6:T0 = t = x*x + *ajp - __ lw(T0, Address(T5, 0)); // *ajp. - __ mthi(ZR); - __ mtlo(T0); - __ maddu(T3, T3); // HI:LO = T3*T3 + *ajp. - __ mfhi(T6); - __ mflo(T0); - - // *ajp++ = low32(t) = R0 - __ sw(T0, Address(T5, 0)); - __ addiu(T5, T5, Immediate(Bigint::kBytesPerDigit)); - - // T6 = low32(c) = high32(t) - // T7 = high32(c) = 0 - __ mov(T7, ZR); - - // int n = used - i - 1; while (--n >= 0) ... - __ lw(T0, Address(SP, 0 * kWordSize)); // used is Smi - __ subu(V0, T0, T2); - __ SmiUntag(V0); // V0 = used - i - // int n = used - i - 2; if (n >= 0) ... while (n-- > 0) - __ addiu(V0, V0, Immediate(-2)); - - Label loop, done; - __ bltz(V0, &done); - - __ Bind(&loop); - // x: T3 - // xip: T4 - // ajp: T5 - // c: T7:T6 - // t: A2:A1:A0 (not live at loop entry) - // n: V0 - - // uint32_t xi = *xip++ - __ lw(T2, Address(T4, 0)); - __ addiu(T4, T4, Immediate(Bigint::kBytesPerDigit)); - - // uint32_t aj = *ajp - __ lw(T0, Address(T5, 0)); - - // uint96_t t = T7:T6:T0 = 2*x*xi + aj + c - __ multu(T2, T3); - __ mfhi(A1); - __ mflo(A0); // A1:A0 = x*xi. - __ srl(A2, A1, 31); - __ sll(A1, A1, 1); - __ srl(T1, A0, 31); - __ or_(A1, A1, T1); - __ sll(A0, A0, 1); // A2:A1:A0 = 2*x*xi. - __ addu(A0, A0, T0); - __ sltu(T1, A0, T0); - __ addu(A1, A1, T1); // No carry out possible; A2:A1:A0 = 2*x*xi + aj. - __ addu(T0, A0, T6); - __ sltu(T1, T0, T6); - __ addu(T6, A1, T1); // No carry out; A2:T6:T0 = 2*x*xi + aj + low32(c). - __ addu(T6, T6, T7); // No carry out; A2:T6:T0 = 2*x*xi + aj + c. - __ mov(T7, A2); // T7:T6:T0 = 2*x*xi + aj + c. - - // *ajp++ = low32(t) = T0 - __ sw(T0, Address(T5, 0)); - __ addiu(T5, T5, Immediate(Bigint::kBytesPerDigit)); - - // while (n-- > 0) - __ bgtz(V0, &loop); - __ delay_slot()->addiu(V0, V0, Immediate(-1)); // --n - - __ Bind(&done); - // uint32_t aj = *ajp - __ lw(T0, Address(T5, 0)); - - // uint64_t t = aj + c - __ addu(T6, T6, T0); - __ sltu(T1, T6, T0); - __ addu(T7, T7, T1); - - // *ajp = low32(t) = T6 - // *(ajp + 1) = high32(t) = T7 - __ sw(T6, Address(T5, 0)); - __ sw(T7, Address(T5, Bigint::kBytesPerDigit)); - - __ Bind(&x_zero); - __ addiu(V0, ZR, Immediate(Smi::RawValue(1))); // One digit processed. - __ Ret(); -} - - -void Intrinsifier::Bigint_estQuotientDigit(Assembler* assembler) { - // No unsigned 64-bit / 32-bit divide instruction. -} - - -void Intrinsifier::Montgomery_mulMod(Assembler* assembler) { - // Pseudo code: - // static int _mulMod(Uint32List args, Uint32List digits, int i) { - // uint32_t rho = args[_RHO]; // _RHO == 2. - // uint32_t d = digits[i >> 1]; // i is Smi. - // uint64_t t = rho*d; - // args[_MU] = t mod DIGIT_BASE; // _MU == 4. - // return 1; - // } - - // T4 = args - __ lw(T4, Address(SP, 2 * kWordSize)); // args - - // T3 = rho = args[2] - __ lw(T3, FieldAddress( - T4, TypedData::data_offset() + 2 * Bigint::kBytesPerDigit)); - - // T2 = d = digits[i >> 1] - __ lw(T0, Address(SP, 0 * kWordSize)); // T0 = i as Smi. - __ lw(T1, Address(SP, 1 * kWordSize)); // T1 = digits. 
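// [Editor's sketch] The whole intrinsic as host C++, following the pseudocode
// above (hypothetical name; the Smi index arrives untagged here):
#include <cstdint>
static void MontgomeryMulModStep(uint32_t* args, const uint32_t* digits,
                                 intptr_t i) {
  const uint32_t rho = args[2];  // _RHO == 2.
  const uint32_t d = digits[i];
  // t mod DIGIT_BASE is just the low 32 bits of rho*d (multu + mflo).
  args[4] = static_cast<uint32_t>(static_cast<uint64_t>(rho) * d);  // _MU == 4.
}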
- __ sll(T0, T0, 1); - __ addu(T1, T0, T1); - __ lw(T2, FieldAddress(T1, TypedData::data_offset())); - - // HI:LO = t = rho*d - __ multu(T2, T3); - - // args[4] = t mod DIGIT_BASE = low32(t) - __ mflo(T0); - __ sw(T0, FieldAddress( - T4, TypedData::data_offset() + 4 * Bigint::kBytesPerDigit)); - - __ addiu(V0, ZR, Immediate(Smi::RawValue(1))); // One digit processed. - __ Ret(); -} - - -// Check if the last argument is a double, jump to label 'is_smi' if smi -// (easy to convert to double), otherwise jump to label 'not_double_smi', -// Returns the last argument in T0. -static void TestLastArgumentIsDouble(Assembler* assembler, - Label* is_smi, - Label* not_double_smi) { - __ lw(T0, Address(SP, 0 * kWordSize)); - __ andi(CMPRES1, T0, Immediate(kSmiTagMask)); - __ beq(CMPRES1, ZR, is_smi); - __ LoadClassId(CMPRES1, T0); - __ BranchNotEqual(CMPRES1, Immediate(kDoubleCid), not_double_smi); - // Fall through with Double in T0. -} - - -// Both arguments on stack, arg0 (left) is a double, arg1 (right) is of unknown -// type. Return true or false object in the register V0. Any NaN argument -// returns false. Any non-double arg1 causes control flow to fall through to the -// slow case (compiled method body). -static void CompareDoubles(Assembler* assembler, RelationOperator rel_op) { - Label is_smi, double_op, no_NaN, fall_through; - __ Comment("CompareDoubles Intrinsic"); - - TestLastArgumentIsDouble(assembler, &is_smi, &fall_through); - // Both arguments are double, right operand is in T0. - __ LoadDFromOffset(D1, T0, Double::value_offset() - kHeapObjectTag); - __ Bind(&double_op); - __ lw(T0, Address(SP, 1 * kWordSize)); // Left argument. - __ LoadDFromOffset(D0, T0, Double::value_offset() - kHeapObjectTag); - // Now, left is in D0, right is in D1. - - __ cund(D0, D1); // Check for NaN. - __ bc1f(&no_NaN); - __ LoadObject(V0, Bool::False()); // Return false if either is NaN. - __ Ret(); - __ Bind(&no_NaN); - - switch (rel_op) { - case EQ: - __ ceqd(D0, D1); - break; - case LT: - __ coltd(D0, D1); - break; - case LE: - __ coled(D0, D1); - break; - case GT: - __ coltd(D1, D0); - break; - case GE: - __ coled(D1, D0); - break; - default: { - // Only passing the above conditions to this function. - UNREACHABLE(); - break; - } - } - - Label is_true; - __ bc1t(&is_true); - __ LoadObject(V0, Bool::False()); - __ Ret(); - __ Bind(&is_true); - __ LoadObject(V0, Bool::True()); - __ Ret(); - - - __ Bind(&is_smi); - __ SmiUntag(T0); - __ mtc1(T0, STMP1); - __ b(&double_op); - __ delay_slot()->cvtdw(D1, STMP1); - - - __ Bind(&fall_through); -} - - -void Intrinsifier::Double_greaterThan(Assembler* assembler) { - CompareDoubles(assembler, GT); -} - - -void Intrinsifier::Double_greaterEqualThan(Assembler* assembler) { - CompareDoubles(assembler, GE); -} - - -void Intrinsifier::Double_lessThan(Assembler* assembler) { - CompareDoubles(assembler, LT); -} - - -void Intrinsifier::Double_equal(Assembler* assembler) { - CompareDoubles(assembler, EQ); -} - - -void Intrinsifier::Double_lessEqualThan(Assembler* assembler) { - CompareDoubles(assembler, LE); -} - - -// Expects left argument to be double (receiver). Right argument is unknown. -// Both arguments are on stack. -static void DoubleArithmeticOperations(Assembler* assembler, Token::Kind kind) { - Label fall_through, is_smi, double_op; - - TestLastArgumentIsDouble(assembler, &is_smi, &fall_through); - // Both arguments are double, right operand is in T0. 
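// [Editor's sketch] The contract enforced by the cund/bc1f guard in
// CompareDoubles above: any NaN operand answers false. In host C++ the
// ordered '<' already behaves this way, so the explicit check only mirrors
// the emitted structure (hypothetical name):
static bool DoubleLessThan(double left, double right) {
  if (left != left || right != right) return false;  // NaN check (cund).
  return left < right;                               // coltd.
}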
- __ lwc1(F2, FieldAddress(T0, Double::value_offset())); - __ lwc1(F3, FieldAddress(T0, Double::value_offset() + kWordSize)); - __ Bind(&double_op); - __ lw(T0, Address(SP, 1 * kWordSize)); // Left argument. - __ lwc1(F0, FieldAddress(T0, Double::value_offset())); - __ lwc1(F1, FieldAddress(T0, Double::value_offset() + kWordSize)); - switch (kind) { - case Token::kADD: - __ addd(D0, D0, D1); - break; - case Token::kSUB: - __ subd(D0, D0, D1); - break; - case Token::kMUL: - __ muld(D0, D0, D1); - break; - case Token::kDIV: - __ divd(D0, D0, D1); - break; - default: - UNREACHABLE(); - } - const Class& double_class = - Class::Handle(Isolate::Current()->object_store()->double_class()); - __ TryAllocate(double_class, &fall_through, V0, T1); // Result register. - __ swc1(F0, FieldAddress(V0, Double::value_offset())); - __ Ret(); - __ delay_slot()->swc1(F1, - FieldAddress(V0, Double::value_offset() + kWordSize)); - - __ Bind(&is_smi); - __ SmiUntag(T0); - __ mtc1(T0, STMP1); - __ b(&double_op); - __ delay_slot()->cvtdw(D1, STMP1); - - __ Bind(&fall_through); -} - - -void Intrinsifier::Double_add(Assembler* assembler) { - DoubleArithmeticOperations(assembler, Token::kADD); -} - - -void Intrinsifier::Double_mul(Assembler* assembler) { - DoubleArithmeticOperations(assembler, Token::kMUL); -} - - -void Intrinsifier::Double_sub(Assembler* assembler) { - DoubleArithmeticOperations(assembler, Token::kSUB); -} - - -void Intrinsifier::Double_div(Assembler* assembler) { - DoubleArithmeticOperations(assembler, Token::kDIV); -} - - -// Left is double right is integer (Bigint, Mint or Smi) -void Intrinsifier::Double_mulFromInteger(Assembler* assembler) { - Label fall_through; - // Only smis allowed. - __ lw(T0, Address(SP, 0 * kWordSize)); - __ andi(CMPRES1, T0, Immediate(kSmiTagMask)); - __ bne(CMPRES1, ZR, &fall_through); - - // Is Smi. - __ SmiUntag(T0); - __ mtc1(T0, F4); - __ cvtdw(D1, F4); - - __ lw(T0, Address(SP, 1 * kWordSize)); - __ lwc1(F0, FieldAddress(T0, Double::value_offset())); - __ lwc1(F1, FieldAddress(T0, Double::value_offset() + kWordSize)); - __ muld(D0, D0, D1); - const Class& double_class = - Class::Handle(Isolate::Current()->object_store()->double_class()); - __ TryAllocate(double_class, &fall_through, V0, T1); // Result register. - __ swc1(F0, FieldAddress(V0, Double::value_offset())); - __ Ret(); - __ delay_slot()->swc1(F1, - FieldAddress(V0, Double::value_offset() + kWordSize)); - __ Bind(&fall_through); -} - - -void Intrinsifier::DoubleFromInteger(Assembler* assembler) { - Label fall_through; - - __ lw(T0, Address(SP, 0 * kWordSize)); - __ andi(CMPRES1, T0, Immediate(kSmiTagMask)); - __ bne(CMPRES1, ZR, &fall_through); - - // Is Smi. - __ SmiUntag(T0); - __ mtc1(T0, F4); - __ cvtdw(D0, F4); - const Class& double_class = - Class::Handle(Isolate::Current()->object_store()->double_class()); - __ TryAllocate(double_class, &fall_through, V0, T1); // Result register. - __ swc1(F0, FieldAddress(V0, Double::value_offset())); - __ Ret(); - __ delay_slot()->swc1(F1, - FieldAddress(V0, Double::value_offset() + kWordSize)); - __ Bind(&fall_through); -} - - -void Intrinsifier::Double_getIsNaN(Assembler* assembler) { - Label is_true; - - __ lw(T0, Address(SP, 0 * kWordSize)); - __ lwc1(F0, FieldAddress(T0, Double::value_offset())); - __ lwc1(F1, FieldAddress(T0, Double::value_offset() + kWordSize)); - __ cund(D0, D0); // Check for NaN. - __ bc1t(&is_true); - __ LoadObject(V0, Bool::False()); // Return false if either is NaN. 
- __ Ret();
- __ Bind(&is_true);
- __ LoadObject(V0, Bool::True());
- __ Ret();
-}
-
-
-void Intrinsifier::Double_getIsInfinite(Assembler* assembler) {
- Label not_inf;
- __ lw(T0, Address(SP, 0 * kWordSize));
- __ lw(T1, FieldAddress(T0, Double::value_offset()));
- __ lw(T2, FieldAddress(T0, Double::value_offset() + kWordSize));
- // If the low word isn't zero, then it isn't infinity.
- __ bne(T1, ZR, &not_inf);
- // Mask off the sign bit.
- __ AndImmediate(T2, T2, 0x7FFFFFFF);
- // Compare with +infinity.
- __ BranchNotEqual(T2, Immediate(0x7FF00000), &not_inf);
-
- __ LoadObject(V0, Bool::True());
- __ Ret();
-
- __ Bind(&not_inf);
- __ LoadObject(V0, Bool::False());
- __ Ret();
-}
-
-
-void Intrinsifier::Double_getIsNegative(Assembler* assembler) {
- Label is_false, is_true, is_zero;
- __ lw(T0, Address(SP, 0 * kWordSize));
- __ LoadDFromOffset(D0, T0, Double::value_offset() - kHeapObjectTag);
-
- __ cund(D0, D0);
- __ bc1t(&is_false); // NaN -> false.
-
- __ LoadImmediate(D1, 0.0);
- __ ceqd(D0, D1);
- __ bc1t(&is_zero); // Check for negative zero.
-
- __ coled(D1, D0);
- __ bc1t(&is_false); // >= 0 -> false.
-
- __ Bind(&is_true);
- __ LoadObject(V0, Bool::True());
- __ Ret();
-
- __ Bind(&is_false);
- __ LoadObject(V0, Bool::False());
- __ Ret();
-
- __ Bind(&is_zero);
- // Check for negative zero by looking at the sign bit.
- __ mfc1(T0, F1); // Moves bits 32...63 of D0 to T0.
- __ srl(T0, T0, 31); // Get the sign bit down to bit 0 of T0.
- __ andi(CMPRES1, T0, Immediate(1)); // Check if the bit is set.
- __ bne(T0, ZR, &is_true); // Sign bit set. True.
- __ b(&is_false);
-}
-
-
-void Intrinsifier::DoubleToInteger(Assembler* assembler) {
- __ lw(T0, Address(SP, 0 * kWordSize));
- __ LoadDFromOffset(D0, T0, Double::value_offset() - kHeapObjectTag);
-
- __ truncwd(F2, D0);
- __ mfc1(V0, F2);
-
- // Overflow is signaled with minint.
- Label fall_through;
- // Check for overflow and that it fits into Smi.
- __ LoadImmediate(TMP, 0xC0000000);
- __ subu(CMPRES1, V0, TMP);
- __ bltz(CMPRES1, &fall_through);
- __ Ret();
- __ delay_slot()->SmiTag(V0);
- __ Bind(&fall_through);
-}
-
-
-void Intrinsifier::MathSqrt(Assembler* assembler) {
- Label fall_through, is_smi, double_op;
- TestLastArgumentIsDouble(assembler, &is_smi, &fall_through);
- // Argument is double and is in T0.
- __ LoadDFromOffset(D1, T0, Double::value_offset() - kHeapObjectTag);
- __ Bind(&double_op);
- __ sqrtd(D0, D1);
- const Class& double_class =
- Class::Handle(Isolate::Current()->object_store()->double_class());
- __ TryAllocate(double_class, &fall_through, V0, T1); // Result register.
- __ swc1(F0, FieldAddress(V0, Double::value_offset()));
- __ Ret();
- __ delay_slot()->swc1(F1,
- FieldAddress(V0, Double::value_offset() + kWordSize));
-
- __ Bind(&is_smi);
- __ SmiUntag(T0);
- __ mtc1(T0, F2);
- __ b(&double_op);
- __ delay_slot()->cvtdw(D1, F2);
- __ Bind(&fall_through);
-}
-
-
-// var state = ((_A * (_state[kSTATE_LO])) + _state[kSTATE_HI]) & _MASK_64;
-// _state[kSTATE_LO] = state & _MASK_32;
-// _state[kSTATE_HI] = state >> 32;
-void Intrinsifier::Random_nextState(Assembler* assembler) {
- const Library& math_lib = Library::Handle(Library::MathLibrary());
- ASSERT(!math_lib.IsNull());
- const Class& random_class =
- Class::Handle(math_lib.LookupClassAllowPrivate(Symbols::_Random()));
- ASSERT(!random_class.IsNull());
- const Field& state_field = Field::ZoneHandle(
- random_class.LookupInstanceFieldAllowPrivate(Symbols::_state()));
- ASSERT(!state_field.IsNull());
- const Field& random_A_field = Field::ZoneHandle(
- random_class.LookupStaticFieldAllowPrivate(Symbols::_A()));
- ASSERT(!random_A_field.IsNull());
- ASSERT(random_A_field.is_const());
- Instance& a_value = Instance::Handle(random_A_field.StaticValue());
- if (a_value.raw() == Object::sentinel().raw() ||
- a_value.raw() == Object::transition_sentinel().raw()) {
- random_A_field.EvaluateInitializer();
- a_value = random_A_field.StaticValue();
- }
- const int64_t a_int_value = Integer::Cast(a_value).AsInt64Value();
- // 'a_int_value' is a mask.
- ASSERT(Utils::IsUint(32, a_int_value));
- int32_t a_int32_value = static_cast<int32_t>(a_int_value);
-
- // Receiver.
- __ lw(T0, Address(SP, 0 * kWordSize));
- // Field '_state'.
- __ lw(T1, FieldAddress(T0, state_field.Offset()));
-
- // Addresses of _state[0] and _state[1].
- const intptr_t scale = Instance::ElementSizeFor(kTypedDataUint32ArrayCid);
- const intptr_t offset = Instance::DataOffsetFor(kTypedDataUint32ArrayCid);
- const Address& addr_0 = FieldAddress(T1, 0 * scale + offset);
- const Address& addr_1 = FieldAddress(T1, 1 * scale + offset);
-
- __ LoadImmediate(T0, a_int32_value);
- __ lw(T2, addr_0);
- __ lw(T3, addr_1);
- __ mtlo(T3);
- __ mthi(ZR); // HI:LO <- ZR:T3 Zero extend T3 into HI.
- // 64-bit multiply and accumulate into T6:T3.
- __ maddu(T0, T2); // HI:LO <- HI:LO + T0 * T2.
- __ mflo(T3);
- __ mfhi(T6);
- __ sw(T3, addr_0);
- __ sw(T6, addr_1);
- __ Ret();
-}
-
-
-void Intrinsifier::ObjectEquals(Assembler* assembler) {
- Label is_true;
-
- __ lw(T0, Address(SP, 0 * kWordSize));
- __ lw(T1, Address(SP, 1 * kWordSize));
- __ beq(T0, T1, &is_true);
- __ LoadObject(V0, Bool::False());
- __ Ret();
- __ Bind(&is_true);
- __ LoadObject(V0, Bool::True());
- __ Ret();
-}
-
-
-enum RangeCheckCondition { kIfNotInRange, kIfInRange };
-
-
-static void RangeCheck(Assembler* assembler,
- Register val,
- Register tmp,
- intptr_t low,
- intptr_t high,
- RangeCheckCondition cc,
- Label* target) {
- __ AddImmediate(tmp, val, -low);
- if (cc == kIfInRange) {
- __ BranchUnsignedLessEqual(tmp, Immediate(high - low), target);
- } else {
- ASSERT(cc == kIfNotInRange);
- __ BranchUnsignedGreater(tmp, Immediate(high - low), target);
- }
-}
-
-
-static void JumpIfInteger(Assembler* assembler,
- Register cid,
- Register tmp,
- Label* target) {
- RangeCheck(assembler, cid, tmp, kSmiCid, kBigintCid, kIfInRange, target);
-}
-
-
-static void JumpIfNotInteger(Assembler* assembler,
- Register cid,
- Register tmp,
- Label* target) {
- RangeCheck(assembler, cid, tmp, kSmiCid, kBigintCid, kIfNotInRange, target);
-}
-
-
-static void JumpIfString(Assembler* assembler,
- Register cid,
- Register tmp,
- Label* target) {
- RangeCheck(assembler, cid, tmp, kOneByteStringCid, kExternalTwoByteStringCid,
- kIfInRange, target);
-}
-
-
-static void JumpIfNotString(Assembler* assembler,
- Register cid,
- Register tmp,
- Label* target) {
- RangeCheck(assembler, cid, tmp, kOneByteStringCid, kExternalTwoByteStringCid,
- kIfNotInRange, target);
-}
-
-
-// Return type quickly for simple types (not parameterized and not signature).
-void Intrinsifier::ObjectRuntimeType(Assembler* assembler) {
- Label fall_through, use_canonical_type, not_integer, not_double;
- __ lw(T0, Address(SP, 0 * kWordSize));
- __ LoadClassIdMayBeSmi(T1, T0);
-
- // Closures are handled in the runtime.
- __ BranchEqual(T1, Immediate(kClosureCid), &fall_through);
-
- __ BranchUnsignedGreaterEqual(T1, Immediate(kNumPredefinedCids),
- &use_canonical_type);
-
- __ BranchNotEqual(T1, Immediate(kDoubleCid), &not_double);
- // Object is a double.
- __ LoadIsolate(T1);
- __ LoadFromOffset(T1, T1, Isolate::object_store_offset());
- __ LoadFromOffset(V0, T1, ObjectStore::double_type_offset());
- __ Ret();
-
- __ Bind(&not_double);
- JumpIfNotInteger(assembler, T1, T2, &not_integer);
- // Object is an integer.
- __ LoadIsolate(T1);
- __ LoadFromOffset(T1, T1, Isolate::object_store_offset());
- __ LoadFromOffset(V0, T1, ObjectStore::int_type_offset());
- __ Ret();
-
- __ Bind(&not_integer);
- JumpIfNotString(assembler, T1, T2, &use_canonical_type);
- // Object is a string.
- __ LoadIsolate(T1);
- __ LoadFromOffset(T1, T1, Isolate::object_store_offset());
- __ LoadFromOffset(V0, T1, ObjectStore::string_type_offset());
- __ Ret();
-
- __ Bind(&use_canonical_type);
- __ LoadClassById(T2, T1);
- __ lhu(T1, FieldAddress(T2, Class::num_type_arguments_offset()));
- __ BranchNotEqual(T1, Immediate(0), &fall_through);
-
- __ lw(V0, FieldAddress(T2, Class::canonical_type_offset()));
- __ BranchEqual(V0, Object::null_object(), &fall_through);
- __ Ret();
-
- __ Bind(&fall_through);
-}
-
-
-void Intrinsifier::ObjectHaveSameRuntimeType(Assembler* assembler) {
- Label fall_through, different_cids, equal, not_equal, not_integer;
-
- __ lw(T0, Address(SP, 0 * kWordSize));
- __ LoadClassIdMayBeSmi(T1, T0);
-
- // Closures are handled in the runtime.
- __ BranchEqual(T1, Immediate(kClosureCid), &fall_through);
-
- __ lw(T0, Address(SP, 1 * kWordSize));
- __ LoadClassIdMayBeSmi(T2, T0);
-
- // Check whether class ids match. If class ids don't match objects can still
- // have the same runtime type (e.g. multiple string implementation classes
- // map to a single String type).
- __ BranchNotEqual(T1, T2, &different_cids);
-
- // Objects have the same class and neither is a closure.
- // Check if there are no type arguments. In this case we can return true.
- // Otherwise fall through into the runtime to handle comparison.
- __ LoadClassById(T2, T1);
- __ lhu(T1, FieldAddress(T2, Class::num_type_arguments_offset()));
- __ BranchNotEqual(T1, Immediate(0), &fall_through);
-
- __ Bind(&equal);
- __ LoadObject(V0, Bool::True());
- __ Ret();
-
- // Class ids are different. Check if we are comparing runtime types of
- // two strings (with different representations) or two integers.
- __ Bind(&different_cids);
- __ BranchUnsignedGreaterEqual(T1, Immediate(kNumPredefinedCids), &not_equal);
-
- // Check if both are integers.
- JumpIfNotInteger(assembler, T1, T0, &not_integer);
- JumpIfInteger(assembler, T2, T0, &equal);
- __ b(&not_equal);
-
- __ Bind(&not_integer);
- // Check if both are strings.
- JumpIfNotString(assembler, T1, T0, &not_equal);
- JumpIfString(assembler, T2, T0, &equal);
-
- // Neither strings nor integers and have different class ids.
- __ Bind(&not_equal);
- __ LoadObject(V0, Bool::False());
- __ Ret();
-
- __ Bind(&fall_through);
-}
-
-
-void Intrinsifier::String_getHashCode(Assembler* assembler) {
- Label fall_through;
- __ lw(T0, Address(SP, 0 * kWordSize));
- __ lw(V0, FieldAddress(T0, String::hash_offset()));
- __ beq(V0, ZR, &fall_through);
- __ Ret();
- __ Bind(&fall_through); // Hash not yet computed.
-} - - -void GenerateSubstringMatchesSpecialization(Assembler* assembler, - intptr_t receiver_cid, - intptr_t other_cid, - Label* return_true, - Label* return_false) { - __ SmiUntag(A1); - __ lw(T1, FieldAddress(A0, String::length_offset())); // this.length - __ SmiUntag(T1); - __ lw(T2, FieldAddress(A2, String::length_offset())); // other.length - __ SmiUntag(T2); - - // if (other.length == 0) return true; - __ beq(T2, ZR, return_true); - - // if (start < 0) return false; - __ bltz(A1, return_false); - - // if (start + other.length > this.length) return false; - __ addu(T0, A1, T2); - __ BranchSignedGreater(T0, T1, return_false); - - if (receiver_cid == kOneByteStringCid) { - __ AddImmediate(A0, A0, OneByteString::data_offset() - kHeapObjectTag); - __ addu(A0, A0, A1); - } else { - ASSERT(receiver_cid == kTwoByteStringCid); - __ AddImmediate(A0, A0, TwoByteString::data_offset() - kHeapObjectTag); - __ addu(A0, A0, A1); - __ addu(A0, A0, A1); - } - if (other_cid == kOneByteStringCid) { - __ AddImmediate(A2, A2, OneByteString::data_offset() - kHeapObjectTag); - } else { - ASSERT(other_cid == kTwoByteStringCid); - __ AddImmediate(A2, A2, TwoByteString::data_offset() - kHeapObjectTag); - } - - // i = 0 - __ LoadImmediate(T0, 0); - - // do - Label loop; - __ Bind(&loop); - - if (receiver_cid == kOneByteStringCid) { - __ lbu(T3, Address(A0, 0)); // this.codeUnitAt(i + start) - } else { - __ lhu(T3, Address(A0, 0)); // this.codeUnitAt(i + start) - } - if (other_cid == kOneByteStringCid) { - __ lbu(T4, Address(A2, 0)); // other.codeUnitAt(i) - } else { - __ lhu(T4, Address(A2, 0)); // other.codeUnitAt(i) - } - __ bne(T3, T4, return_false); - - // i++, while (i < len) - __ AddImmediate(T0, T0, 1); - __ AddImmediate(A0, A0, receiver_cid == kOneByteStringCid ? 1 : 2); - __ AddImmediate(A2, A2, other_cid == kOneByteStringCid ? 1 : 2); - __ BranchSignedLess(T0, T2, &loop); - - __ b(return_true); -} - - -// bool _substringMatches(int start, String other) -// This intrinsic handles a OneByteString or TwoByteString receiver with a -// OneByteString other. -void Intrinsifier::StringBaseSubstringMatches(Assembler* assembler) { - Label fall_through, return_true, return_false, try_two_byte; - __ lw(A0, Address(SP, 2 * kWordSize)); // this - __ lw(A1, Address(SP, 1 * kWordSize)); // start - __ lw(A2, Address(SP, 0 * kWordSize)); // other - - __ andi(CMPRES1, A1, Immediate(kSmiTagMask)); - __ bne(CMPRES1, ZR, &fall_through); // 'start' is not a Smi. - - __ LoadClassId(CMPRES1, A2); - __ BranchNotEqual(CMPRES1, Immediate(kOneByteStringCid), &fall_through); - - __ LoadClassId(CMPRES1, A0); - __ BranchNotEqual(CMPRES1, Immediate(kOneByteStringCid), &try_two_byte); - - GenerateSubstringMatchesSpecialization(assembler, kOneByteStringCid, - kOneByteStringCid, &return_true, - &return_false); - - __ Bind(&try_two_byte); - __ LoadClassId(CMPRES1, A0); - __ BranchNotEqual(CMPRES1, Immediate(kTwoByteStringCid), &fall_through); - - GenerateSubstringMatchesSpecialization(assembler, kTwoByteStringCid, - kOneByteStringCid, &return_true, - &return_false); - - __ Bind(&return_true); - __ LoadObject(V0, Bool::True()); - __ Ret(); - - __ Bind(&return_false); - __ LoadObject(V0, Bool::False()); - __ Ret(); - - __ Bind(&fall_through); -} - - -void Intrinsifier::StringBaseCharAt(Assembler* assembler) { - Label fall_through, try_two_byte_string; - - __ lw(T1, Address(SP, 0 * kWordSize)); // Index. - __ lw(T0, Address(SP, 1 * kWordSize)); // String. - - // Checks. 
- __ andi(CMPRES1, T1, Immediate(kSmiTagMask));
- __ bne(CMPRES1, ZR, &fall_through); // Index is not a Smi.
- __ lw(T2, FieldAddress(T0, String::length_offset())); // Range check.
- // Runtime throws exception.
- __ BranchUnsignedGreaterEqual(T1, T2, &fall_through);
- __ LoadClassId(CMPRES1, T0); // Class ID check.
- __ BranchNotEqual(CMPRES1, Immediate(kOneByteStringCid),
- &try_two_byte_string);
-
- // Grab byte and return.
- __ SmiUntag(T1);
- __ addu(T2, T0, T1);
- __ lbu(T2, FieldAddress(T2, OneByteString::data_offset()));
- __ BranchUnsignedGreaterEqual(
- T2, Immediate(Symbols::kNumberOfOneCharCodeSymbols), &fall_through);
- __ lw(V0, Address(THR, Thread::predefined_symbols_address_offset()));
- __ AddImmediate(V0, Symbols::kNullCharCodeSymbolOffset * kWordSize);
- __ sll(T2, T2, 2);
- __ addu(T2, T2, V0);
- __ Ret();
- __ delay_slot()->lw(V0, Address(T2));
-
- __ Bind(&try_two_byte_string);
- __ BranchNotEqual(CMPRES1, Immediate(kTwoByteStringCid), &fall_through);
- ASSERT(kSmiTagShift == 1);
- __ addu(T2, T0, T1);
- __ lhu(T2, FieldAddress(T2, TwoByteString::data_offset()));
- __ BranchUnsignedGreaterEqual(
- T2, Immediate(Symbols::kNumberOfOneCharCodeSymbols), &fall_through);
- __ lw(V0, Address(THR, Thread::predefined_symbols_address_offset()));
- __ AddImmediate(V0, Symbols::kNullCharCodeSymbolOffset * kWordSize);
- __ sll(T2, T2, 2);
- __ addu(T2, T2, V0);
- __ Ret();
- __ delay_slot()->lw(V0, Address(T2));
-
- __ Bind(&fall_through);
-}
-
-
-void Intrinsifier::StringBaseIsEmpty(Assembler* assembler) {
- Label is_true;
-
- __ lw(T0, Address(SP, 0 * kWordSize));
- __ lw(T0, FieldAddress(T0, String::length_offset()));
-
- __ beq(T0, ZR, &is_true);
- __ LoadObject(V0, Bool::False());
- __ Ret();
- __ Bind(&is_true);
- __ LoadObject(V0, Bool::True());
- __ Ret();
-}
-
-
-void Intrinsifier::OneByteString_getHashCode(Assembler* assembler) {
- Label no_hash;
-
- __ lw(T1, Address(SP, 0 * kWordSize));
- __ lw(V0, FieldAddress(T1, String::hash_offset()));
- __ beq(V0, ZR, &no_hash);
- __ Ret(); // Return if already computed.
- __ Bind(&no_hash);
-
- __ lw(T2, FieldAddress(T1, String::length_offset()));
-
- Label done;
- // If the string is empty, set the hash to 1, and return.
- __ BranchEqual(T2, Immediate(Smi::RawValue(0)), &done);
- __ delay_slot()->mov(V0, ZR);
-
- __ SmiUntag(T2);
- __ AddImmediate(T3, T1, OneByteString::data_offset() - kHeapObjectTag);
- __ addu(T4, T3, T2);
- // V0: Hash code, untagged integer.
- // T1: Instance of OneByteString.
- // T2: String length, untagged integer.
- // T3: String data start.
- // T4: String data end.
-
- Label loop;
- // Add to hash code: (hash_ is uint32)
- // hash_ += ch;
- // hash_ += hash_ << 10;
- // hash_ ^= hash_ >> 6;
- // Get one characters (ch).
- __ Bind(&loop);
- __ lbu(T5, Address(T3));
- // T5: ch.
- __ addiu(T3, T3, Immediate(1));
- __ addu(V0, V0, T5);
- __ sll(T6, V0, 10);
- __ addu(V0, V0, T6);
- __ srl(T6, V0, 6);
- __ bne(T3, T4, &loop);
- __ delay_slot()->xor_(V0, V0, T6);
-
- // Finalize.
- // hash_ += hash_ << 3;
- // hash_ ^= hash_ >> 11;
- // hash_ += hash_ << 15;
- __ sll(T6, V0, 3);
- __ addu(V0, V0, T6);
- __ srl(T6, V0, 11);
- __ xor_(V0, V0, T6);
- __ sll(T6, V0, 15);
- __ addu(V0, V0, T6);
- // hash_ = hash_ & ((static_cast<intptr_t>(1) << bits) - 1);
- __ LoadImmediate(T6, (static_cast<intptr_t>(1) << String::kHashBits) - 1);
- __ and_(V0, V0, T6);
- __ Bind(&done);
-
- __ LoadImmediate(T2, 1);
- __ movz(V0, T2, V0); // If V0 is 0, set to 1.
- __ SmiTag(V0); - - __ Ret(); - __ delay_slot()->sw(V0, FieldAddress(T1, String::hash_offset())); -} - - -// Allocates one-byte string of length 'end - start'. The content is not -// initialized. -// 'length-reg' (T2) contains tagged length. -// Returns new string as tagged pointer in V0. -static void TryAllocateOnebyteString(Assembler* assembler, - Label* ok, - Label* failure) { - const Register length_reg = T2; - NOT_IN_PRODUCT(__ MaybeTraceAllocation(kOneByteStringCid, V0, failure)); - __ mov(T6, length_reg); // Save the length register. - // TODO(koda): Protect against negative length and overflow here. - __ SmiUntag(length_reg); - const intptr_t fixed_size_plus_alignment_padding = - sizeof(RawString) + kObjectAlignment - 1; - __ AddImmediate(length_reg, fixed_size_plus_alignment_padding); - __ LoadImmediate(TMP, ~(kObjectAlignment - 1)); - __ and_(length_reg, length_reg, TMP); - - const intptr_t cid = kOneByteStringCid; - Heap::Space space = Heap::kNew; - __ lw(T3, Address(THR, Thread::heap_offset())); - __ lw(V0, Address(T3, Heap::TopOffset(space))); - - // length_reg: allocation size. - __ addu(T1, V0, length_reg); - __ BranchUnsignedLess(T1, V0, failure); // Fail on unsigned overflow. - - // Check if the allocation fits into the remaining space. - // V0: potential new object start. - // T1: potential next object start. - // T2: allocation size. - // T3: heap. - __ lw(T4, Address(T3, Heap::EndOffset(space))); - __ BranchUnsignedGreaterEqual(T1, T4, failure); - - // Successfully allocated the object(s), now update top to point to - // next object start and initialize the object. - __ sw(T1, Address(T3, Heap::TopOffset(space))); - __ AddImmediate(V0, kHeapObjectTag); - - NOT_IN_PRODUCT(__ UpdateAllocationStatsWithSize(cid, T2, T3, space)); - - // Initialize the tags. - // V0: new object start as a tagged pointer. - // T1: new object end address. - // T2: allocation size. - { - Label overflow, done; - const intptr_t shift = RawObject::kSizeTagPos - kObjectAlignmentLog2; - - __ BranchUnsignedGreater(T2, Immediate(RawObject::SizeTag::kMaxSizeTag), - &overflow); - __ b(&done); - __ delay_slot()->sll(T2, T2, shift); - __ Bind(&overflow); - __ mov(T2, ZR); - __ Bind(&done); - - // Get the class index and insert it into the tags. - // T2: size and bit tags. - __ LoadImmediate(TMP, RawObject::ClassIdTag::encode(cid)); - __ or_(T2, T2, TMP); - __ sw(T2, FieldAddress(V0, String::tags_offset())); // Store tags. - } - - // Set the length field using the saved length (T6). - __ StoreIntoObjectNoBarrier(V0, FieldAddress(V0, String::length_offset()), - T6); - // Clear hash. - __ b(ok); - __ delay_slot()->sw(ZR, FieldAddress(V0, String::hash_offset())); -} - - -// Arg0: OneByteString (receiver). -// Arg1: Start index as Smi. -// Arg2: End index as Smi. -// The indexes must be valid. -void Intrinsifier::OneByteString_substringUnchecked(Assembler* assembler) { - const intptr_t kStringOffset = 2 * kWordSize; - const intptr_t kStartIndexOffset = 1 * kWordSize; - const intptr_t kEndIndexOffset = 0 * kWordSize; - Label fall_through, ok; - - __ lw(T2, Address(SP, kEndIndexOffset)); - __ lw(TMP, Address(SP, kStartIndexOffset)); - __ or_(CMPRES1, T2, TMP); - __ andi(CMPRES1, CMPRES1, Immediate(kSmiTagMask)); - __ bne(CMPRES1, ZR, &fall_through); // 'start', 'end' not Smi. - - __ subu(T2, T2, TMP); - TryAllocateOnebyteString(assembler, &ok, &fall_through); - __ Bind(&ok); - // V0: new string as tagged pointer. - // Copy string. 
- __ lw(T3, Address(SP, kStringOffset)); - __ lw(T1, Address(SP, kStartIndexOffset)); - __ SmiUntag(T1); - __ addu(T3, T3, T1); - __ AddImmediate(T3, OneByteString::data_offset() - 1); - - // T3: Start address to copy from (untagged). - // T1: Untagged start index. - __ lw(T2, Address(SP, kEndIndexOffset)); - __ SmiUntag(T2); - __ subu(T2, T2, T1); - - // T3: Start address to copy from (untagged). - // T2: Untagged number of bytes to copy. - // V0: Tagged result string. - // T6: Pointer into T3. - // T7: Pointer into T0. - // T1: Scratch register. - Label loop, done; - __ beq(T2, ZR, &done); - __ mov(T6, T3); - __ mov(T7, V0); - - __ Bind(&loop); - __ lbu(T1, Address(T6, 0)); - __ AddImmediate(T6, 1); - __ addiu(T2, T2, Immediate(-1)); - __ sb(T1, FieldAddress(T7, OneByteString::data_offset())); - __ bgtz(T2, &loop); - __ delay_slot()->addiu(T7, T7, Immediate(1)); - - __ Bind(&done); - __ Ret(); - __ Bind(&fall_through); -} - - -void Intrinsifier::OneByteStringSetAt(Assembler* assembler) { - __ lw(T2, Address(SP, 0 * kWordSize)); // Value. - __ lw(T1, Address(SP, 1 * kWordSize)); // Index. - __ lw(T0, Address(SP, 2 * kWordSize)); // OneByteString. - __ SmiUntag(T1); - __ SmiUntag(T2); - __ addu(T3, T0, T1); - __ Ret(); - __ delay_slot()->sb(T2, FieldAddress(T3, OneByteString::data_offset())); -} - - -void Intrinsifier::OneByteString_allocate(Assembler* assembler) { - Label fall_through, ok; - - __ lw(T2, Address(SP, 0 * kWordSize)); // Length. - TryAllocateOnebyteString(assembler, &ok, &fall_through); - - __ Bind(&ok); - __ Ret(); - - __ Bind(&fall_through); -} - - -// TODO(srdjan): Add combinations (one-byte/two-byte/external strings). -static void StringEquality(Assembler* assembler, intptr_t string_cid) { - Label fall_through, is_true, is_false, loop; - __ lw(T0, Address(SP, 1 * kWordSize)); // This. - __ lw(T1, Address(SP, 0 * kWordSize)); // Other. - - // Are identical? - __ beq(T0, T1, &is_true); - - // Is other OneByteString? - __ andi(CMPRES1, T1, Immediate(kSmiTagMask)); - __ beq(CMPRES1, ZR, &fall_through); // Other is Smi. - __ LoadClassId(CMPRES1, T1); // Class ID check. - __ BranchNotEqual(CMPRES1, Immediate(string_cid), &fall_through); - - // Have same length? - __ lw(T2, FieldAddress(T0, String::length_offset())); - __ lw(T3, FieldAddress(T1, String::length_offset())); - __ bne(T2, T3, &is_false); - - // Check contents, no fall-through possible. 
- ASSERT((string_cid == kOneByteStringCid) || - (string_cid == kTwoByteStringCid)); - __ SmiUntag(T2); - __ Bind(&loop); - __ AddImmediate(T2, -1); - __ BranchSignedLess(T2, Immediate(0), &is_true); - if (string_cid == kOneByteStringCid) { - __ lbu(V0, FieldAddress(T0, OneByteString::data_offset())); - __ lbu(V1, FieldAddress(T1, OneByteString::data_offset())); - __ AddImmediate(T0, 1); - __ AddImmediate(T1, 1); - } else if (string_cid == kTwoByteStringCid) { - __ lhu(V0, FieldAddress(T0, OneByteString::data_offset())); - __ lhu(V1, FieldAddress(T1, OneByteString::data_offset())); - __ AddImmediate(T0, 2); - __ AddImmediate(T1, 2); - } else { - UNIMPLEMENTED(); - } - __ bne(V0, V1, &is_false); - __ b(&loop); - - __ Bind(&is_false); - __ LoadObject(V0, Bool::False()); - __ Ret(); - __ Bind(&is_true); - __ LoadObject(V0, Bool::True()); - __ Ret(); - - __ Bind(&fall_through); -} - - -void Intrinsifier::OneByteString_equality(Assembler* assembler) { - StringEquality(assembler, kOneByteStringCid); -} - - -void Intrinsifier::TwoByteString_equality(Assembler* assembler) { - StringEquality(assembler, kTwoByteStringCid); -} - - -void Intrinsifier::IntrinsifyRegExpExecuteMatch(Assembler* assembler, - bool sticky) { - if (FLAG_interpret_irregexp) return; - - static const intptr_t kRegExpParamOffset = 2 * kWordSize; - static const intptr_t kStringParamOffset = 1 * kWordSize; - // start_index smi is located at 0. - - // Incoming registers: - // T0: Function. (Will be reloaded with the specialized matcher function.) - // S4: Arguments descriptor. (Will be preserved.) - // S5: Unknown. (Must be GC safe on tail call.) - - // Load the specialized function pointer into T0. Leverage the fact the - // string CIDs as well as stored function pointers are in sequence. - __ lw(T1, Address(SP, kRegExpParamOffset)); - __ lw(T3, Address(SP, kStringParamOffset)); - __ LoadClassId(T2, T3); - __ AddImmediate(T2, -kOneByteStringCid); - __ sll(T2, T2, kWordSizeLog2); - __ addu(T2, T2, T1); - __ lw(T0, - FieldAddress(T2, RegExp::function_offset(kOneByteStringCid, sticky))); - - // Registers are now set up for the lazy compile stub. It expects the function - // in T0, the argument descriptor in S4, and IC-Data in S5. - __ mov(S5, ZR); - - // Tail-call the function. - __ lw(CODE_REG, FieldAddress(T0, Function::code_offset())); - __ lw(T3, FieldAddress(T0, Function::entry_point_offset())); - __ jr(T3); -} - - -// On stack: user tag (+0). -void Intrinsifier::UserTag_makeCurrent(Assembler* assembler) { - // T1: Isolate. - __ LoadIsolate(T1); - // V0: Current user tag. - __ lw(V0, Address(T1, Isolate::current_tag_offset())); - // T2: UserTag. - __ lw(T2, Address(SP, +0 * kWordSize)); - // Set Isolate::current_tag_. - __ sw(T2, Address(T1, Isolate::current_tag_offset())); - // T2: UserTag's tag. - __ lw(T2, FieldAddress(T2, UserTag::tag_offset())); - // Set Isolate::user_tag_. 
- __ sw(T2, Address(T1, Isolate::user_tag_offset())); - __ Ret(); - __ delay_slot()->sw(T2, Address(T1, Isolate::user_tag_offset())); -} - - -void Intrinsifier::UserTag_defaultTag(Assembler* assembler) { - __ LoadIsolate(V0); - __ Ret(); - __ delay_slot()->lw(V0, Address(V0, Isolate::default_tag_offset())); -} - - -void Intrinsifier::Profiler_getCurrentTag(Assembler* assembler) { - __ LoadIsolate(V0); - __ Ret(); - __ delay_slot()->lw(V0, Address(V0, Isolate::current_tag_offset())); -} - - -void Intrinsifier::Timeline_isDartStreamEnabled(Assembler* assembler) { - if (!FLAG_support_timeline) { - __ LoadObject(V0, Bool::False()); - __ Ret(); - return; - } - // Load TimelineStream*. - __ lw(V0, Address(THR, Thread::dart_stream_offset())); - // Load uintptr_t from TimelineStream*. - __ lw(T0, Address(V0, TimelineStream::enabled_offset())); - __ LoadObject(V0, Bool::True()); - __ LoadObject(V1, Bool::False()); - __ Ret(); - __ delay_slot()->movz(V0, V1, T0); // V0 = (T0 == 0) ? V1 : V0. -} - - -void Intrinsifier::ClearAsyncThreadStackTrace(Assembler* assembler) { - __ LoadObject(V0, Object::null_object()); - __ sw(V0, Address(THR, Thread::async_stack_trace_offset())); - __ Ret(); -} - - -void Intrinsifier::SetAsyncThreadStackTrace(Assembler* assembler) { - __ lw(V0, Address(THR, Thread::async_stack_trace_offset())); - __ LoadObject(V0, Object::null_object()); - __ Ret(); -} - -} // namespace dart - -#endif // defined TARGET_ARCH_MIPS diff --git a/runtime/vm/malloc_hooks_mips.cc b/runtime/vm/malloc_hooks_mips.cc deleted file mode 100644 index 98c4b31c8b4..00000000000 --- a/runtime/vm/malloc_hooks_mips.cc +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright (c) 2017, the Dart project authors. Please see the AUTHORS file -// for details. All rights reserved. Use of this source code is governed by a -// BSD-style license that can be found in the LICENSE file. - -#include "vm/malloc_hooks.h" - -#include "vm/globals.h" - -#if defined(HOST_ARCH_MIPS) - -namespace dart { - -const intptr_t kSkipCount = 5; - -} // namespace dart - -#endif // defined(HOST_ARCH_MIPS) diff --git a/runtime/vm/object.h b/runtime/vm/object.h index 4514c02705e..5c007ee2624 100644 --- a/runtime/vm/object.h +++ b/runtime/vm/object.h @@ -4230,9 +4230,6 @@ class Instructions : public Object { #elif defined(TARGET_ARCH_ARM64) static const intptr_t kCheckedEntryOffset = 16; static const intptr_t kUncheckedEntryOffset = 40; -#elif defined(TARGET_ARCH_MIPS) - static const intptr_t kCheckedEntryOffset = 12; - static const intptr_t kUncheckedEntryOffset = 52; #elif defined(TARGET_ARCH_DBC) static const intptr_t kCheckedEntryOffset = 0; static const intptr_t kUncheckedEntryOffset = 0; diff --git a/runtime/vm/object_mips_test.cc b/runtime/vm/object_mips_test.cc deleted file mode 100644 index 1c284228bea..00000000000 --- a/runtime/vm/object_mips_test.cc +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file -// for details. All rights reserved. Use of this source code is governed by a -// BSD-style license that can be found in the LICENSE file. - -#include "platform/assert.h" -#include "vm/globals.h" -#if defined(TARGET_ARCH_MIPS) - -#include "vm/assembler.h" -#include "vm/object.h" -#include "vm/unit_test.h" - -namespace dart { - -#define __ assembler-> - - -// Generate a simple dart code sequence. -// This is used to test Code and Instruction object creation. 
-void GenerateIncrement(Assembler* assembler) { - __ Push(ZR); - __ lw(TMP, Address(SP, 0)); - __ addiu(TMP, TMP, Immediate(1)); - __ sw(TMP, Address(SP, 0)); - __ lw(TMP, Address(SP, 0)); - __ addiu(TMP, TMP, Immediate(1)); - __ Pop(V0); - __ mov(V0, TMP); - __ Ret(); -} - - -// Generate a dart code sequence that embeds a string object in it. -// This is used to test Embedded String objects in the instructions. -void GenerateEmbedStringInCode(Assembler* assembler, const char* str) { - __ EnterDartFrame(0); // To setup pp. - const String& string_object = - String::ZoneHandle(String::New(str, Heap::kOld)); - __ LoadObject(V0, string_object); - __ LeaveDartFrameAndReturn(); -} - - -// Generate a dart code sequence that embeds a smi object in it. -// This is used to test Embedded Smi objects in the instructions. -void GenerateEmbedSmiInCode(Assembler* assembler, intptr_t value) { - // No need to setup pp, since Smis are not stored in the object pool. - const Smi& smi_object = Smi::ZoneHandle(Smi::New(value)); - __ LoadObject(V0, smi_object); - __ Ret(); -} - -} // namespace dart - -#endif // defined TARGET_ARCH_MIPS diff --git a/runtime/vm/os_android.cc b/runtime/vm/os_android.cc index 5d9a678af8b..e2a7b33097a 100644 --- a/runtime/vm/os_android.cc +++ b/runtime/vm/os_android.cc @@ -209,7 +209,7 @@ intptr_t OS::PreferredCodeAlignment() { #if defined(TARGET_ARCH_IA32) || defined(TARGET_ARCH_X64) || \ defined(TARGET_ARCH_ARM64) || defined(TARGET_ARCH_DBC) const int kMinimumAlignment = 32; -#elif defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_MIPS) +#elif defined(TARGET_ARCH_ARM) const int kMinimumAlignment = 16; #else #error Unsupported architecture. diff --git a/runtime/vm/os_fuchsia.cc b/runtime/vm/os_fuchsia.cc index d304b6f1c88..46f0bc5af45 100644 --- a/runtime/vm/os_fuchsia.cc +++ b/runtime/vm/os_fuchsia.cc @@ -130,7 +130,7 @@ intptr_t OS::PreferredCodeAlignment() { #if defined(TARGET_ARCH_IA32) || defined(TARGET_ARCH_X64) || \ defined(TARGET_ARCH_ARM64) || defined(TARGET_ARCH_DBC) const int kMinimumAlignment = 32; -#elif defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_MIPS) +#elif defined(TARGET_ARCH_ARM) const int kMinimumAlignment = 16; #else #error Unsupported architecture. diff --git a/runtime/vm/os_linux.cc b/runtime/vm/os_linux.cc index be37048ced5..c0f5b8b841c 100644 --- a/runtime/vm/os_linux.cc +++ b/runtime/vm/os_linux.cc @@ -198,7 +198,7 @@ intptr_t OS::ActivationFrameAlignment() { #if defined(TARGET_ARCH_IA32) || defined(TARGET_ARCH_X64) || \ defined(TARGET_ARCH_ARM64) || defined(TARGET_ARCH_DBC) const int kMinimumAlignment = 16; -#elif defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_MIPS) +#elif defined(TARGET_ARCH_ARM) const int kMinimumAlignment = 8; #else #error Unsupported architecture. @@ -217,7 +217,7 @@ intptr_t OS::PreferredCodeAlignment() { #if defined(TARGET_ARCH_IA32) || defined(TARGET_ARCH_X64) || \ defined(TARGET_ARCH_ARM64) || defined(TARGET_ARCH_DBC) const int kMinimumAlignment = 32; -#elif defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_MIPS) +#elif defined(TARGET_ARCH_ARM) const int kMinimumAlignment = 16; #else #error Unsupported architecture. 
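The os_*.cc hunks above and the ones that follow all trim the same preprocessor ladder, one copy per platform file; with MIPS gone, ARM is the only remaining 16-byte case. A minimal sketch of the surviving selection, assuming the same TARGET_ARCH_* defines (illustrative only; the SDK keeps this ladder inline in each OS::PreferredCodeAlignment() rather than in a shared helper like the one below):

// Sketch: post-MIPS shape of the code-alignment ladder. The constants mirror
// the diff hunks; the wrapper function is ours, for illustration.
#include <stdint.h>

intptr_t PreferredCodeAlignmentSketch() {
#if defined(TARGET_ARCH_IA32) || defined(TARGET_ARCH_X64) ||                   \
    defined(TARGET_ARCH_ARM64) || defined(TARGET_ARCH_DBC)
  const int kMinimumAlignment = 32;
#elif defined(TARGET_ARCH_ARM)
  const int kMinimumAlignment = 16;  // Was TARGET_ARCH_ARM || TARGET_ARCH_MIPS.
#else
#error Unsupported architecture.
#endif
  return kMinimumAlignment;
}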
diff --git a/runtime/vm/os_macos.cc b/runtime/vm/os_macos.cc index 9e9241c6774..a358609af28 100644 --- a/runtime/vm/os_macos.cc +++ b/runtime/vm/os_macos.cc @@ -195,7 +195,7 @@ intptr_t OS::PreferredCodeAlignment() { #if defined(TARGET_ARCH_IA32) || defined(TARGET_ARCH_X64) || \ defined(TARGET_ARCH_ARM64) || defined(TARGET_ARCH_DBC) const int kMinimumAlignment = 32; -#elif defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_MIPS) +#elif defined(TARGET_ARCH_ARM) const int kMinimumAlignment = 16; #else #error Unsupported architecture. diff --git a/runtime/vm/os_win.cc b/runtime/vm/os_win.cc index 46aa2b81b11..e301627eeae 100644 --- a/runtime/vm/os_win.cc +++ b/runtime/vm/os_win.cc @@ -191,7 +191,7 @@ int64_t OS::GetCurrentThreadCPUMicros() { intptr_t OS::ActivationFrameAlignment() { #if defined(TARGET_ARCH_ARM64) return 16; -#elif defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_MIPS) +#elif defined(TARGET_ARCH_ARM) return 8; #elif defined(_WIN64) // Windows 64-bit ABI requires the stack to be 16-byte aligned. @@ -208,7 +208,7 @@ intptr_t OS::PreferredCodeAlignment() { #if defined(TARGET_ARCH_IA32) || defined(TARGET_ARCH_X64) || \ defined(TARGET_ARCH_ARM64) || defined(TARGET_ARCH_DBC) return 32; -#elif defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_MIPS) +#elif defined(TARGET_ARCH_ARM) return 16; #else #error Unsupported architecture. diff --git a/runtime/vm/precompiler.cc b/runtime/vm/precompiler.cc index cc527012e08..d9800acbd33 100644 --- a/runtime/vm/precompiler.cc +++ b/runtime/vm/precompiler.cc @@ -2798,11 +2798,10 @@ bool PrecompileParsedFunctionHelper::Compile(CompilationPipeline* pipeline) { HANDLESCOPE(thread()); // We may reattempt compilation if the function needs to be assembled using - // far branches on ARM and MIPS. In the else branch of the setjmp call, - // done is set to false, and use_far_branches is set to true if there is a - // longjmp from the ARM or MIPS assemblers. In all other paths through this - // while loop, done is set to true. use_far_branches is always false on ia32 - // and x64. + // far branches on ARM. In the else branch of the setjmp call, done is set to + // false, and use_far_branches is set to true if there is a longjmp from the + // ARM assembler. In all other paths through this while loop, done is set to + // true. use_far_branches is always false on ia32 and x64. bool done = false; // volatile because the variable may be clobbered by a longjmp. 
volatile bool use_far_branches = false; diff --git a/runtime/vm/profiler.cc b/runtime/vm/profiler.cc index a7c3f0e9ee7..da856e2844b 100644 --- a/runtime/vm/profiler.cc +++ b/runtime/vm/profiler.cc @@ -32,7 +32,7 @@ static const intptr_t kMaxSamplesPerTick = 4; DEFINE_FLAG(bool, trace_profiled_isolates, false, "Trace profiled isolates."); #if defined(HOST_OS_ANDROID) || defined(TARGET_ARCH_ARM64) || \ - defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_MIPS) + defined(TARGET_ARCH_ARM) DEFINE_FLAG(int, profile_period, 10000, @@ -288,11 +288,6 @@ bool ReturnAddressLocator::LocateReturnAddress(uword* return_address) { ASSERT(return_address != NULL); return false; } -#elif defined(TARGET_ARCH_MIPS) -bool ReturnAddressLocator::LocateReturnAddress(uword* return_address) { - ASSERT(return_address != NULL); - return false; -} #elif defined(TARGET_ARCH_DBC) bool ReturnAddressLocator::LocateReturnAddress(uword* return_address) { ASSERT(return_address != NULL); diff --git a/runtime/vm/raw_object.h b/runtime/vm/raw_object.h index b1626ea5bf2..a94bf639db9 100644 --- a/runtime/vm/raw_object.h +++ b/runtime/vm/raw_object.h @@ -1013,7 +1013,7 @@ class RawField : public RawObject { // any other value otherwise. // Offset to the guarded length field inside an instance of class matching // guarded_cid_. Stored corrected by -kHeapObjectTag to simplify code - // generated on platforms with weak addressing modes (ARM, MIPS). + // generated on platforms with weak addressing modes (ARM). int8_t guarded_list_length_in_object_offset_; uint8_t kind_bits_; // static, final, const, has initializer.... diff --git a/runtime/vm/regexp_assembler_ir.cc b/runtime/vm/regexp_assembler_ir.cc index 2bdc701b831..cecc3a653f5 100644 --- a/runtime/vm/regexp_assembler_ir.cc +++ b/runtime/vm/regexp_assembler_ir.cc @@ -302,8 +302,7 @@ void IRRegExpMacroAssembler::FinalizeRegistersArray() { } -#if defined(TARGET_ARCH_ARM64) || defined(TARGET_ARCH_ARM) || \ - defined(TARGET_ARCH_MIPS) +#if defined(TARGET_ARCH_ARM64) || defined(TARGET_ARCH_ARM) // Disabling unaligned accesses forces the regexp engine to load characters one // by one instead of up to 4 at once, along with the associated performance hit. // TODO(zerny): Be less conservative about disabling unaligned accesses. diff --git a/runtime/vm/runtime_entry.h b/runtime/vm/runtime_entry.h index f4aef09023e..12209996dfa 100644 --- a/runtime/vm/runtime_entry.h +++ b/runtime/vm/runtime_entry.h @@ -130,7 +130,7 @@ class RuntimeEntry : public ValueObject { #define END_LEAF_RUNTIME_ENTRY } -// TODO(rmacnak): Fix alignment issue on simarm and simmips and use +// TODO(rmacnak): Fix alignment issue on simarm and use // DEFINE_LEAF_RUNTIME_ENTRY instead. #define DEFINE_RAW_LEAF_RUNTIME_ENTRY(name, argument_count, is_float, func) \ extern const RuntimeEntry k##name##RuntimeEntry( \ diff --git a/runtime/vm/runtime_entry_mips.cc b/runtime/vm/runtime_entry_mips.cc deleted file mode 100644 index b89781fdc80..00000000000 --- a/runtime/vm/runtime_entry_mips.cc +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file -// for details. All rights reserved. Use of this source code is governed by a -// BSD-style license that can be found in the LICENSE file. - -#include "vm/globals.h" -#if defined(TARGET_ARCH_MIPS) - -#include "vm/runtime_entry.h" - -#include "vm/assembler.h" -#include "vm/simulator.h" -#include "vm/stub_code.h" - -namespace dart { - -#define __ assembler-> - - -uword RuntimeEntry::GetEntryPoint() const { - // Compute the effective address. 
When running under the simulator,
- // this is a redirection address that forces the simulator to call
- // into the runtime system.
- uword entry = reinterpret_cast<uword>(function());
-#if defined(USING_SIMULATOR)
- // Redirection to leaf runtime calls supports a maximum of 4 arguments passed
- // in registers (maximum 2 double arguments for leaf float runtime calls).
- ASSERT(argument_count() >= 0);
- ASSERT(!is_leaf() || (!is_float() && (argument_count() <= 4)) ||
- (argument_count() <= 2));
- Simulator::CallKind call_kind =
- is_leaf() ? (is_float() ? Simulator::kLeafFloatRuntimeCall
- : Simulator::kLeafRuntimeCall)
- : Simulator::kRuntimeCall;
- entry =
- Simulator::RedirectExternalReference(entry, call_kind, argument_count());
-#endif
- return entry;
-}
-
-
-// Generate code to call into the stub which will call the runtime
-// function. Input for the stub is as follows:
-// SP : points to the arguments and return value array.
-// S5 : address of the runtime function to call.
-// S4 : number of arguments to the call.
-void RuntimeEntry::Call(Assembler* assembler, intptr_t argument_count) const {
- if (is_leaf()) {
- ASSERT(argument_count == this->argument_count());
- __ lw(T9, Address(THR, Thread::OffsetFromThread(this)));
- __ jalr(T9);
- } else {
- // Argument count is not checked here, but in the runtime entry for a more
- // informative error message.
- __ lw(S5, Address(THR, Thread::OffsetFromThread(this)));
- __ LoadImmediate(S4, argument_count);
- __ BranchLinkToRuntime();
- }
-}
-
-} // namespace dart
-
-#endif // defined TARGET_ARCH_MIPS
diff --git a/runtime/vm/signal_handler_linux.cc b/runtime/vm/signal_handler_linux.cc
index bbd5522b280..2f581b24f53 100644
--- a/runtime/vm/signal_handler_linux.cc
+++ b/runtime/vm/signal_handler_linux.cc
@@ -21,8 +21,6 @@ uintptr_t SignalHandler::GetProgramCounter(const mcontext_t& mcontext) {
 pc = static_cast<uintptr_t>(mcontext.arm_pc);
 #elif defined(HOST_ARCH_ARM64)
 pc = static_cast<uintptr_t>(mcontext.pc);
-#elif defined(HOST_ARCH_MIPS)
- pc = static_cast<uintptr_t>(mcontext.pc);
 #else
 #error Unsupported architecture.
 #endif // HOST_ARCH_...
@@ -41,8 +39,6 @@ uintptr_t SignalHandler::GetFramePointer(const mcontext_t& mcontext) {
 fp = static_cast<uintptr_t>(mcontext.arm_fp);
 #elif defined(HOST_ARCH_ARM64)
 fp = static_cast<uintptr_t>(mcontext.regs[29]);
-#elif defined(HOST_ARCH_MIPS)
- fp = static_cast<uintptr_t>(mcontext.gregs[30]);
 #else
 #error Unsupported architecture.
 #endif // HOST_ARCH_...
@@ -62,8 +58,6 @@ uintptr_t SignalHandler::GetCStackPointer(const mcontext_t& mcontext) {
 sp = static_cast<uintptr_t>(mcontext.arm_sp);
 #elif defined(HOST_ARCH_ARM64)
 sp = static_cast<uintptr_t>(mcontext.sp);
-#elif defined(HOST_ARCH_MIPS)
- sp = static_cast<uintptr_t>(mcontext.gregs[29]);
 #else
 #error Unsupported architecture.
 #endif // HOST_ARCH_...
@@ -91,8 +85,6 @@ uintptr_t SignalHandler::GetLinkRegister(const mcontext_t& mcontext) {
 lr = static_cast<uintptr_t>(mcontext.arm_lr);
 #elif defined(HOST_ARCH_ARM64)
 lr = static_cast<uintptr_t>(mcontext.regs[30]);
-#elif defined(HOST_ARCH_MIPS)
- lr = static_cast<uintptr_t>(mcontext.gregs[31]);
 #else
 #error Unsupported architecture.
 #endif // HOST_ARCH_...
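The signal_handler_linux.cc hunk above deletes the MIPS rows from the mcontext_t accessors. For context, here is a minimal sketch of how such an accessor is consumed by a sampling signal handler. This is our example, not SDK code; it assumes glibc field names and, on x86-64, that _GNU_SOURCE is in effect so REG_RIP is visible (g++ defines it by default):

#include <signal.h>
#include <stdint.h>
#include <ucontext.h>

// Mirrors the shape of SignalHandler::GetProgramCounter() in the hunk above.
static uintptr_t GetProgramCounterSketch(const mcontext_t& mcontext) {
#if defined(__x86_64__)
  return static_cast<uintptr_t>(mcontext.gregs[REG_RIP]);
#elif defined(__arm__)
  return static_cast<uintptr_t>(mcontext.arm_pc);  // Same field the hunk keeps.
#else
#error Unsupported architecture.
#endif
}

// A SIGPROF handler installed with sigaction() and SA_SIGINFO receives the
// interrupted thread's context as its third argument.
static void ProfileSignalHandler(int sig, siginfo_t* info, void* context) {
  ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
  uintptr_t pc = GetProgramCounterSketch(ucontext->uc_mcontext);
  (void)sig; (void)info; (void)pc;  // A real profiler would record pc here.
}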
diff --git a/runtime/vm/simulator.h b/runtime/vm/simulator.h
index 6dbdbc61f06..c746d45c526 100644
--- a/runtime/vm/simulator.h
+++ b/runtime/vm/simulator.h
@@ -15,8 +15,6 @@
 #include "vm/simulator_arm.h"
 #elif defined(TARGET_ARCH_ARM64)
 #include "vm/simulator_arm64.h"
-#elif defined(TARGET_ARCH_MIPS)
-#include "vm/simulator_mips.h"
 #elif defined(TARGET_ARCH_DBC)
 #include "vm/simulator_dbc.h"
 #else
diff --git a/runtime/vm/simulator_mips.cc b/runtime/vm/simulator_mips.cc
deleted file mode 100644
index 8045e3413e1..00000000000
--- a/runtime/vm/simulator_mips.cc
+++ /dev/null
@@ -1,2520 +0,0 @@
-// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
-// for details. All rights reserved. Use of this source code is governed by a
-// BSD-style license that can be found in the LICENSE file.
-
-#include <setjmp.h>  // NOLINT
-#include <stdlib.h>
-
-#include "vm/globals.h"
-#if defined(TARGET_ARCH_MIPS)
-
-// Only build the simulator if not compiling for real MIPS hardware.
-#if defined(USING_SIMULATOR)
-
-#include "vm/simulator.h"
-
-#include "vm/assembler.h"
-#include "vm/constants_mips.h"
-#include "vm/disassembler.h"
-#include "vm/lockers.h"
-#include "vm/native_arguments.h"
-#include "vm/stack_frame.h"
-#include "vm/os_thread.h"
-
-namespace dart {
-
-DEFINE_FLAG(uint64_t,
- trace_sim_after,
- ULLONG_MAX,
- "Trace simulator execution after instruction count reached.");
-DEFINE_FLAG(uint64_t,
- stop_sim_at,
- ULLONG_MAX,
- "Instruction address or instruction count to stop simulator at.");
-
-
-// This macro provides a platform independent use of sscanf. The reason for
-// SScanF not being implemented in a platform independent way through
-// OS in the same way as SNPrint is that the Windows C Run-Time
-// Library does not provide vsscanf.
-#define SScanF sscanf // NOLINT
-
-
-// SimulatorSetjmpBuffer are linked together, and the last created one
-// is referenced by the Simulator. When an exception is thrown, the exception
-// runtime looks at where to jump and finds the corresponding
-// SimulatorSetjmpBuffer based on the stack pointer of the exception handler.
-// The runtime then does a Longjmp on that buffer to return to the simulator.
-class SimulatorSetjmpBuffer {
- public:
- void Longjmp() {
- // "This" is now the last setjmp buffer.
- simulator_->set_last_setjmp_buffer(this);
- longjmp(buffer_, 1);
- }
-
- explicit SimulatorSetjmpBuffer(Simulator* sim) {
- simulator_ = sim;
- link_ = sim->last_setjmp_buffer();
- sim->set_last_setjmp_buffer(this);
- sp_ = static_cast<uword>(sim->get_register(SP));
- }
-
- ~SimulatorSetjmpBuffer() {
- ASSERT(simulator_->last_setjmp_buffer() == this);
- simulator_->set_last_setjmp_buffer(link_);
- }
-
- SimulatorSetjmpBuffer* link() { return link_; }
-
- uword sp() { return sp_; }
-
- private:
- uword sp_;
- Simulator* simulator_;
- SimulatorSetjmpBuffer* link_;
- jmp_buf buffer_;
-
- friend class Simulator;
-};
-
-
-// The SimulatorDebugger class is used by the simulator while debugging
-// simulated MIPS code.
-class SimulatorDebugger {
- public:
- explicit SimulatorDebugger(Simulator* sim);
- ~SimulatorDebugger();
-
- void Stop(Instr* instr, const char* message);
- void Debug();
- char* ReadLine(const char* prompt);
-
- private:
- Simulator* sim_;
-
- bool GetValue(char* desc, uint32_t* value);
- bool GetFValue(char* desc, double* value);
- bool GetDValue(char* desc, double* value);
-
- static TokenPosition GetApproximateTokenIndex(const Code& code, uword pc);
-
- static void PrintDartFrame(uword pc,
- uword fp,
- uword sp,
- const Function& function,
- TokenPosition token_pos,
- bool is_optimized,
- bool is_inlined);
- void PrintBacktrace();
-
- // Set or delete a breakpoint. Returns true if successful.
- bool SetBreakpoint(Instr* breakpc);
- bool DeleteBreakpoint(Instr* breakpc);
-
- // Undo and redo all breakpoints. This is needed to bracket disassembly and
- // execution to skip past breakpoints when run from the debugger.
- void UndoBreakpoints();
- void RedoBreakpoints();
-};
-
-
-SimulatorDebugger::SimulatorDebugger(Simulator* sim) {
- sim_ = sim;
-}
-
-
-SimulatorDebugger::~SimulatorDebugger() {}
-
-
-void SimulatorDebugger::Stop(Instr* instr, const char* message) {
- OS::Print("Simulator hit %s\n", message);
- Debug();
-}
-
-
-static Register LookupCpuRegisterByName(const char* name) {
- static const char* kNames[] = {
- "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
- "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
- "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
- "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
-
- "zr", "at", "v0", "v1", "a0", "a1", "a2", "a3",
- "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7",
- "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
- "t8", "t9", "k0", "k1", "gp", "sp", "fp", "ra"};
- static const Register kRegisters[] = {
- R0, R1, R2, R3, R4, R5, R6, R7, R8, R9, R10,
- R11, R12, R13, R14, R15, R16, R17, R18, R19, R20, R21,
- R22, R23, R24, R25, R26, R27, R28, R29, R30, R31,
-
- ZR, AT, V0, V1, A0, A1, A2, A3, T0, T1, T2,
- T3, T4, T5, T6, T7, S0, S1, S2, S3, S4, S5,
- S6, S7, T8, T9, K0, K1, GP, SP, FP, RA};
- ASSERT(ARRAY_SIZE(kNames) == ARRAY_SIZE(kRegisters));
- for (unsigned i = 0; i < ARRAY_SIZE(kNames); i++) {
- if (strcmp(kNames[i], name) == 0) {
- return kRegisters[i];
- }
- }
- return kNoRegister;
-}
-
-
-static FRegister LookupFRegisterByName(const char* name) {
- int reg_nr = -1;
- bool ok = SScanF(name, "f%d", &reg_nr);
- if (ok && (0 <= reg_nr) && (reg_nr < kNumberOfFRegisters)) {
- return static_cast<FRegister>(reg_nr);
- }
- return kNoFRegister;
-}
-
-
-bool SimulatorDebugger::GetValue(char* desc, uint32_t* value) {
- Register reg = LookupCpuRegisterByName(desc);
- if (reg != kNoRegister) {
- *value = sim_->get_register(reg);
- return true;
- }
- if (desc[0] == '*') {
- uint32_t addr;
- if (GetValue(desc + 1, &addr)) {
- if (Simulator::IsIllegalAddress(addr)) {
- return false;
- }
- *value = *(reinterpret_cast<uint32_t*>(addr));
- return true;
- }
- }
- if (strcmp("pc", desc) == 0) {
- *value = sim_->get_pc();
- return true;
- }
- bool retval = SScanF(desc, "0x%x", value) == 1;
- if (!retval) {
- retval = SScanF(desc, "%x", value) == 1;
- }
- return retval;
-}
-
-
-bool SimulatorDebugger::GetFValue(char* desc, double* value) {
- FRegister freg = LookupFRegisterByName(desc);
- if (freg != kNoFRegister) {
- *value = sim_->get_fregister(freg);
- return true;
- }
- if (desc[0] == '*') {
- uint32_t addr;
- if (GetValue(desc + 1, &addr)) {
- if (Simulator::IsIllegalAddress(addr)) {
- return false;
- }
- *value = *(reinterpret_cast<float*>(addr));
- return true;
- }
- }
- return false;
-}
-
-
-bool SimulatorDebugger::GetDValue(char* desc, double* value) {
- FRegister freg = LookupFRegisterByName(desc);
- if (freg != kNoFRegister) {
- *value = sim_->get_fregister_double(freg);
- return true;
- }
- if (desc[0] == '*') {
- uint32_t addr;
- if (GetValue(desc + 1, &addr)) {
- if (Simulator::IsIllegalAddress(addr)) {
- return false;
- }
- *value = *(reinterpret_cast<double*>(addr));
- return true;
- }
- }
- return false;
-}
-
-
-TokenPosition SimulatorDebugger::GetApproximateTokenIndex(const Code& code,
- uword pc) {
- TokenPosition token_pos = TokenPosition::kNoSource;
- uword pc_offset = pc - code.PayloadStart();
- const PcDescriptors& descriptors =
- PcDescriptors::Handle(code.pc_descriptors());
- PcDescriptors::Iterator iter(descriptors, RawPcDescriptors::kAnyKind);
- while (iter.MoveNext()) {
- if (iter.PcOffset() == pc_offset) {
- return iter.TokenPos();
- } else if (!token_pos.IsReal() && (iter.PcOffset() > pc_offset)) {
- token_pos = iter.TokenPos();
- }
- }
- return token_pos;
-}
-
-
-void SimulatorDebugger::PrintDartFrame(uword pc,
- uword fp,
- uword sp,
- const Function& function,
- TokenPosition token_pos,
- bool is_optimized,
- bool is_inlined) {
- const Script& script = Script::Handle(function.script());
- const String& func_name = String::Handle(function.QualifiedScrubbedName());
- const String& url = String::Handle(script.url());
- intptr_t line = -1;
- intptr_t column = -1;
- if (token_pos.IsReal()) {
- script.GetTokenLocation(token_pos, &line, &column);
- }
- OS::Print(
- "pc=0x%" Px " fp=0x%" Px " sp=0x%" Px " %s%s (%s:%" Pd ":%" Pd ")\n", pc,
- fp, sp, is_optimized ? (is_inlined ? "inlined " : "optimized ") : "",
- func_name.ToCString(), url.ToCString(), line, column);
-}
-
-
-void SimulatorDebugger::PrintBacktrace() {
- StackFrameIterator frames(
- sim_->get_register(FP), sim_->get_register(SP), sim_->get_pc(),
- StackFrameIterator::kDontValidateFrames, Thread::Current(),
- StackFrameIterator::kNoCrossThreadIteration);
- StackFrame* frame = frames.NextFrame();
- ASSERT(frame != NULL);
- Function& function = Function::Handle();
- Function& inlined_function = Function::Handle();
- Code& code = Code::Handle();
- Code& unoptimized_code = Code::Handle();
- while (frame != NULL) {
- if (frame->IsDartFrame()) {
- code = frame->LookupDartCode();
- function = code.function();
- if (code.is_optimized()) {
- // For optimized frames, extract all the inlined functions if any
- // into the stack trace.
- InlinedFunctionsIterator it(code, frame->pc());
- while (!it.Done()) {
- // Print each inlined frame with its pc in the corresponding
- // unoptimized frame.
- inlined_function = it.function();
- unoptimized_code = it.code();
- uword unoptimized_pc = it.pc();
- it.Advance();
- if (!it.Done()) {
- PrintDartFrame(
- unoptimized_pc, frame->fp(), frame->sp(), inlined_function,
- GetApproximateTokenIndex(unoptimized_code, unoptimized_pc),
- true, true);
- }
- }
- // Print the optimized inlining frame below.
- }
- PrintDartFrame(frame->pc(), frame->fp(), frame->sp(), function,
- GetApproximateTokenIndex(code, frame->pc()),
- code.is_optimized(), false);
- } else {
- OS::Print("pc=0x%" Px " fp=0x%" Px " sp=0x%" Px " %s frame\n",
- frame->pc(), frame->fp(), frame->sp(),
- frame->IsEntryFrame()
- ? "entry"
- : frame->IsExitFrame()
- ? "exit"
- : frame->IsStubFrame() ? "stub" : "invalid");
- }
- frame = frames.NextFrame();
- }
-}
-
-
-bool SimulatorDebugger::SetBreakpoint(Instr* breakpc) {
- // Check if a breakpoint can be set. If not return without any side-effects.
- if (sim_->break_pc_ != NULL) {
- return false;
- }
-
- // Set the breakpoint.
- sim_->break_pc_ = breakpc;
- sim_->break_instr_ = breakpc->InstructionBits();
- // Not setting the breakpoint instruction in the code itself. It will be set
- // when the debugger shell continues.
- return true;
-}
-
-
-bool SimulatorDebugger::DeleteBreakpoint(Instr* breakpc) {
- if (sim_->break_pc_ != NULL) {
- sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
- }
-
- sim_->break_pc_ = NULL;
- sim_->break_instr_ = 0;
- return true;
-}
-
-
-void SimulatorDebugger::UndoBreakpoints() {
- if (sim_->break_pc_ != NULL) {
- sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
- }
-}
-
-
-void SimulatorDebugger::RedoBreakpoints() {
- if (sim_->break_pc_ != NULL) {
- sim_->break_pc_->SetInstructionBits(Instr::kSimulatorBreakpointInstruction);
- }
-}
-
-
-void SimulatorDebugger::Debug() {
- intptr_t last_pc = -1;
- bool done = false;
-
-#define COMMAND_SIZE 63
-#define ARG_SIZE 255
-
-#define STR(a) #a
-#define XSTR(a) STR(a)
-
- char cmd[COMMAND_SIZE + 1];
- char arg1[ARG_SIZE + 1];
- char arg2[ARG_SIZE + 1];
-
- // make sure to have a proper terminating character if reaching the limit
- cmd[COMMAND_SIZE] = 0;
- arg1[ARG_SIZE] = 0;
- arg2[ARG_SIZE] = 0;
-
- // Undo all set breakpoints while running in the debugger shell. This will
- // make them invisible to all commands.
- UndoBreakpoints();
-
- while (!done) {
- if (last_pc != sim_->get_pc()) {
- last_pc = sim_->get_pc();
- if (Simulator::IsIllegalAddress(last_pc)) {
- OS::Print("pc is out of bounds: 0x%" Px "\n", last_pc);
- } else {
- if (FLAG_support_disassembler) {
- Disassembler::Disassemble(last_pc, last_pc + Instr::kInstrSize);
- } else {
- OS::Print("Disassembler not supported in this mode.\n");
- }
- }
- }
- char* line = ReadLine("sim> ");
- if (line == NULL) {
- FATAL("ReadLine failed");
- } else {
- // Use sscanf to parse the individual parts of the command line. At the
- // moment no command expects more than two parameters.
- int args = SScanF(line,
- "%" XSTR(COMMAND_SIZE) "s "
- "%" XSTR(ARG_SIZE) "s "
- "%" XSTR(ARG_SIZE) "s",
- cmd, arg1, arg2);
- if ((strcmp(cmd, "h") == 0) || (strcmp(cmd, "help") == 0)) {
- OS::Print(
- "c/cont -- continue execution\n"
- "disasm -- disassemble instrs at current pc location\n"
- " other variants are:\n"
- " disasm <address>\n"
- " disasm <address> <number_of_instructions>\n"
- " by default 10 instrs are disassembled\n"
- "del -- delete breakpoints\n"
- "gdb -- transfer control to gdb\n"
- "h/help -- print this help string\n"
- "break <address> -- set break point at specified address\n"
- "p/print <reg or value or *addr> -- print integer\n"
- "pf/printfloat <freg or *addr> -- print float value\n"
- "po/printobject <*reg or *addr> -- print object\n"
- "si/stepi -- single step an instruction\n"
- "trace -- toggle execution tracing mode\n"
- "bt -- print backtrace\n"
- "unstop -- if current pc is a stop instr make it a nop\n"
- "q/quit -- Quit the debugger and exit the program\n");
- } else if ((strcmp(cmd, "quit") == 0) || (strcmp(cmd, "q") == 0)) {
- OS::Print("Quitting\n");
- OS::Exit(0);
- } else if ((strcmp(cmd, "si") == 0) || (strcmp(cmd, "stepi") == 0)) {
- sim_->InstructionDecode(reinterpret_cast<Instr*>(sim_->get_pc()));
- } else if ((strcmp(cmd, "c") == 0) || (strcmp(cmd, "cont") == 0)) {
- // Execute the one instruction we broke at with breakpoints disabled.
- sim_->InstructionDecode(reinterpret_cast<Instr*>(sim_->get_pc()));
- // Leave the debugger shell.
- done = true;
- } else if ((strcmp(cmd, "p") == 0) || (strcmp(cmd, "print") == 0)) {
- if (args == 2) {
- uint32_t value;
- if (strcmp(arg1, "icount") == 0) {
- const uint64_t icount = sim_->get_icount();
- OS::Print("icount: %" Pu64 " 0x%" Px64 "\n", icount, icount);
- } else if (GetValue(arg1, &value)) {
- OS::Print("%s: %u 0x%x\n", arg1, value, value);
- } else {
- OS::Print("%s unrecognized\n", arg1);
- }
- } else {
- OS::Print("print <reg or value or *addr>\n");
- }
- } else if ((strcmp(cmd, "pf") == 0) || (strcmp(cmd, "printfloat") == 0)) {
- if (args == 2) {
- double dvalue;
- if (GetFValue(arg1, &dvalue)) {
- uint64_t long_value = bit_cast<uint64_t, double>(dvalue);
- OS::Print("%s: %llu 0x%llx %.8g\n", arg1, long_value, long_value,
- dvalue);
- } else {
- OS::Print("%s unrecognized\n", arg1);
- }
- } else {
- OS::Print("printfloat <freg or *addr>\n");
- }
- } else if ((strcmp(cmd, "pd") == 0) ||
- (strcmp(cmd, "printdouble") == 0)) {
- if (args == 2) {
- double dvalue;
- if (GetDValue(arg1, &dvalue)) {
- uint64_t long_value = bit_cast<uint64_t, double>(dvalue);
- OS::Print("%s: %llu 0x%llx %.8g\n", arg1, long_value, long_value,
- dvalue);
- } else {
- OS::Print("%s unrecognized\n", arg1);
- }
- } else {
- OS::Print("printfloat <dreg or *addr>\n");
- }
- } else if ((strcmp(cmd, "po") == 0) ||
- (strcmp(cmd, "printobject") == 0)) {
- if (args == 2) {
- uint32_t value;
- // Make the dereferencing '*' optional.
- if (((arg1[0] == '*') && GetValue(arg1 + 1, &value)) ||
- GetValue(arg1, &value)) {
- if (Isolate::Current()->heap()->Contains(value)) {
- OS::Print("%s: \n", arg1);
-#if defined(DEBUG)
- const Object& obj =
- Object::Handle(reinterpret_cast<RawObject*>(value));
- obj.Print();
-#endif // defined(DEBUG)
- } else {
- OS::Print("0x%x is not an object reference\n", value);
- }
- } else {
- OS::Print("%s unrecognized\n", arg1);
- }
- } else {
- OS::Print("printobject <*reg or *addr>\n");
- }
- } else if (strcmp(cmd, "disasm") == 0) {
- uint32_t start = 0;
- uint32_t end = 0;
- if (args == 1) {
- start = sim_->get_pc();
- end = start + (10 * Instr::kInstrSize);
- } else if (args == 2) {
- if (GetValue(arg1, &start)) {
- // No length parameter passed, assume 10 instructions.
- if (Simulator::IsIllegalAddress(start)) {
- // If start isn't a valid address, warn and use PC instead.
- OS::Print("First argument yields invalid address: 0x%x\n", start);
- OS::Print("Using PC instead\n");
- start = sim_->get_pc();
- }
- end = start + (10 * Instr::kInstrSize);
- }
- } else {
- uint32_t length;
- if (GetValue(arg1, &start) && GetValue(arg2, &length)) {
- if (Simulator::IsIllegalAddress(start)) {
- // If start isn't a valid address, warn and use PC instead.
- OS::Print("First argument yields invalid address: 0x%x\n", start);
- OS::Print("Using PC instead\n");
- start = sim_->get_pc();
- }
- end = start + (length * Instr::kInstrSize);
- }
- }
- if ((start > 0) && (end > start)) {
- if (FLAG_support_disassembler) {
- Disassembler::Disassemble(start, end);
- } else {
- OS::Print("Disassembler not supported in this mode.\n");
- }
- } else {
- OS::Print("disasm [<address> [<number_of_instructions>]]\n");
- }
- } else if (strcmp(cmd, "gdb") == 0) {
- OS::Print("relinquishing control to gdb\n");
- OS::DebugBreak();
- OS::Print("regaining control from gdb\n");
- } else if (strcmp(cmd, "break") == 0) {
- if (args == 2) {
- uint32_t addr;
- if (GetValue(arg1, &addr)) {
- if (!SetBreakpoint(reinterpret_cast<Instr*>(addr))) {
- OS::Print("setting breakpoint failed\n");
- }
- } else {
- OS::Print("%s unrecognized\n", arg1);
- }
- } else {
- OS::Print("break <address>\n");
- }
- } else if (strcmp(cmd, "del") == 0) {
- if (!DeleteBreakpoint(NULL)) {
- OS::Print("deleting breakpoint failed\n");
- }
- } else if (strcmp(cmd, "unstop") == 0) {
- intptr_t stop_pc = sim_->get_pc() - Instr::kInstrSize;
- Instr* stop_instr = reinterpret_cast<Instr*>(stop_pc);
- if (stop_instr->IsBreakPoint()) {
- stop_instr->SetInstructionBits(Instr::kNopInstruction);
- } else {
- OS::Print("Not at debugger stop.\n");
- }
- } else if (strcmp(cmd, "trace") == 0) {
- if (FLAG_trace_sim_after == ULLONG_MAX) {
- FLAG_trace_sim_after = sim_->get_icount();
- OS::Print("execution tracing on\n");
- } else {
- FLAG_trace_sim_after = ULLONG_MAX;
- OS::Print("execution tracing off\n");
- }
- } else if (strcmp(cmd, "bt") == 0) {
- PrintBacktrace();
- } else {
- OS::Print("Unknown command: %s\n", cmd);
- }
- }
- delete[] line;
- }
-
- // Add all the breakpoints back to stop execution and enter the debugger
- // shell when hit.
- RedoBreakpoints();
-
-#undef COMMAND_SIZE
-#undef ARG_SIZE
-
-#undef STR
-#undef XSTR
-}
-
-
-char* SimulatorDebugger::ReadLine(const char* prompt) {
- char* result = NULL;
- char line_buf[256];
- intptr_t offset = 0;
- bool keep_going = true;
- OS::Print("%s", prompt);
- while (keep_going) {
- if (fgets(line_buf, sizeof(line_buf), stdin) == NULL) {
- // fgets got an error. Just give up.
- if (result != NULL) {
- delete[] result;
- }
- return NULL;
- }
- intptr_t len = strlen(line_buf);
- if (len > 1 && line_buf[len - 2] == '\\' && line_buf[len - 1] == '\n') {
- // When we read a line that ends with a "\" we remove the escape and
- // append the remainder.
- line_buf[len - 2] = '\n';
- line_buf[len - 1] = 0;
- len -= 1;
- } else if ((len > 0) && (line_buf[len - 1] == '\n')) {
- // Since we read a new line we are done reading the line. This
- // will exit the loop after copying this buffer into the result.
- keep_going = false;
- }
- if (result == NULL) {
- // Allocate the initial result and make room for the terminating '\0'
- result = new char[len + 1];
- if (result == NULL) {
- // OOM, so cannot readline anymore.
- return NULL;
- }
- } else {
- // Allocate a new result with enough room for the new addition.
- intptr_t new_len = offset + len + 1;
- char* new_result = new char[new_len];
- if (new_result == NULL) {
- // OOM, free the buffer allocated so far and return NULL.
- delete[] result;
- return NULL;
- } else {
- // Copy the existing input into the new array and set the new
- // array as the result.
- memmove(new_result, result, offset);
- delete[] result;
- result = new_result;
- }
- }
- // Copy the newly read line into the result.
- memmove(result + offset, line_buf, len);
- offset += len;
- }
- ASSERT(result != NULL);
- result[offset] = '\0';
- return result;
-}
-
-
-// Synchronization primitives support.
-Mutex* Simulator::exclusive_access_lock_ = NULL;
-Simulator::AddressTag Simulator::exclusive_access_state_[kNumAddressTags] = {
- {NULL, 0}};
-int Simulator::next_address_tag_ = 0;
-
-
-void Simulator::InitOnce() {
- // Setup exclusive access state lock.
- exclusive_access_lock_ = new Mutex(); -} - - -Simulator::Simulator() { - // Setup simulator support first. Some of this information is needed to - // setup the architecture state. - // We allocate the stack here, the size is computed as the sum of - // the size specified by the user and the buffer space needed for - // handling stack overflow exceptions. To be safe in potential - // stack underflows we also add some underflow buffer space. - stack_ = - new char[(OSThread::GetSpecifiedStackSize() + OSThread::kStackSizeBuffer + - kSimulatorStackUnderflowSize)]; - icount_ = 0; - delay_slot_ = false; - break_pc_ = NULL; - break_instr_ = 0; - last_setjmp_buffer_ = NULL; - top_exit_frame_info_ = 0; - - // Setup architecture state. - // All registers are initialized to zero to start with. - for (int i = 0; i < kNumberOfCpuRegisters; i++) { - registers_[i] = 0; - } - pc_ = 0; - // The sp is initialized to point to the bottom (high address) of the - // allocated stack area. - registers_[SP] = StackTop(); - - // All double-precision registers are initialized to zero. - for (int i = 0; i < kNumberOfFRegisters; i++) { - fregisters_[i] = 0.0; - } - fcsr_ = 0; -} - - -Simulator::~Simulator() { - delete[] stack_; - Isolate* isolate = Isolate::Current(); - if (isolate != NULL) { - isolate->set_simulator(NULL); - } -} - - -// When the generated code calls an external reference we need to catch that in -// the simulator. The external reference will be a function compiled for the -// host architecture. We need to call that function instead of trying to -// execute it with the simulator. We do that by redirecting the external -// reference to a break instruction with code 2 that is handled by -// the simulator. We write the original destination of the jump just at a known -// offset from the break instruction so the simulator knows what to call. -class Redirection { - public: - uword address_of_break_instruction() { - return reinterpret_cast(&break_instruction_); - } - - uword external_function() const { return external_function_; } - - Simulator::CallKind call_kind() const { return call_kind_; } - - int argument_count() const { return argument_count_; } - - static Redirection* Get(uword external_function, - Simulator::CallKind call_kind, - int argument_count) { - Redirection* current; - for (current = list_; current != NULL; current = current->next_) { - if (current->external_function_ == external_function) return current; - } - return new Redirection(external_function, call_kind, argument_count); - } - - static Redirection* FromBreakInstruction(Instr* break_instruction) { - char* addr_of_break = reinterpret_cast(break_instruction); - char* addr_of_redirection = - addr_of_break - OFFSET_OF(Redirection, break_instruction_); - return reinterpret_cast(addr_of_redirection); - } - - static uword FunctionForRedirect(uword address_of_break) { - Redirection* current; - for (current = list_; current != NULL; current = current->next_) { - if (current->address_of_break_instruction() == address_of_break) { - return current->external_function_; - } - } - return 0; - } - - private: - Redirection(uword external_function, - Simulator::CallKind call_kind, - int argument_count) - : external_function_(external_function), - call_kind_(call_kind), - argument_count_(argument_count), - break_instruction_(Instr::kSimulatorRedirectInstruction), - next_(list_) { - // Atomically prepend this element to the front of the global list. - // Note: Since elements are never removed, there is no ABA issue. 
-    Redirection* list_head = list_;
-    do {
-      next_ = list_head;
-      list_head =
-          reinterpret_cast<Redirection*>(AtomicOperations::CompareAndSwapWord(
-              reinterpret_cast<uword*>(&list_), reinterpret_cast<uword>(next_),
-              reinterpret_cast<uword>(this)));
-    } while (list_head != next_);
-  }
-
-  uword external_function_;
-  Simulator::CallKind call_kind_;
-  int argument_count_;
-  uint32_t break_instruction_;
-  Redirection* next_;
-  static Redirection* list_;
-};
-
-
-Redirection* Redirection::list_ = NULL;
-
-
-uword Simulator::RedirectExternalReference(uword function,
-                                           CallKind call_kind,
-                                           int argument_count) {
-  Redirection* redirection =
-      Redirection::Get(function, call_kind, argument_count);
-  return redirection->address_of_break_instruction();
-}
-
-
-uword Simulator::FunctionForRedirect(uword redirect) {
-  return Redirection::FunctionForRedirect(redirect);
-}
-
-
-// Get the active Simulator for the current isolate.
-Simulator* Simulator::Current() {
-  Simulator* simulator = Isolate::Current()->simulator();
-  if (simulator == NULL) {
-    simulator = new Simulator();
-    Isolate::Current()->set_simulator(simulator);
-  }
-  return simulator;
-}
-
-
-// Sets the register in the architecture state.
-void Simulator::set_register(Register reg, int32_t value) {
-  if (reg != R0) {
-    registers_[reg] = value;
-  }
-}
-
-
-void Simulator::set_fregister(FRegister reg, int32_t value) {
-  ASSERT(reg >= 0);
-  ASSERT(reg < kNumberOfFRegisters);
-  fregisters_[reg] = value;
-}
-
-
-void Simulator::set_fregister_float(FRegister reg, float value) {
-  ASSERT(reg >= 0);
-  ASSERT(reg < kNumberOfFRegisters);
-  fregisters_[reg] = bit_cast<int32_t>(value);
-}
-
-
-void Simulator::set_fregister_long(FRegister reg, int64_t value) {
-  ASSERT(reg >= 0);
-  ASSERT(reg < kNumberOfFRegisters);
-  ASSERT((reg & 1) == 0);
-  fregisters_[reg] = Utils::Low32Bits(value);
-  fregisters_[reg + 1] = Utils::High32Bits(value);
-}
-
-
-void Simulator::set_fregister_double(FRegister reg, double value) {
-  const int64_t ival = bit_cast<int64_t>(value);
-  set_fregister_long(reg, ival);
-}
-
-
-void Simulator::set_dregister_bits(DRegister reg, int64_t value) {
-  ASSERT(reg >= 0);
-  ASSERT(reg < kNumberOfDRegisters);
-  FRegister lo = static_cast<FRegister>(reg * 2);
-  FRegister hi = static_cast<FRegister>((reg * 2) + 1);
-  set_fregister(lo, Utils::Low32Bits(value));
-  set_fregister(hi, Utils::High32Bits(value));
-}
-
-
-void Simulator::set_dregister(DRegister reg, double value) {
-  ASSERT(reg >= 0);
-  ASSERT(reg < kNumberOfDRegisters);
-  set_dregister_bits(reg, bit_cast<int64_t>(value));
-}
-
-
-// Get the register from the architecture state.
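// The redirection pattern above generalizes: publish a record whose address
// can be recovered from a marker word embedded inside it, and prepend records
// to a global list with a CAS loop (safe here because nodes are never
// removed, so there is no ABA problem). Below is a minimal standalone sketch
// of the same idea; the names (Trampoline, FromMarker) and the marker value
// are illustrative stand-ins, not the VM's API or a real MIPS encoding.

#include <atomic>
#include <cstddef>
#include <cstdint>

struct Trampoline {
  uintptr_t target;  // Host function the simulator should call instead.
  uint32_t marker;   // Stand-in for the redirect break instruction.
  Trampoline* next;

  static std::atomic<Trampoline*> list;

  // Map the marker's address back to its enclosing record, exactly as
  // Redirection::FromBreakInstruction does with OFFSET_OF above.
  static Trampoline* FromMarker(uint32_t* m) {
    char* p = reinterpret_cast<char*>(m) - offsetof(Trampoline, marker);
    return reinterpret_cast<Trampoline*>(p);
  }

  static Trampoline* Get(uintptr_t target) {
    for (Trampoline* t = list.load(std::memory_order_acquire); t != nullptr;
         t = t->next) {
      if (t->target == target) return t;  // Reuse a published record.
    }
    // Like the original, two racing threads may both publish a record for the
    // same target; both stay on the list and both remain valid.
    Trampoline* t = new Trampoline{target, 0xDEADBEEFu, nullptr};
    t->next = list.load(std::memory_order_relaxed);
    // Lock-free prepend; compare_exchange_weak refreshes t->next on failure.
    while (!list.compare_exchange_weak(t->next, t, std::memory_order_release,
                                       std::memory_order_relaxed)) {
    }
    return t;
  }
};

std::atomic<Trampoline*> Trampoline::list{nullptr};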
-int32_t Simulator::get_register(Register reg) const { - if (reg == R0) { - return 0; - } - return registers_[reg]; -} - - -int32_t Simulator::get_fregister(FRegister reg) const { - ASSERT((reg >= 0) && (reg < kNumberOfFRegisters)); - return fregisters_[reg]; -} - - -float Simulator::get_fregister_float(FRegister reg) const { - ASSERT(reg >= 0); - ASSERT(reg < kNumberOfFRegisters); - return bit_cast(fregisters_[reg]); -} - - -int64_t Simulator::get_fregister_long(FRegister reg) const { - ASSERT(reg >= 0); - ASSERT(reg < kNumberOfFRegisters); - ASSERT((reg & 1) == 0); - const int32_t low = fregisters_[reg]; - const int32_t high = fregisters_[reg + 1]; - const int64_t value = Utils::LowHighTo64Bits(low, high); - return value; -} - - -double Simulator::get_fregister_double(FRegister reg) const { - ASSERT(reg >= 0); - ASSERT(reg < kNumberOfFRegisters); - ASSERT((reg & 1) == 0); - const int64_t value = get_fregister_long(reg); - return bit_cast(value); -} - - -int64_t Simulator::get_dregister_bits(DRegister reg) const { - ASSERT(reg >= 0); - ASSERT(reg < kNumberOfDRegisters); - FRegister lo = static_cast(reg * 2); - FRegister hi = static_cast((reg * 2) + 1); - return Utils::LowHighTo64Bits(get_fregister(lo), get_fregister(hi)); -} - - -double Simulator::get_dregister(DRegister reg) const { - ASSERT(reg >= 0); - ASSERT(reg < kNumberOfDRegisters); - const int64_t value = get_dregister_bits(reg); - return bit_cast(value); -} - - -void Simulator::UnimplementedInstruction(Instr* instr) { - char buffer[64]; - snprintf(buffer, sizeof(buffer), "Unimplemented instruction: pc=%p\n", instr); - SimulatorDebugger dbg(this); - dbg.Stop(instr, buffer); - FATAL("Cannot continue execution after unimplemented instruction."); -} - - -void Simulator::HandleIllegalAccess(uword addr, Instr* instr) { - uword fault_pc = get_pc(); - // The debugger will not be able to single step past this instruction, but - // it will be possible to disassemble the code and inspect registers. - char buffer[128]; - snprintf(buffer, sizeof(buffer), - "illegal memory access at 0x%" Px ", pc=0x%" Px "\n", addr, - fault_pc); - SimulatorDebugger dbg(this); - dbg.Stop(instr, buffer); - // The debugger will return control in non-interactive mode. - FATAL("Cannot continue execution after illegal memory access."); -} - - -void Simulator::UnalignedAccess(const char* msg, uword addr, Instr* instr) { - // The debugger will not be able to single step past this instruction, but - // it will be possible to disassemble the code and inspect registers. - char buffer[128]; - snprintf(buffer, sizeof(buffer), "pc=%p, unaligned %s at 0x%" Px "\n", instr, - msg, addr); - SimulatorDebugger dbg(this); - dbg.Stop(instr, buffer); - // The debugger will return control in non-interactive mode. - FATAL("Cannot continue execution after unaligned access."); -} - - -// Returns the top of the stack area to enable checking for stack pointer -// validity. -uword Simulator::StackTop() const { - // To be safe in potential stack underflows we leave some buffer above and - // set the stack top. 
-  return StackBase() +
-         (OSThread::GetSpecifiedStackSize() + OSThread::kStackSizeBuffer);
-}
-
-
-bool Simulator::IsTracingExecution() const {
-  return icount_ > FLAG_trace_sim_after;
-}
-
-
-void Simulator::Format(Instr* instr, const char* format) {
-  OS::PrintErr("Simulator - unknown instruction: %s\n", format);
-  UNIMPLEMENTED();
-}
-
-
-int8_t Simulator::ReadB(uword addr) {
-  int8_t* ptr = reinterpret_cast<int8_t*>(addr);
-  return *ptr;
-}
-
-
-uint8_t Simulator::ReadBU(uword addr) {
-  uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
-  return *ptr;
-}
-
-
-int16_t Simulator::ReadH(uword addr, Instr* instr) {
-  if ((addr & 1) == 0) {
-    int16_t* ptr = reinterpret_cast<int16_t*>(addr);
-    return *ptr;
-  }
-  UnalignedAccess("signed halfword read", addr, instr);
-  return 0;
-}
-
-
-uint16_t Simulator::ReadHU(uword addr, Instr* instr) {
-  if ((addr & 1) == 0) {
-    uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
-    return *ptr;
-  }
-  UnalignedAccess("unsigned halfword read", addr, instr);
-  return 0;
-}
-
-
-intptr_t Simulator::ReadW(uword addr, Instr* instr) {
-  if ((addr & 3) == 0) {
-    intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
-    return *ptr;
-  }
-  UnalignedAccess("read", addr, instr);
-  return 0;
-}
-
-
-void Simulator::WriteB(uword addr, uint8_t value) {
-  uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
-  *ptr = value;
-}
-
-
-void Simulator::WriteH(uword addr, uint16_t value, Instr* instr) {
-  if ((addr & 1) == 0) {
-    uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
-    *ptr = value;
-    return;
-  }
-  UnalignedAccess("halfword write", addr, instr);
-}
-
-
-void Simulator::WriteW(uword addr, intptr_t value, Instr* instr) {
-  if ((addr & 3) == 0) {
-    intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
-    *ptr = value;
-    return;
-  }
-  UnalignedAccess("write", addr, instr);
-}
-
-
-double Simulator::ReadD(uword addr, Instr* instr) {
-  if ((addr & 7) == 0) {
-    double* ptr = reinterpret_cast<double*>(addr);
-    return *ptr;
-  }
-  UnalignedAccess("double-precision floating point read", addr, instr);
-  return 0.0;
-}
-
-
-void Simulator::WriteD(uword addr, double value, Instr* instr) {
-  if ((addr & 7) == 0) {
-    double* ptr = reinterpret_cast<double*>(addr);
-    *ptr = value;
-    return;
-  }
-  UnalignedAccess("double-precision floating point write", addr, instr);
-}
-
-
-// Synchronization primitives support.
-void Simulator::SetExclusiveAccess(uword addr) {
-  Thread* thread = Thread::Current();
-  ASSERT(thread != NULL);
-  DEBUG_ASSERT(exclusive_access_lock_->IsOwnedByCurrentThread());
-  int i = 0;
-  // Find an entry for this thread in the exclusive access state.
-  while ((i < kNumAddressTags) &&
-         (exclusive_access_state_[i].thread != thread)) {
-    i++;
-  }
-  // Round-robin replacement of previously used entries.
-  if (i == kNumAddressTags) {
-    i = next_address_tag_;
-    if (++next_address_tag_ == kNumAddressTags) {
-      next_address_tag_ = 0;
-    }
-    exclusive_access_state_[i].thread = thread;
-  }
-  // Remember the address being reserved.
-  exclusive_access_state_[i].addr = addr;
-}
-
-
-bool Simulator::HasExclusiveAccessAndOpen(uword addr) {
-  Thread* thread = Thread::Current();
-  ASSERT(thread != NULL);
-  ASSERT(addr != 0);
-  DEBUG_ASSERT(exclusive_access_lock_->IsOwnedByCurrentThread());
-  bool result = false;
-  for (int i = 0; i < kNumAddressTags; i++) {
-    if (exclusive_access_state_[i].thread == thread) {
-      // Check whether the current thread's address reservation matches.
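// Note on the reservation protocol here: each thread owns at most one tag,
// and a store-conditional both consumes this thread's reservation and clears
// every other thread's reservation on the same address. That mirrors MIPS
// LL/SC, where any intervening store to the reserved location breaks the
// link. For example, with two simulated threads:
//   T1: LL  addr     -> T1 tagged with addr
//   T2: LL  addr     -> T2 tagged with addr
//   T2: SC  addr, v  -> succeeds, and clears T1's matching tag as well
//   T1: SC  addr, w  -> fails, since no reservation is left for T1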
-      if (exclusive_access_state_[i].addr == addr) {
-        result = true;
-      }
-      exclusive_access_state_[i].addr = 0;
-    } else if (exclusive_access_state_[i].addr == addr) {
-      // Other threads with matching address lose their reservations.
-      exclusive_access_state_[i].addr = 0;
-    }
-  }
-  return result;
-}
-
-
-void Simulator::ClearExclusive() {
-  MutexLocker ml(exclusive_access_lock_);
-  // Remove the reservation for this thread.
-  SetExclusiveAccess(0);
-}
-
-
-intptr_t Simulator::ReadExclusiveW(uword addr, Instr* instr) {
-  MutexLocker ml(exclusive_access_lock_);
-  SetExclusiveAccess(addr);
-  return ReadW(addr, instr);
-}
-
-
-intptr_t Simulator::WriteExclusiveW(uword addr, intptr_t value, Instr* instr) {
-  MutexLocker ml(exclusive_access_lock_);
-  bool write_allowed = HasExclusiveAccessAndOpen(addr);
-  if (write_allowed) {
-    WriteW(addr, value, instr);
-    return 1;  // Success.
-  }
-  return 0;  // Failure.
-}
-
-
-uword Simulator::CompareExchange(uword* address,
-                                 uword compare_value,
-                                 uword new_value) {
-  MutexLocker ml(exclusive_access_lock_);
-  // We do not get a reservation as it would be guaranteed to be found when
-  // writing below. No other thread is able to make a reservation while we
-  // hold the lock.
-  uword value = *address;
-  if (value == compare_value) {
-    *address = new_value;
-    // Same effect on exclusive access state as a successful SC.
-    HasExclusiveAccessAndOpen(reinterpret_cast<uword>(address));
-  } else {
-    // Same effect on exclusive access state as an LL.
-    SetExclusiveAccess(reinterpret_cast<uword>(address));
-  }
-  return value;
-}
-
-
-uint32_t Simulator::CompareExchangeUint32(uint32_t* address,
-                                          uint32_t compare_value,
-                                          uint32_t new_value) {
-  COMPILE_ASSERT(sizeof(uword) == sizeof(uint32_t));
-  return CompareExchange(reinterpret_cast<uword*>(address),
-                         static_cast<uword>(compare_value),
-                         static_cast<uword>(new_value));
-}
-
-
-// Calls into the Dart runtime are based on this interface.
-typedef void (*SimulatorRuntimeCall)(NativeArguments arguments);
-
-// Calls to leaf Dart runtime functions are based on this interface.
-typedef int32_t (*SimulatorLeafRuntimeCall)(int32_t r0,
-                                            int32_t r1,
-                                            int32_t r2,
-                                            int32_t r3);
-
-// Calls to leaf float Dart runtime functions are based on this interface.
-typedef double (*SimulatorLeafFloatRuntimeCall)(double d0, double d1);
-
-// Calls to native Dart functions are based on this interface.
-typedef void (*SimulatorBootstrapNativeCall)(NativeArguments* arguments);
-typedef void (*SimulatorNativeCall)(NativeArguments* arguments, uword target);
-
-
-void Simulator::DoBreak(Instr* instr) {
-  ASSERT(instr->OpcodeField() == SPECIAL);
-  ASSERT(instr->FunctionField() == BREAK);
-  if (instr->BreakCodeField() == Instr::kStopMessageCode) {
-    SimulatorDebugger dbg(this);
-    const char* message = *reinterpret_cast<const char**>(
-        reinterpret_cast<intptr_t>(instr) - Instr::kInstrSize);
-    set_pc(get_pc() + Instr::kInstrSize);
-    dbg.Stop(instr, message);
-    // Adjust for extra pc increment.
- set_pc(get_pc() - Instr::kInstrSize); - } else if (instr->BreakCodeField() == Instr::kSimulatorRedirectCode) { - SimulatorSetjmpBuffer buffer(this); - - if (!setjmp(buffer.buffer_)) { - int32_t saved_ra = get_register(RA); - Redirection* redirection = Redirection::FromBreakInstruction(instr); - uword external = redirection->external_function(); - if (IsTracingExecution()) { - THR_Print("Call to host function at 0x%" Pd "\n", external); - } - - if ((redirection->call_kind() == kRuntimeCall) || - (redirection->call_kind() == kBootstrapNativeCall) || - (redirection->call_kind() == kNativeCall)) { - // Set the top_exit_frame_info of this simulator to the native stack. - set_top_exit_frame_info(Thread::GetCurrentStackPointer()); - } - if (redirection->call_kind() == kRuntimeCall) { - NativeArguments arguments; - ASSERT(sizeof(NativeArguments) == 4 * kWordSize); - arguments.thread_ = reinterpret_cast(get_register(A0)); - arguments.argc_tag_ = get_register(A1); - arguments.argv_ = reinterpret_cast(get_register(A2)); - arguments.retval_ = reinterpret_cast(get_register(A3)); - SimulatorRuntimeCall target = - reinterpret_cast(external); - target(arguments); - set_register(V0, icount_); // Zap result registers from void function. - set_register(V1, icount_); - } else if (redirection->call_kind() == kLeafRuntimeCall) { - int32_t a0 = get_register(A0); - int32_t a1 = get_register(A1); - int32_t a2 = get_register(A2); - int32_t a3 = get_register(A3); - SimulatorLeafRuntimeCall target = - reinterpret_cast(external); - a0 = target(a0, a1, a2, a3); - set_register(V0, a0); // Set returned result from function. - set_register(V1, icount_); // Zap second result register. - } else if (redirection->call_kind() == kLeafFloatRuntimeCall) { - ASSERT((0 <= redirection->argument_count()) && - (redirection->argument_count() <= 2)); - // double values are passed and returned in floating point registers. - SimulatorLeafFloatRuntimeCall target = - reinterpret_cast(external); - double d0 = 0.0; - double d6 = get_fregister_double(F12); - double d7 = get_fregister_double(F14); - d0 = target(d6, d7); - set_fregister_double(F0, d0); - } else if (redirection->call_kind() == kBootstrapNativeCall) { - ASSERT(redirection->argument_count() == 1); - NativeArguments* arguments; - arguments = reinterpret_cast(get_register(A0)); - SimulatorBootstrapNativeCall target = - reinterpret_cast(external); - target(arguments); - set_register(V0, icount_); // Zap result register from void function. - set_register(V1, icount_); - } else { - ASSERT(redirection->call_kind() == kNativeCall); - NativeArguments* arguments; - arguments = reinterpret_cast(get_register(A0)); - uword target_func = get_register(A1); - SimulatorNativeCall target = - reinterpret_cast(external); - target(arguments, target_func); - set_register(V0, icount_); // Zap result register from void function. - set_register(V1, icount_); - } - set_top_exit_frame_info(0); - - // Zap caller-saved registers, since the actual runtime call could have - // used them. - set_register(T0, icount_); - set_register(T1, icount_); - set_register(T2, icount_); - set_register(T3, icount_); - set_register(T4, icount_); - set_register(T5, icount_); - set_register(T6, icount_); - set_register(T7, icount_); - set_register(T8, icount_); - set_register(T9, icount_); - - set_register(A0, icount_); - set_register(A1, icount_); - set_register(A2, icount_); - set_register(A3, icount_); - set_register(TMP, icount_); - set_register(RA, icount_); - - // Zap floating point registers. 
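// Zapping with icount_ rather than a fixed constant serves two purposes:
// generated code that wrongly relies on a caller-saved register surviving a
// runtime call reads a value that changes from call to call, so the bug
// surfaces quickly, and the run stays deterministic, since the same
// instruction count always produces the same "garbage" value.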
- int32_t zap_dvalue = icount_; - for (int i = F4; i <= F18; i++) { - set_fregister(static_cast(i), zap_dvalue); - } - - // Return. Subtract to account for pc_ increment after return. - set_pc(saved_ra - Instr::kInstrSize); - } else { - // Coming via long jump from a throw. Continue to exception handler. - set_top_exit_frame_info(0); - // Adjust for extra pc increment. - set_pc(get_pc() - Instr::kInstrSize); - } - } else if (instr->BreakCodeField() == Instr::kSimulatorBreakCode) { - SimulatorDebugger dbg(this); - dbg.Stop(instr, "breakpoint"); - // Adjust for extra pc increment. - set_pc(get_pc() - Instr::kInstrSize); - } else { - SimulatorDebugger dbg(this); - set_pc(get_pc() + Instr::kInstrSize); - char buffer[32]; - snprintf(buffer, sizeof(buffer), "break #0x%x", instr->BreakCodeField()); - dbg.Stop(instr, buffer); - // Adjust for extra pc increment. - set_pc(get_pc() - Instr::kInstrSize); - } -} - - -void Simulator::DecodeSpecial(Instr* instr) { - ASSERT(instr->OpcodeField() == SPECIAL); - switch (instr->FunctionField()) { - case ADDU: { - ASSERT(instr->SaField() == 0); - // Format(instr, "addu 'rd, 'rs, 'rt"); - int32_t rs_val = get_register(instr->RsField()); - int32_t rt_val = get_register(instr->RtField()); - set_register(instr->RdField(), rs_val + rt_val); - break; - } - case AND: { - ASSERT(instr->SaField() == 0); - // Format(instr, "and 'rd, 'rs, 'rt"); - int32_t rs_val = get_register(instr->RsField()); - int32_t rt_val = get_register(instr->RtField()); - set_register(instr->RdField(), rs_val & rt_val); - break; - } - case BREAK: { - DoBreak(instr); - break; - } - case DIV: { - ASSERT(instr->RdField() == 0); - ASSERT(instr->SaField() == 0); - // Format(instr, "div 'rs, 'rt"); - int32_t rs_val = get_register(instr->RsField()); - int32_t rt_val = get_register(instr->RtField()); - if (rt_val == 0) { - // Results are unpredictable, but there is no arithmetic exception. - set_hi_register(icount_); - set_lo_register(icount_); - break; - } - - if ((rs_val == static_cast(0x80000000)) && - (rt_val == static_cast(0xffffffff))) { - set_lo_register(0x80000000); - set_hi_register(0); - } else { - set_lo_register(rs_val / rt_val); - set_hi_register(rs_val % rt_val); - } - break; - } - case DIVU: { - ASSERT(instr->RdField() == 0); - ASSERT(instr->SaField() == 0); - // Format(instr, "divu 'rs, 'rt"); - uint32_t rs_val = get_register(instr->RsField()); - uint32_t rt_val = get_register(instr->RtField()); - if (rt_val == 0) { - // Results are unpredictable, but there is no arithmetic exception. - set_hi_register(icount_); - set_lo_register(icount_); - break; - } - set_lo_register(rs_val / rt_val); - set_hi_register(rs_val % rt_val); - break; - } - case JALR: { - ASSERT(instr->RtField() == R0); - ASSERT(instr->RsField() != instr->RdField()); - ASSERT(!delay_slot_); - // Format(instr, "jalr'hint 'rd, rs"); - set_register(instr->RdField(), pc_ + 2 * Instr::kInstrSize); - uword next_pc = get_register(instr->RsField()); - ExecuteDelaySlot(); - // Set return address to be the instruction after the delay slot. - pc_ = next_pc - Instr::kInstrSize; // Account for regular PC increment. - break; - } - case JR: { - ASSERT(instr->RtField() == R0); - ASSERT(instr->RdField() == R0); - ASSERT(!delay_slot_); - // Format(instr, "jr'hint 'rs"); - uword next_pc = get_register(instr->RsField()); - ExecuteDelaySlot(); - pc_ = next_pc - Instr::kInstrSize; // Account for regular PC increment. 
- break; - } - case MFHI: { - ASSERT(instr->RsField() == 0); - ASSERT(instr->RtField() == 0); - ASSERT(instr->SaField() == 0); - // Format(instr, "mfhi 'rd"); - set_register(instr->RdField(), get_hi_register()); - break; - } - case MFLO: { - ASSERT(instr->RsField() == 0); - ASSERT(instr->RtField() == 0); - ASSERT(instr->SaField() == 0); - // Format(instr, "mflo 'rd"); - set_register(instr->RdField(), get_lo_register()); - break; - } - case MOVCI: { - ASSERT(instr->SaField() == 0); - ASSERT(instr->Bit(17) == 0); - int32_t rs_val = get_register(instr->RsField()); - uint32_t cc, fcsr_cc, test, status; - cc = instr->Bits(18, 3); - fcsr_cc = get_fcsr_condition_bit(cc); - test = instr->Bit(16); - status = test_fcsr_bit(fcsr_cc); - if (test == status) { - set_register(instr->RdField(), rs_val); - } - break; - } - case MOVN: { - ASSERT(instr->SaField() == 0); - // Format(instr, "movn 'rd, 'rs, 'rt"); - int32_t rt_val = get_register(instr->RtField()); - int32_t rs_val = get_register(instr->RsField()); - if (rt_val != 0) { - set_register(instr->RdField(), rs_val); - } - break; - } - case MOVZ: { - ASSERT(instr->SaField() == 0); - // Format(instr, "movz 'rd, 'rs, 'rt"); - int32_t rt_val = get_register(instr->RtField()); - int32_t rs_val = get_register(instr->RsField()); - if (rt_val == 0) { - set_register(instr->RdField(), rs_val); - } - break; - } - case MTHI: { - ASSERT(instr->RtField() == 0); - ASSERT(instr->RdField() == 0); - ASSERT(instr->SaField() == 0); - // Format(instr, "mthi 'rd"); - set_hi_register(get_register(instr->RsField())); - break; - } - case MTLO: { - ASSERT(instr->RtField() == 0); - ASSERT(instr->RdField() == 0); - ASSERT(instr->SaField() == 0); - // Format(instr, "mflo 'rd"); - set_lo_register(get_register(instr->RsField())); - break; - } - case MULT: { - ASSERT(instr->RdField() == 0); - ASSERT(instr->SaField() == 0); - // Format(instr, "mult 'rs, 'rt"); - int64_t rs = get_register(instr->RsField()); - int64_t rt = get_register(instr->RtField()); - int64_t res = rs * rt; - set_hi_register(Utils::High32Bits(res)); - set_lo_register(Utils::Low32Bits(res)); - break; - } - case MULTU: { - ASSERT(instr->RdField() == 0); - ASSERT(instr->SaField() == 0); - // Format(instr, "multu 'rs, 'rt"); - uint64_t rs = static_cast(get_register(instr->RsField())); - uint64_t rt = static_cast(get_register(instr->RtField())); - uint64_t res = rs * rt; - set_hi_register(Utils::High32Bits(res)); - set_lo_register(Utils::Low32Bits(res)); - break; - } - case NOR: { - ASSERT(instr->SaField() == 0); - // Format(instr, "nor 'rd, 'rs, 'rt"); - int32_t rs_val = get_register(instr->RsField()); - int32_t rt_val = get_register(instr->RtField()); - set_register(instr->RdField(), ~(rs_val | rt_val)); - break; - } - case OR: { - ASSERT(instr->SaField() == 0); - // Format(instr, "or 'rd, 'rs, 'rt"); - int32_t rs_val = get_register(instr->RsField()); - int32_t rt_val = get_register(instr->RtField()); - set_register(instr->RdField(), rs_val | rt_val); - break; - } - case SLL: { - ASSERT(instr->RsField() == 0); - if ((instr->RdField() == R0) && (instr->RtField() == R0) && - (instr->SaField() == 0)) { - // Format(instr, "nop"); - // Nothing to be done for NOP. 
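// A MIPS nop is encoded as "sll r0, r0, 0", the all-zero instruction word,
// which is why the no-op check above lives in the SLL case rather than under
// a dedicated opcode. Since writes to r0 are ignored anyway (see
// set_register), the special case is an optimization, not a correctness
// requirement.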
- } else { - int32_t rt_val = get_register(instr->RtField()); - int sa = instr->SaField(); - set_register(instr->RdField(), rt_val << sa); - } - break; - } - case SLLV: { - ASSERT(instr->SaField() == 0); - // Format(instr, "sllv 'rd, 'rt, 'rs"); - int32_t rt_val = get_register(instr->RtField()); - int32_t rs_val = get_register(instr->RsField()); - set_register(instr->RdField(), rt_val << (rs_val & 0x1f)); - break; - } - case SLT: { - ASSERT(instr->SaField() == 0); - // Format(instr, "slt 'rd, 'rs, 'rt"); - int32_t rs_val = get_register(instr->RsField()); - int32_t rt_val = get_register(instr->RtField()); - set_register(instr->RdField(), rs_val < rt_val ? 1 : 0); - break; - } - case SLTU: { - ASSERT(instr->SaField() == 0); - // Format(instr, "sltu 'rd, 'rs, 'rt"); - uint32_t rs_val = static_cast(get_register(instr->RsField())); - uint32_t rt_val = static_cast(get_register(instr->RtField())); - set_register(instr->RdField(), rs_val < rt_val ? 1 : 0); - break; - } - case SRA: { - ASSERT(instr->RsField() == 0); - // Format(instr, "sra 'rd, 'rt, 'sa"); - int32_t rt_val = get_register(instr->RtField()); - int32_t sa = instr->SaField(); - set_register(instr->RdField(), rt_val >> sa); - break; - } - case SRAV: { - ASSERT(instr->SaField() == 0); - // Format(instr, "srav 'rd, 'rt, 'rs"); - int32_t rt_val = get_register(instr->RtField()); - int32_t rs_val = get_register(instr->RsField()); - set_register(instr->RdField(), rt_val >> (rs_val & 0x1f)); - break; - } - case SRL: { - ASSERT(instr->RsField() == 0); - // Format(instr, "srl 'rd, 'rt, 'sa"); - uint32_t rt_val = get_register(instr->RtField()); - uint32_t sa = instr->SaField(); - set_register(instr->RdField(), rt_val >> sa); - break; - } - case SRLV: { - ASSERT(instr->SaField() == 0); - // Format(instr, "srlv 'rd, 'rt, 'rs"); - uint32_t rt_val = get_register(instr->RtField()); - uint32_t rs_val = get_register(instr->RsField()); - set_register(instr->RdField(), rt_val >> (rs_val & 0x1f)); - break; - } - case SUBU: { - ASSERT(instr->SaField() == 0); - // Format(instr, "subu 'rd, 'rs, 'rt"); - int32_t rs_val = get_register(instr->RsField()); - int32_t rt_val = get_register(instr->RtField()); - set_register(instr->RdField(), rs_val - rt_val); - break; - } - case XOR: { - ASSERT(instr->SaField() == 0); - // Format(instr, "xor 'rd, 'rs, 'rt"); - int32_t rs_val = get_register(instr->RsField()); - int32_t rt_val = get_register(instr->RtField()); - set_register(instr->RdField(), rs_val ^ rt_val); - break; - } - default: { - OS::PrintErr("DecodeSpecial: 0x%x\n", instr->InstructionBits()); - UnimplementedInstruction(instr); - break; - } - } -} - - -void Simulator::DecodeSpecial2(Instr* instr) { - ASSERT(instr->OpcodeField() == SPECIAL2); - switch (instr->FunctionField()) { - case MADD: { - ASSERT(instr->RdField() == 0); - ASSERT(instr->SaField() == 0); - // Format(instr, "madd 'rs, 'rt"); - uint32_t lo = get_lo_register(); - int32_t hi = get_hi_register(); - int64_t accum = Utils::LowHighTo64Bits(lo, hi); - int64_t rs = get_register(instr->RsField()); - int64_t rt = get_register(instr->RtField()); - int64_t res = accum + rs * rt; - set_hi_register(Utils::High32Bits(res)); - set_lo_register(Utils::Low32Bits(res)); - break; - } - case MADDU: { - ASSERT(instr->RdField() == 0); - ASSERT(instr->SaField() == 0); - // Format(instr, "maddu 'rs, 'rt"); - uint32_t lo = get_lo_register(); - uint32_t hi = get_hi_register(); - uint64_t accum = Utils::LowHighTo64Bits(lo, hi); - uint64_t rs = static_cast(get_register(instr->RsField())); - uint64_t rt = 
static_cast(get_register(instr->RtField())); - uint64_t res = accum + rs * rt; - set_hi_register(Utils::High32Bits(res)); - set_lo_register(Utils::Low32Bits(res)); - break; - } - case CLO: { - ASSERT(instr->SaField() == 0); - ASSERT(instr->RtField() == instr->RdField()); - // Format(instr, "clo 'rd, 'rs"); - int32_t rs_val = get_register(instr->RsField()); - int32_t bitcount = 0; - while (rs_val < 0) { - bitcount++; - rs_val <<= 1; - } - set_register(instr->RdField(), bitcount); - break; - } - case CLZ: { - ASSERT(instr->SaField() == 0); - ASSERT(instr->RtField() == instr->RdField()); - // Format(instr, "clz 'rd, 'rs"); - int32_t rs_val = get_register(instr->RsField()); - int32_t bitcount = 0; - if (rs_val != 0) { - while (rs_val > 0) { - bitcount++; - rs_val <<= 1; - } - } else { - bitcount = 32; - } - set_register(instr->RdField(), bitcount); - break; - } - default: { - OS::PrintErr("DecodeSpecial2: 0x%x\n", instr->InstructionBits()); - UnimplementedInstruction(instr); - break; - } - } -} - - -void Simulator::DoBranch(Instr* instr, bool taken, bool likely) { - ASSERT(!delay_slot_); - int32_t imm_val = instr->SImmField() << 2; - - uword next_pc; - if (taken) { - // imm_val is added to the address of the instruction following the branch. - next_pc = pc_ + imm_val + Instr::kInstrSize; - if (likely) { - ExecuteDelaySlot(); - } - } else { - next_pc = pc_ + (2 * Instr::kInstrSize); // Next after delay slot. - } - if (!likely) { - ExecuteDelaySlot(); - } - pc_ = next_pc - Instr::kInstrSize; - - return; -} - - -void Simulator::DecodeRegImm(Instr* instr) { - ASSERT(instr->OpcodeField() == REGIMM); - switch (instr->RegImmFnField()) { - case BGEZ: { - // Format(instr, "bgez 'rs, 'dest"); - int32_t rs_val = get_register(instr->RsField()); - DoBranch(instr, rs_val >= 0, false); - break; - } - case BGEZAL: { - int32_t rs_val = get_register(instr->RsField()); - // Return address is one after the delay slot. - set_register(RA, pc_ + (2 * Instr::kInstrSize)); - DoBranch(instr, rs_val >= 0, false); - break; - } - case BLTZAL: { - int32_t rs_val = get_register(instr->RsField()); - // Return address is one after the delay slot. - set_register(RA, pc_ + (2 * Instr::kInstrSize)); - DoBranch(instr, rs_val < 0, false); - break; - } - case BGEZL: { - // Format(instr, "bgezl 'rs, 'dest"); - int32_t rs_val = get_register(instr->RsField()); - DoBranch(instr, rs_val >= 0, true); - break; - } - case BLTZ: { - // Format(instr, "bltz 'rs, 'dest"); - int32_t rs_val = get_register(instr->RsField()); - DoBranch(instr, rs_val < 0, false); - break; - } - case BLTZL: { - // Format(instr, "bltzl 'rs, 'dest"); - int32_t rs_val = get_register(instr->RsField()); - DoBranch(instr, rs_val < 0, true); - break; - } - default: { - OS::PrintErr("DecodeRegImm: 0x%x\n", instr->InstructionBits()); - UnimplementedInstruction(instr); - break; - } - } -} - - -void Simulator::DecodeCop1(Instr* instr) { - ASSERT(instr->OpcodeField() == COP1); - if (instr->HasFormat()) { - // If the rs field is a valid format, then the function field identifies the - // instruction. - double fs_val = get_fregister_double(instr->FsField()); - double ft_val = get_fregister_double(instr->FtField()); - uint32_t cc, fcsr_cc; - cc = instr->FpuCCField(); - fcsr_cc = get_fcsr_condition_bit(cc); - switch (instr->Cop1FunctionField()) { - case COP1_ADD: { - // Format(instr, "add.'fmt 'fd, 'fs, 'ft"); - ASSERT(instr->FormatField() == FMT_D); // Only D supported. 
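// FMT_D is asserted throughout the arithmetic cases below, presumably because
// the VM's MIPS code generator only ever emitted double-precision arithmetic;
// single-precision values appear only in the conversion cases (COP1_CVT_D
// from FMT_S, and COP1_CVT_S) further down.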
- set_fregister_double(instr->FdField(), fs_val + ft_val); - break; - } - case COP1_SUB: { - // Format(instr, "sub.'fmt 'fd, 'fs, 'ft"); - ASSERT(instr->FormatField() == FMT_D); // Only D supported. - set_fregister_double(instr->FdField(), fs_val - ft_val); - break; - } - case COP1_MUL: { - // Format(instr, "mul.'fmt 'fd, 'fs, 'ft"); - ASSERT(instr->FormatField() == FMT_D); // Only D supported. - set_fregister_double(instr->FdField(), fs_val * ft_val); - break; - } - case COP1_DIV: { - // Format(instr, "div.'fmt 'fd, 'fs, 'ft"); - ASSERT(instr->FormatField() == FMT_D); // Only D supported. - set_fregister_double(instr->FdField(), fs_val / ft_val); - break; - } - case COP1_SQRT: { - // Format(instr, "sqrt.'fmt 'fd, 'fs"); - ASSERT(instr->FormatField() == FMT_D); // Only D supported. - set_fregister_double(instr->FdField(), sqrt(fs_val)); - break; - } - case COP1_MOV: { - // Format(instr, "mov.'fmt 'fd, 'fs"); - ASSERT(instr->FormatField() == FMT_D); // Only D supported. - set_fregister_double(instr->FdField(), fs_val); - break; - } - case COP1_NEG: { - // Format(instr, "neg.'fmt 'fd, 'fs"); - ASSERT(instr->FormatField() == FMT_D); - set_fregister_double(instr->FdField(), -fs_val); - break; - } - case COP1_C_F: { - ASSERT(instr->FormatField() == FMT_D); // Only D supported. - ASSERT(instr->FdField() == F0); - set_fcsr_bit(fcsr_cc, false); - break; - } - case COP1_C_UN: { - ASSERT(instr->FormatField() == FMT_D); // Only D supported. - ASSERT(instr->FdField() == F0); - set_fcsr_bit(fcsr_cc, isnan(fs_val) || isnan(ft_val)); - break; - } - case COP1_C_EQ: { - ASSERT(instr->FormatField() == FMT_D); // Only D supported. - ASSERT(instr->FdField() == F0); - set_fcsr_bit(fcsr_cc, (fs_val == ft_val)); - break; - } - case COP1_C_UEQ: { - ASSERT(instr->FormatField() == FMT_D); // Only D supported. - ASSERT(instr->FdField() == F0); - set_fcsr_bit(fcsr_cc, - (fs_val == ft_val) || isnan(fs_val) || isnan(ft_val)); - break; - } - case COP1_C_OLT: { - ASSERT(instr->FormatField() == FMT_D); // Only D supported. - ASSERT(instr->FdField() == F0); - set_fcsr_bit(fcsr_cc, (fs_val < ft_val)); - break; - } - case COP1_C_ULT: { - ASSERT(instr->FormatField() == FMT_D); // Only D supported. - ASSERT(instr->FdField() == F0); - set_fcsr_bit(fcsr_cc, - (fs_val < ft_val) || isnan(fs_val) || isnan(ft_val)); - break; - } - case COP1_C_OLE: { - ASSERT(instr->FormatField() == FMT_D); // Only D supported. - ASSERT(instr->FdField() == F0); - set_fcsr_bit(fcsr_cc, (fs_val <= ft_val)); - break; - } - case COP1_C_ULE: { - ASSERT(instr->FormatField() == FMT_D); // Only D supported. 
- ASSERT(instr->FdField() == F0); - set_fcsr_bit(fcsr_cc, - (fs_val <= ft_val) || isnan(fs_val) || isnan(ft_val)); - break; - } - case COP1_TRUNC_W: { - switch (instr->FormatField()) { - case FMT_D: { - double fs_dbl = get_fregister_double(instr->FsField()); - int32_t fs_int; - if (isnan(fs_dbl) || isinf(fs_dbl) || (fs_dbl > kMaxInt32) || - (fs_dbl < kMinInt32)) { - fs_int = kMaxInt32; - } else { - fs_int = static_cast(fs_dbl); - } - set_fregister(instr->FdField(), fs_int); - break; - } - default: { - OS::PrintErr("DecodeCop1: 0x%x\n", instr->InstructionBits()); - UnimplementedInstruction(instr); - break; - } - } - break; - } - case COP1_CVT_D: { - switch (instr->FormatField()) { - case FMT_W: { - int32_t fs_int = get_fregister(instr->FsField()); - double fs_dbl = static_cast(fs_int); - set_fregister_double(instr->FdField(), fs_dbl); - break; - } - case FMT_S: { - float fs_flt = get_fregister_float(instr->FsField()); - double fs_dbl = static_cast(fs_flt); - set_fregister_double(instr->FdField(), fs_dbl); - break; - } - default: { - OS::PrintErr("DecodeCop1: 0x%x\n", instr->InstructionBits()); - UnimplementedInstruction(instr); - break; - } - } - break; - } - case COP1_CVT_S: { - switch (instr->FormatField()) { - case FMT_D: { - double fs_dbl = get_fregister_double(instr->FsField()); - float fs_flt = static_cast(fs_dbl); - set_fregister_float(instr->FdField(), fs_flt); - break; - } - default: { - OS::PrintErr("DecodeCop1: 0x%x\n", instr->InstructionBits()); - UnimplementedInstruction(instr); - break; - } - } - break; - } - default: { - OS::PrintErr("DecodeCop1: 0x%x\n", instr->InstructionBits()); - UnimplementedInstruction(instr); - break; - } - } - } else { - // If the rs field isn't a valid format, then it must be a sub-op. - switch (instr->Cop1SubField()) { - case COP1_MF: { - // Format(instr, "mfc1 'rt, 'fs"); - ASSERT(instr->Bits(0, 11) == 0); - int32_t fs_val = get_fregister(instr->FsField()); - set_register(instr->RtField(), fs_val); - break; - } - case COP1_MT: { - // Format(instr, "mtc1 'rt, 'fs"); - ASSERT(instr->Bits(0, 11) == 0); - int32_t rt_val = get_register(instr->RtField()); - set_fregister(instr->FsField(), rt_val); - break; - } - case COP1_BC: { - ASSERT(instr->Bit(17) == 0); - uint32_t cc, fcsr_cc; - cc = instr->Bits(18, 3); - fcsr_cc = get_fcsr_condition_bit(cc); - if (instr->Bit(16) == 1) { // Branch on true. - DoBranch(instr, test_fcsr_bit(fcsr_cc), false); - } else { // Branch on false. - DoBranch(instr, !test_fcsr_bit(fcsr_cc), false); - } - break; - } - default: { - OS::PrintErr("DecodeCop1: 0x%x\n", instr->InstructionBits()); - UnimplementedInstruction(instr); - break; - } - } - } -} - - -void Simulator::InstructionDecode(Instr* instr) { - if (IsTracingExecution()) { - THR_Print("%" Pu64 " ", icount_); - const uword start = reinterpret_cast(instr); - const uword end = start + Instr::kInstrSize; - if (FLAG_support_disassembler) { - Disassembler::Disassemble(start, end); - } else { - THR_Print("Disassembler not supported in this mode.\n"); - } - } - - switch (instr->OpcodeField()) { - case SPECIAL: { - DecodeSpecial(instr); - break; - } - case SPECIAL2: { - DecodeSpecial2(instr); - break; - } - case REGIMM: { - DecodeRegImm(instr); - break; - } - case COP1: { - DecodeCop1(instr); - break; - } - case ADDIU: { - // Format(instr, "addiu 'rt, 'rs, 'imms"); - int32_t rs_val = get_register(instr->RsField()); - int32_t imm_val = instr->SImmField(); - int32_t res = rs_val + imm_val; - // Rt is set even on overflow. 
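// "Unsigned" in ADDIU is a misnomer: the immediate is still sign-extended and
// the arithmetic is ordinary two's-complement addition. The only difference
// from ADDI is that no overflow trap is raised, which is why the simulator
// can store the wrapped result unconditionally.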
- set_register(instr->RtField(), res); - break; - } - case ANDI: { - // Format(instr, "andi 'rt, 'rs, 'immu"); - int32_t rs_val = get_register(instr->RsField()); - set_register(instr->RtField(), rs_val & instr->UImmField()); - break; - } - case BEQ: { - // Format(instr, "beq 'rs, 'rt, 'dest"); - int32_t rs_val = get_register(instr->RsField()); - int32_t rt_val = get_register(instr->RtField()); - DoBranch(instr, rs_val == rt_val, false); - break; - } - case BEQL: { - // Format(instr, "beql 'rs, 'rt, 'dest"); - int32_t rs_val = get_register(instr->RsField()); - int32_t rt_val = get_register(instr->RtField()); - DoBranch(instr, rs_val == rt_val, true); - break; - } - case BGTZ: { - ASSERT(instr->RtField() == R0); - // Format(instr, "bgtz 'rs, 'dest"); - int32_t rs_val = get_register(instr->RsField()); - DoBranch(instr, rs_val > 0, false); - break; - } - case BGTZL: { - ASSERT(instr->RtField() == R0); - // Format(instr, "bgtzl 'rs, 'dest"); - int32_t rs_val = get_register(instr->RsField()); - DoBranch(instr, rs_val > 0, true); - break; - } - case BLEZ: { - ASSERT(instr->RtField() == R0); - // Format(instr, "blez 'rs, 'dest"); - int32_t rs_val = get_register(instr->RsField()); - DoBranch(instr, rs_val <= 0, false); - break; - } - case BLEZL: { - ASSERT(instr->RtField() == R0); - // Format(instr, "blezl 'rs, 'dest"); - int32_t rs_val = get_register(instr->RsField()); - DoBranch(instr, rs_val <= 0, true); - break; - } - case BNE: { - // Format(instr, "bne 'rs, 'rt, 'dest"); - int32_t rs_val = get_register(instr->RsField()); - int32_t rt_val = get_register(instr->RtField()); - DoBranch(instr, rs_val != rt_val, false); - break; - } - case BNEL: { - // Format(instr, "bnel 'rs, 'rt, 'dest"); - int32_t rs_val = get_register(instr->RsField()); - int32_t rt_val = get_register(instr->RtField()); - DoBranch(instr, rs_val != rt_val, true); - break; - } - case LB: { - // Format(instr, "lb 'rt, 'imms('rs)"); - int32_t base_val = get_register(instr->RsField()); - int32_t imm_val = instr->SImmField(); - uword addr = base_val + imm_val; - if (Simulator::IsIllegalAddress(addr)) { - HandleIllegalAccess(addr, instr); - } else { - int32_t res = ReadB(addr); - set_register(instr->RtField(), res); - } - break; - } - case LBU: { - // Format(instr, "lbu 'rt, 'imms('rs)"); - int32_t base_val = get_register(instr->RsField()); - int32_t imm_val = instr->SImmField(); - uword addr = base_val + imm_val; - if (Simulator::IsIllegalAddress(addr)) { - HandleIllegalAccess(addr, instr); - } else { - uint32_t res = ReadBU(addr); - set_register(instr->RtField(), res); - } - break; - } - case LDC1: { - // Format(instr, "ldc1 'ft, 'imms('rs)"); - int32_t base_val = get_register(instr->RsField()); - int32_t imm_val = instr->SImmField(); - uword addr = base_val + imm_val; - if (Simulator::IsIllegalAddress(addr)) { - HandleIllegalAccess(addr, instr); - } else { - double value = ReadD(addr, instr); - set_fregister_double(instr->FtField(), value); - } - break; - } - case LH: { - // Format(instr, "lh 'rt, 'imms('rs)"); - int32_t base_val = get_register(instr->RsField()); - int32_t imm_val = instr->SImmField(); - uword addr = base_val + imm_val; - if (Simulator::IsIllegalAddress(addr)) { - HandleIllegalAccess(addr, instr); - } else { - int32_t res = ReadH(addr, instr); - set_register(instr->RtField(), res); - } - break; - } - case LHU: { - // Format(instr, "lhu 'rt, 'imms('rs)"); - int32_t base_val = get_register(instr->RsField()); - int32_t imm_val = instr->SImmField(); - uword addr = base_val + imm_val; - if 
(Simulator::IsIllegalAddress(addr)) { - HandleIllegalAccess(addr, instr); - } else { - int32_t res = ReadHU(addr, instr); - set_register(instr->RtField(), res); - } - break; - } - case LUI: { - ASSERT(instr->RsField() == 0); - set_register(instr->RtField(), instr->UImmField() << 16); - break; - } - case LL: { - // Format(instr, "ll 'rt, 'imms('rs)"); - int32_t base_val = get_register(instr->RsField()); - int32_t imm_val = instr->SImmField(); - uword addr = base_val + imm_val; - if (Simulator::IsIllegalAddress(addr)) { - HandleIllegalAccess(addr, instr); - } else { - int32_t res = ReadExclusiveW(addr, instr); - set_register(instr->RtField(), res); - } - break; - } - case LW: { - // Format(instr, "lw 'rt, 'imms('rs)"); - int32_t base_val = get_register(instr->RsField()); - int32_t imm_val = instr->SImmField(); - uword addr = base_val + imm_val; - if (Simulator::IsIllegalAddress(addr)) { - HandleIllegalAccess(addr, instr); - } else { - int32_t res = ReadW(addr, instr); - set_register(instr->RtField(), res); - } - break; - } - case LWC1: { - // Format(instr, "lwc1 'ft, 'imms('rs)"); - int32_t base_val = get_register(instr->RsField()); - int32_t imm_val = instr->SImmField(); - uword addr = base_val + imm_val; - if (Simulator::IsIllegalAddress(addr)) { - HandleIllegalAccess(addr, instr); - } else { - int32_t value = ReadW(addr, instr); - set_fregister(instr->FtField(), value); - } - break; - } - case ORI: { - // Format(instr, "ori 'rt, 'rs, 'immu"); - int32_t rs_val = get_register(instr->RsField()); - set_register(instr->RtField(), rs_val | instr->UImmField()); - break; - } - case SB: { - // Format(instr, "sb 'rt, 'imms('rs)"); - int32_t rt_val = get_register(instr->RtField()); - int32_t base_val = get_register(instr->RsField()); - int32_t imm_val = instr->SImmField(); - uword addr = base_val + imm_val; - if (Simulator::IsIllegalAddress(addr)) { - HandleIllegalAccess(addr, instr); - } else { - WriteB(addr, rt_val & 0xff); - } - break; - } - case SC: { - // Format(instr, "sc 'rt, 'imms('rs)"); - int32_t rt_val = get_register(instr->RtField()); - int32_t base_val = get_register(instr->RsField()); - int32_t imm_val = instr->SImmField(); - uword addr = base_val + imm_val; - if (Simulator::IsIllegalAddress(addr)) { - HandleIllegalAccess(addr, instr); - } else { - intptr_t status = WriteExclusiveW(addr, rt_val, instr); - set_register(instr->RtField(), status); - } - break; - } - case SLTI: { - // Format(instr, "slti 'rt, 'rs, 'imms"); - int32_t rs_val = get_register(instr->RsField()); - int32_t imm_val = instr->SImmField(); - set_register(instr->RtField(), rs_val < imm_val ? 1 : 0); - break; - } - case SLTIU: { - // Format(instr, "sltiu 'rt, 'rs, 'imms"); - uint32_t rs_val = get_register(instr->RsField()); - int32_t imm_val = instr->SImmField(); // Sign extend to 32-bit. - uint32_t immu_val = static_cast(imm_val); // Treat as unsigned. - set_register(instr->RtField(), rs_val < immu_val ? 
1 : 0);
-      break;
-    }
-    case SDC1: {
-      // Format(instr, "sdc1 'ft, 'imms('rs)");
-      int32_t base_val = get_register(instr->RsField());
-      int32_t imm_val = instr->SImmField();
-      uword addr = base_val + imm_val;
-      if (Simulator::IsIllegalAddress(addr)) {
-        HandleIllegalAccess(addr, instr);
-      } else {
-        double value = get_fregister_double(instr->FtField());
-        WriteD(addr, value, instr);
-      }
-      break;
-    }
-    case SH: {
-      // Format(instr, "sh 'rt, 'imms('rs)");
-      int32_t rt_val = get_register(instr->RtField());
-      int32_t base_val = get_register(instr->RsField());
-      int32_t imm_val = instr->SImmField();
-      uword addr = base_val + imm_val;
-      if (Simulator::IsIllegalAddress(addr)) {
-        HandleIllegalAccess(addr, instr);
-      } else {
-        WriteH(addr, rt_val & 0xffff, instr);
-      }
-      break;
-    }
-    case SW: {
-      // Format(instr, "sw 'rt, 'imms('rs)");
-      int32_t rt_val = get_register(instr->RtField());
-      int32_t base_val = get_register(instr->RsField());
-      int32_t imm_val = instr->SImmField();
-      uword addr = base_val + imm_val;
-      if (Simulator::IsIllegalAddress(addr)) {
-        HandleIllegalAccess(addr, instr);
-      } else {
-        WriteW(addr, rt_val, instr);
-      }
-      break;
-    }
-    case SWC1: {
-      // Format(instr, "swc1 'ft, 'imms('rs)");
-      int32_t base_val = get_register(instr->RsField());
-      int32_t imm_val = instr->SImmField();
-      uword addr = base_val + imm_val;
-      if (Simulator::IsIllegalAddress(addr)) {
-        HandleIllegalAccess(addr, instr);
-      } else {
-        int32_t value = get_fregister(instr->FtField());
-        WriteW(addr, value, instr);
-      }
-      break;
-    }
-    case XORI: {
-      // Format(instr, "xori 'rt, 'rs, 'immu");
-      int32_t rs_val = get_register(instr->RsField());
-      set_register(instr->RtField(), rs_val ^ instr->UImmField());
-      break;
-    }
-    default: {
-      OS::PrintErr("Undecoded instruction: 0x%x at %p\n",
-                   instr->InstructionBits(), instr);
-      UnimplementedInstruction(instr);
-      break;
-    }
-  }
-  pc_ += Instr::kInstrSize;
-}
-
-
-void Simulator::ExecuteDelaySlot() {
-  ASSERT(pc_ != kEndSimulatingPC);
-  delay_slot_ = true;
-  icount_++;
-  Instr* instr = Instr::At(pc_ + Instr::kInstrSize);
-  if (FLAG_stop_sim_at != ULLONG_MAX) {
-    if (icount_ == FLAG_stop_sim_at) {
-      SimulatorDebugger dbg(this);
-      dbg.Stop(instr, "Instruction count reached");
-    } else if (reinterpret_cast<uword>(instr) == FLAG_stop_sim_at) {
-      SimulatorDebugger dbg(this);
-      dbg.Stop(instr, "Instruction address reached");
-    }
-  }
-  InstructionDecode(instr);
-  delay_slot_ = false;
-}
-
-
-void Simulator::Execute() {
-  if (FLAG_stop_sim_at == ULLONG_MAX) {
-    // Fast version of the dispatch loop without checking whether the simulator
-    // should be stopping at a particular executed instruction.
-    while (pc_ != kEndSimulatingPC) {
-      icount_++;
-      Instr* instr = Instr::At(pc_);
-      if (IsIllegalAddress(pc_)) {
-        HandleIllegalAccess(pc_, instr);
-      } else {
-        InstructionDecode(instr);
-      }
-    }
-  } else {
-    // FLAG_stop_sim_at is at the non-default value. Stop in the debugger when
-    // we reach the particular instruction count or address.
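// The dispatch loop is duplicated deliberately: the common case (no
// FLAG_stop_sim_at set) avoids two extra compares per simulated instruction.
// This loop is the simulator's hottest path, so even a small per-iteration
// cost is worth hoisting out.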
- while (pc_ != kEndSimulatingPC) { - Instr* instr = Instr::At(pc_); - icount_++; - if (icount_ == FLAG_stop_sim_at) { - SimulatorDebugger dbg(this); - dbg.Stop(instr, "Instruction count reached"); - } else if (reinterpret_cast(instr) == FLAG_stop_sim_at) { - SimulatorDebugger dbg(this); - dbg.Stop(instr, "Instruction address reached"); - } else if (IsIllegalAddress(pc_)) { - HandleIllegalAccess(pc_, instr); - } else { - InstructionDecode(instr); - } - } - } -} - - -int64_t Simulator::Call(int32_t entry, - int32_t parameter0, - int32_t parameter1, - int32_t parameter2, - int32_t parameter3, - bool fp_return, - bool fp_args) { - // Save the SP register before the call so we can restore it. - int32_t sp_before_call = get_register(SP); - - // Setup parameters. - if (fp_args) { - set_fregister(F0, parameter0); - set_fregister(F1, parameter1); - set_fregister(F2, parameter2); - set_fregister(F3, parameter3); - } else { - set_register(A0, parameter0); - set_register(A1, parameter1); - set_register(A2, parameter2); - set_register(A3, parameter3); - } - - // Make sure the activation frames are properly aligned. - int32_t stack_pointer = sp_before_call; - if (OS::ActivationFrameAlignment() > 1) { - stack_pointer = - Utils::RoundDown(stack_pointer, OS::ActivationFrameAlignment()); - } - set_register(SP, stack_pointer); - - // Prepare to execute the code at entry. - set_pc(entry); - // Put down marker for end of simulation. The simulator will stop simulation - // when the PC reaches this value. By saving the "end simulation" value into - // RA the simulation stops when returning to this call point. - set_register(RA, kEndSimulatingPC); - - // Remember the values of callee-saved registers. - // The code below assumes that r9 is not used as sb (static base) in - // simulator code and therefore is regarded as a callee-saved register. - int32_t r16_val = get_register(R16); - int32_t r17_val = get_register(R17); - int32_t r18_val = get_register(R18); - int32_t r19_val = get_register(R19); - int32_t r20_val = get_register(R20); - int32_t r21_val = get_register(R21); - int32_t r22_val = get_register(R22); - int32_t r23_val = get_register(R23); - - double d10_val = get_dregister(D10); - double d11_val = get_dregister(D11); - double d12_val = get_dregister(D12); - double d13_val = get_dregister(D13); - double d14_val = get_dregister(D14); - double d15_val = get_dregister(D15); - - // Setup the callee-saved registers with a known value. To be able to check - // that they are preserved properly across dart execution. - int32_t callee_saved_value = icount_; - set_register(R16, callee_saved_value); - set_register(R17, callee_saved_value); - set_register(R18, callee_saved_value); - set_register(R19, callee_saved_value); - set_register(R20, callee_saved_value); - set_register(R21, callee_saved_value); - set_register(R22, callee_saved_value); - set_register(R23, callee_saved_value); - - set_dregister_bits(D10, callee_saved_value); - set_dregister_bits(D11, callee_saved_value); - set_dregister_bits(D12, callee_saved_value); - set_dregister_bits(D13, callee_saved_value); - set_dregister_bits(D14, callee_saved_value); - set_dregister_bits(D15, callee_saved_value); - - // Start the simulation - Execute(); - - // Check that the callee-saved registers have been preserved. 
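// This is an ABI smoke test: every callee-saved register was loaded with the
// current icount_ before Execute(), so if generated code clobbers s0..s7
// (R16..R23) or d10..d15 without restoring them, the ASSERTs below fire at
// the call boundary instead of corrupting unrelated Dart code much later.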
- ASSERT(callee_saved_value == get_register(R16)); - ASSERT(callee_saved_value == get_register(R17)); - ASSERT(callee_saved_value == get_register(R18)); - ASSERT(callee_saved_value == get_register(R19)); - ASSERT(callee_saved_value == get_register(R20)); - ASSERT(callee_saved_value == get_register(R21)); - ASSERT(callee_saved_value == get_register(R22)); - ASSERT(callee_saved_value == get_register(R23)); - - ASSERT(callee_saved_value == get_dregister_bits(D10)); - ASSERT(callee_saved_value == get_dregister_bits(D11)); - ASSERT(callee_saved_value == get_dregister_bits(D12)); - ASSERT(callee_saved_value == get_dregister_bits(D13)); - ASSERT(callee_saved_value == get_dregister_bits(D14)); - ASSERT(callee_saved_value == get_dregister_bits(D15)); - - // Restore callee-saved registers with the original value. - set_register(R16, r16_val); - set_register(R17, r17_val); - set_register(R18, r18_val); - set_register(R19, r19_val); - set_register(R20, r20_val); - set_register(R21, r21_val); - set_register(R22, r22_val); - set_register(R23, r23_val); - - set_dregister(D10, d10_val); - set_dregister(D11, d11_val); - set_dregister(D12, d12_val); - set_dregister(D13, d13_val); - set_dregister(D14, d14_val); - set_dregister(D15, d15_val); - - // Restore the SP register and return V1:V0. - set_register(SP, sp_before_call); - int64_t return_value; - if (fp_return) { - return_value = Utils::LowHighTo64Bits(get_fregister(F0), get_fregister(F1)); - } else { - return_value = Utils::LowHighTo64Bits(get_register(V0), get_register(V1)); - } - return return_value; -} - - -void Simulator::JumpToFrame(uword pc, uword sp, uword fp, Thread* thread) { - // Walk over all setjmp buffers (simulated --> C++ transitions) - // and try to find the setjmp associated with the simulated stack pointer. - SimulatorSetjmpBuffer* buf = last_setjmp_buffer(); - while (buf->link() != NULL && buf->link()->sp() <= sp) { - buf = buf->link(); - } - ASSERT(buf != NULL); - - // The C++ caller has not cleaned up the stack memory of C++ frames. - // Prepare for unwinding frames by destroying all the stack resources - // in the previous C++ frames. - StackResource::Unwind(thread); - - // Unwind the C++ stack and continue simulation in the target frame. - set_pc(static_cast(pc)); - set_register(SP, static_cast(sp)); - set_register(FP, static_cast(fp)); - set_register(THR, reinterpret_cast(thread)); - // Set the tag. - thread->set_vm_tag(VMTag::kDartTagId); - // Clear top exit frame. - thread->set_top_exit_frame_info(0); - // Restore pool pointer. - int32_t code = - *reinterpret_cast(fp + kPcMarkerSlotFromFp * kWordSize); - int32_t pp = *reinterpret_cast(code + Code::object_pool_offset() - - kHeapObjectTag); - set_register(CODE_REG, code); - set_register(PP, pp); - buf->Longjmp(); -} - -} // namespace dart - -#endif // defined(USING_SIMULATOR) - -#endif // defined TARGET_ARCH_MIPS diff --git a/runtime/vm/simulator_mips.h b/runtime/vm/simulator_mips.h deleted file mode 100644 index 00d93ec16dd..00000000000 --- a/runtime/vm/simulator_mips.h +++ /dev/null @@ -1,257 +0,0 @@ -// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file -// for details. All rights reserved. Use of this source code is governed by a -// BSD-style license that can be found in the LICENSE file. - -// Declares a Simulator for MIPS instructions if we are not generating a native -// MIPS binary. This Simulator allows us to run and debug MIPS code generation -// on regular desktop machines. 
-// Dart calls into generated code by "calling" the InvokeDartCode stub,
-// which will start execution in the Simulator or forwards to the real entry
-// on a MIPS HW platform.
-
-#ifndef RUNTIME_VM_SIMULATOR_MIPS_H_
-#define RUNTIME_VM_SIMULATOR_MIPS_H_
-
-#ifndef RUNTIME_VM_SIMULATOR_H_
-#error Do not include simulator_mips.h directly; use simulator.h.
-#endif
-
-#include "vm/constants_mips.h"
-
-namespace dart {
-
-class Isolate;
-class Mutex;
-class RawObject;
-class SimulatorSetjmpBuffer;
-class Thread;
-
-class Simulator {
- public:
-  static const uword kSimulatorStackUnderflowSize = 64;
-
-  Simulator();
-  ~Simulator();
-
-  // The currently executing Simulator instance, which is associated with the
-  // current isolate.
-  static Simulator* Current();
-
-  // Accessors for register state.
-  void set_register(Register reg, int32_t value);
-  int32_t get_register(Register reg) const;
-
-  // Accessors for floating point register state.
-  void set_fregister(FRegister freg, int32_t value);
-  void set_fregister_float(FRegister freg, float value);
-  void set_fregister_double(FRegister freg, double value);
-  void set_fregister_long(FRegister freg, int64_t value);
-
-  int32_t get_fregister(FRegister freg) const;
-  float get_fregister_float(FRegister freg) const;
-  double get_fregister_double(FRegister freg) const;
-  int64_t get_fregister_long(FRegister freg) const;
-
-  void set_dregister_bits(DRegister freg, int64_t value);
-  void set_dregister(DRegister freg, double value);
-
-  int64_t get_dregister_bits(DRegister freg) const;
-  double get_dregister(DRegister freg) const;
-
-  int32_t get_sp() const { return get_register(SPREG); }
-
-  // Accessor for the pc.
-  void set_pc(int32_t value) { pc_ = value; }
-  int32_t get_pc() const { return pc_; }
-
-  // Accessors for hi, lo registers.
-  void set_hi_register(int32_t value) { hi_reg_ = value; }
-  void set_lo_register(int32_t value) { lo_reg_ = value; }
-  int32_t get_hi_register() const { return hi_reg_; }
-  int32_t get_lo_register() const { return lo_reg_; }
-
-  int32_t get_fcsr_condition_bit(int32_t cc) const {
-    if (cc == 0) {
-      return 23;
-    } else {
-      return 24 + cc;
-    }
-  }
-
-  void set_fcsr_bit(uint32_t cc, bool value) {
-    if (value) {
-      fcsr_ |= (1 << cc);
-    } else {
-      fcsr_ &= ~(1 << cc);
-    }
-  }
-
-  bool test_fcsr_bit(uint32_t cc) { return fcsr_ & (1 << cc); }
-
-  // Accessors to the internal simulator stack base and top.
-  uword StackBase() const { return reinterpret_cast<uword>(stack_); }
-  uword StackTop() const;
-
-  // Accessor to the instruction counter.
-  uint64_t get_icount() const { return icount_; }
-
-  // The thread's top_exit_frame_info refers to a Dart frame in the simulator
-  // stack. The simulator's top_exit_frame_info refers to a C++ frame in the
-  // native stack.
-  uword top_exit_frame_info() const { return top_exit_frame_info_; }
-  void set_top_exit_frame_info(uword value) { top_exit_frame_info_ = value; }
-
-  // Call on program start.
-  static void InitOnce();
-
-  // Dart generally calls into generated code with 4 parameters. This is a
-  // convenience function, which sets up the simulator state and grabs the
-  // result on return. When fp_return is true the return value is the D0
-  // floating point register. Otherwise, the return value is V1:V0.
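To make the V1:V0 convention concrete, here is a stand-alone sketch (not VM code; the register values are invented) of how the two 32-bit halves returned in V0 (low) and V1 (high) are packed into the int64_t that Call() returns, mirroring Utils::LowHighTo64Bits:

#include <cstdint>
#include <cstdio>

// Reassemble a 64-bit result from two 32-bit register halves.
int64_t LowHighTo64BitsModel(uint32_t low, uint32_t high) {
  return (static_cast<int64_t>(high) << 32) | low;
}

int main() {
  uint32_t v0 = 0x89abcdefu;  // Low word, as the simulator reads it from V0.
  uint32_t v1 = 0x01234567u;  // High word, from V1.
  std::printf("result = %#llx\n",
              static_cast<unsigned long long>(LowHighTo64BitsModel(v0, v1)));
  return 0;
}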
-  int64_t Call(int32_t entry,
-               int32_t parameter0,
-               int32_t parameter1,
-               int32_t parameter2,
-               int32_t parameter3,
-               bool fp_return = false,
-               bool fp_args = false);
-
-  // Implementation of atomic compare and exchange in the same synchronization
-  // domain as the other synchronization primitive instructions (ll and sc).
-  static uword CompareExchange(uword* address,
-                               uword compare_value,
-                               uword new_value);
-  static uint32_t CompareExchangeUint32(uint32_t* address,
-                                        uint32_t compare_value,
-                                        uint32_t new_value);
-
-  // Runtime and native call support.
-  enum CallKind {
-    kRuntimeCall,
-    kLeafRuntimeCall,
-    kLeafFloatRuntimeCall,
-    kBootstrapNativeCall,
-    kNativeCall
-  };
-  static uword RedirectExternalReference(uword function,
-                                         CallKind call_kind,
-                                         int argument_count);
-
-  static uword FunctionForRedirect(uword redirect);
-
-  void JumpToFrame(uword pc, uword sp, uword fp, Thread* thread);
-
- private:
-  // A pc value used to signal the simulator to stop execution. Generally
-  // the ra is set to this value on transition from native C code to
-  // simulated execution, so that the simulator can "return" to the native
-  // C code.
-  static const uword kEndSimulatingPC = -1;
-
-  // Special registers for the results of div, divu.
-  int32_t hi_reg_;
-  int32_t lo_reg_;
-
-  int32_t registers_[kNumberOfCpuRegisters];
-  int32_t fregisters_[kNumberOfFRegisters];
-  int32_t fcsr_;
-  uword pc_;
-
-  // Simulator support.
-  char* stack_;
-  uint64_t icount_;
-  bool delay_slot_;
-  SimulatorSetjmpBuffer* last_setjmp_buffer_;
-  uword top_exit_frame_info_;
-
-  // Registered breakpoints.
-  Instr* break_pc_;
-  int32_t break_instr_;
-
-  // Illegal memory access support.
-  static bool IsIllegalAddress(uword addr) { return addr < 64 * 1024; }
-  void HandleIllegalAccess(uword addr, Instr* instr);
-
-  // Read and write memory.
-  void UnalignedAccess(const char* msg, uword addr, Instr* instr);
-
-  // Handles a legal instruction that the simulator does not implement.
-  void UnimplementedInstruction(Instr* instr);
-
-  void set_pc(uword value) { pc_ = value; }
-
-  void Format(Instr* instr, const char* format);
-
-  inline int8_t ReadB(uword addr);
-  inline uint8_t ReadBU(uword addr);
-  inline int16_t ReadH(uword addr, Instr* instr);
-  inline uint16_t ReadHU(uword addr, Instr* instr);
-  inline intptr_t ReadW(uword addr, Instr* instr);
-
-  inline void WriteB(uword addr, uint8_t value);
-  inline void WriteH(uword addr, uint16_t value, Instr* instr);
-  inline void WriteW(uword addr, intptr_t value, Instr* instr);
-
-  inline double ReadD(uword addr, Instr* instr);
-  inline void WriteD(uword addr, double value, Instr* instr);
-
-  // We keep track of 16 exclusive access address tags across all threads.
-  // Since we cannot simulate a native context switch, which clears
-  // the exclusive access state of the local monitor, we associate the thread
-  // requesting exclusive access with the address tag.
-  // Multiple threads requesting exclusive access (using the LL instruction)
-  // to the same address will result in multiple address tags being created for
-  // the same address, one per thread.
-  // At any given time, each thread is associated with at most one address tag.
-  static Mutex* exclusive_access_lock_;
-  static const int kNumAddressTags = 16;
-  static struct AddressTag {
-    Thread* thread;
-    uword addr;
-  } exclusive_access_state_[kNumAddressTags];
-  static int next_address_tag_;
-
-  // Synchronization primitives support.
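The address-tag scheme described above is small enough to model directly. A toy, single-threaded sketch of the LL/SC bookkeeping (illustrative names and types; plain ints stand in for Thread*, and the real implementation guards the table with exclusive_access_lock_):

#include <cstdint>
#include <cstdio>

const int kNumAddressTagsModel = 16;
struct AddressTagModel {
  int owner;  // 0 means the slot is unused.
  uintptr_t addr;
};
static AddressTagModel g_tags[kNumAddressTagsModel];
static int g_next_tag = 0;

// LL: tag `addr` as exclusively accessed by `owner` (one tag per thread).
void SetExclusiveAccessModel(int owner, uintptr_t addr) {
  for (int i = 0; i < kNumAddressTagsModel; i++) {
    if (g_tags[i].owner == owner) {  // Reuse this thread's existing tag.
      g_tags[i].addr = addr;
      return;
    }
  }
  g_tags[g_next_tag] = {owner, addr};  // Otherwise take a slot round-robin.
  g_next_tag = (g_next_tag + 1) % kNumAddressTagsModel;
}

// SC: report whether `owner` still held a tag for `addr`, clearing all tags
// for `addr` either way so at most one racing store-conditional can succeed.
bool HasExclusiveAccessAndOpenModel(int owner, uintptr_t addr) {
  bool has_access = false;
  for (int i = 0; i < kNumAddressTagsModel; i++) {
    if (g_tags[i].addr != addr) continue;
    if (g_tags[i].owner == owner) has_access = true;
    g_tags[i] = {0, 0};
  }
  return has_access;
}

int main() {
  SetExclusiveAccessModel(1, 0x1000);  // Thread 1: ll from 0x1000.
  SetExclusiveAccessModel(2, 0x1000);  // Thread 2: ll from the same address.
  std::printf("thread 1 sc: %d\n", HasExclusiveAccessAndOpenModel(1, 0x1000));
  std::printf("thread 2 sc: %d\n", HasExclusiveAccessAndOpenModel(2, 0x1000));
  return 0;
}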
- void ClearExclusive(); - intptr_t ReadExclusiveW(uword addr, Instr* instr); - intptr_t WriteExclusiveW(uword addr, intptr_t value, Instr* instr); - - // Set access to given address to 'exclusive state' for current thread. - static void SetExclusiveAccess(uword addr); - - // Returns true if the current thread has exclusive access to given address, - // returns false otherwise. In either case, set access to given address to - // 'open state' for all threads. - // If given addr is NULL, set access to 'open state' for current - // thread (CLREX). - static bool HasExclusiveAccessAndOpen(uword addr); - - void DoBranch(Instr* instr, bool taken, bool likely); - void DoBreak(Instr* instr); - - void DecodeSpecial(Instr* instr); - void DecodeSpecial2(Instr* instr); - void DecodeRegImm(Instr* instr); - void DecodeCop1(Instr* instr); - void InstructionDecode(Instr* instr); - - void Execute(); - void ExecuteDelaySlot(); - - // Returns true if tracing of executed instructions is enabled. - bool IsTracingExecution() const; - - // Longjmp support for exceptions. - SimulatorSetjmpBuffer* last_setjmp_buffer() { return last_setjmp_buffer_; } - void set_last_setjmp_buffer(SimulatorSetjmpBuffer* buffer) { - last_setjmp_buffer_ = buffer; - } - - friend class SimulatorDebugger; - friend class SimulatorSetjmpBuffer; - DISALLOW_COPY_AND_ASSIGN(Simulator); -}; - -} // namespace dart - -#endif // RUNTIME_VM_SIMULATOR_MIPS_H_ diff --git a/runtime/vm/snapshot.cc b/runtime/vm/snapshot.cc index 01f99f0a7b1..0098bb7e6fe 100644 --- a/runtime/vm/snapshot.cc +++ b/runtime/vm/snapshot.cc @@ -992,21 +992,6 @@ void AssemblyImageWriter::FrameUnwindPrologue() { assembly_stream_.Print(".setfp r11, sp, #0\n"); #endif -#elif defined(TARGET_ARCH_MIPS) - COMPILE_ASSERT(FP == R30); - COMPILE_ASSERT(RA == R31); - assembly_stream_.Print(".cfi_def_cfa r30, 0\n"); // CFA is fp+0 - assembly_stream_.Print(".cfi_offset r30, 0\n"); // saved fp is *(CFA+0) - assembly_stream_.Print(".cfi_offset r31, 4\n"); // saved pc is *(CFA+4) - // saved sp is CFA+16 - // Should be ".cfi_value_offset sp, 8", but requires gcc newer than late - // 2016 and not supported by Android's libunwind. - // DW_CFA_expression 0x10 - // uleb128 register (sp) 29 - // uleb128 size of operation 2 - // DW_OP_plus_uconst 0x23 - // uleb128 addend 8 - assembly_stream_.Print(".cfi_escape 0x10, 29, 2, 0x23, 8\n"); #endif } diff --git a/runtime/vm/stack_frame.h b/runtime/vm/stack_frame.h index 37f1a33b237..97373766b34 100644 --- a/runtime/vm/stack_frame.h +++ b/runtime/vm/stack_frame.h @@ -17,8 +17,6 @@ #include "vm/stack_frame_arm.h" #elif defined(TARGET_ARCH_ARM64) #include "vm/stack_frame_arm64.h" -#elif defined(TARGET_ARCH_MIPS) -#include "vm/stack_frame_mips.h" #elif defined(TARGET_ARCH_DBC) #include "vm/stack_frame_dbc.h" #else diff --git a/runtime/vm/stack_frame_mips.h b/runtime/vm/stack_frame_mips.h deleted file mode 100644 index 7290b8b7050..00000000000 --- a/runtime/vm/stack_frame_mips.h +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file -// for details. All rights reserved. Use of this source code is governed by a -// BSD-style license that can be found in the LICENSE file. - -#ifndef RUNTIME_VM_STACK_FRAME_MIPS_H_ -#define RUNTIME_VM_STACK_FRAME_MIPS_H_ - -namespace dart { - -/* MIPS Dart Frame Layout - - | | <- TOS -Callee frame | ... | - | current RA | (PC of current frame) - | callee's PC marker | - +--------------------+ -Current frame | ... 
T| <- SP of current frame - | first local T| - | caller's PP T| - | CODE_REG T| (current frame's code object) - | caller's FP | <- FP of current frame - | caller's RA | (PC of caller frame) - +--------------------+ -Caller frame | last parameter | <- SP of caller frame - | ... | - - T against a slot indicates it needs to be traversed during GC. -*/ - -static const int kDartFrameFixedSize = 4; // PP, FP, RA, PC marker. -static const int kSavedPcSlotFromSp = -1; - -static const int kFirstObjectSlotFromFp = -1; // Used by GC to traverse stack. - -static const int kFirstLocalSlotFromFp = -3; -static const int kSavedCallerPpSlotFromFp = -2; -static const int kPcMarkerSlotFromFp = -1; -static const int kSavedCallerFpSlotFromFp = 0; -static const int kSavedCallerPcSlotFromFp = 1; -static const int kParamEndSlotFromFp = 1; // One slot past last parameter. -static const int kCallerSpSlotFromFp = 2; - -// Entry and exit frame layout. -static const int kExitLinkSlotFromEntryFp = -24; -COMPILE_ASSERT(kAbiPreservedCpuRegCount == 8); -COMPILE_ASSERT(kAbiPreservedFpuRegCount == 12); - -} // namespace dart - -#endif // RUNTIME_VM_STACK_FRAME_MIPS_H_ diff --git a/runtime/vm/stub_code_mips.cc b/runtime/vm/stub_code_mips.cc deleted file mode 100644 index 294d2858dfa..00000000000 --- a/runtime/vm/stub_code_mips.cc +++ /dev/null @@ -1,2459 +0,0 @@ -// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file -// for details. All rights reserved. Use of this source code is governed by a -// BSD-style license that can be found in the LICENSE file. - -#include "vm/globals.h" -#if defined(TARGET_ARCH_MIPS) - -#include "vm/assembler.h" -#include "vm/compiler.h" -#include "vm/dart_entry.h" -#include "vm/flow_graph_compiler.h" -#include "vm/heap.h" -#include "vm/instructions.h" -#include "vm/object_store.h" -#include "vm/runtime_entry.h" -#include "vm/stack_frame.h" -#include "vm/stub_code.h" -#include "vm/tags.h" - -#define __ assembler-> - -namespace dart { - -DEFINE_FLAG(bool, inline_alloc, true, "Inline allocation of objects."); -DEFINE_FLAG(bool, - use_slow_path, - false, - "Set to true for debugging & verifying the slow paths."); -DECLARE_FLAG(bool, trace_optimized_ic_calls); - -// Input parameters: -// RA : return address. -// SP : address of last argument in argument array. -// SP + 4*S4 - 4 : address of first argument in argument array. -// SP + 4*S4 : address of return value. -// S5 : address of the runtime function to call. -// S4 : number of arguments to the call. -void StubCode::GenerateCallToRuntimeStub(Assembler* assembler) { - const intptr_t thread_offset = NativeArguments::thread_offset(); - const intptr_t argc_tag_offset = NativeArguments::argc_tag_offset(); - const intptr_t argv_offset = NativeArguments::argv_offset(); - const intptr_t retval_offset = NativeArguments::retval_offset(); - - __ SetPrologueOffset(); - __ Comment("CallToRuntimeStub"); - __ EnterStubFrame(); - - // Save exit frame information to enable stack walking as we are about - // to transition to Dart VM C++ code. - __ sw(FP, Address(THR, Thread::top_exit_frame_info_offset())); - -#if defined(DEBUG) - { - Label ok; - // Check that we are always entering from Dart code. - __ lw(T0, Assembler::VMTagAddress()); - __ BranchEqual(T0, Immediate(VMTag::kDartTagId), &ok); - __ Stop("Not coming from Dart code."); - __ Bind(&ok); - } -#endif - - // Mark that the thread is executing VM code. - __ sw(S5, Assembler::VMTagAddress()); - - // Reserve space for arguments and align frame before entering C++ world. 
- // NativeArguments are passed in registers. - ASSERT(sizeof(NativeArguments) == 4 * kWordSize); - __ ReserveAlignedFrameSpace(4 * kWordSize); // Reserve space for arguments. - - // Pass NativeArguments structure by value and call runtime. - // Registers A0, A1, A2, and A3 are used. - - ASSERT(thread_offset == 0 * kWordSize); - // Set thread in NativeArgs. - __ mov(A0, THR); - - // There are no runtime calls to closures, so we do not need to set the tag - // bits kClosureFunctionBit and kInstanceFunctionBit in argc_tag_. - ASSERT(argc_tag_offset == 1 * kWordSize); - __ mov(A1, S4); // Set argc in NativeArguments. - - ASSERT(argv_offset == 2 * kWordSize); - __ sll(A2, S4, 2); - __ addu(A2, FP, A2); // Compute argv. - // Set argv in NativeArguments. - __ addiu(A2, A2, Immediate(kParamEndSlotFromFp * kWordSize)); - - - // Call runtime or redirection via simulator. - // We defensively always jalr through T9 because it is sometimes required by - // the MIPS ABI. - __ mov(T9, S5); - __ jalr(T9); - - ASSERT(retval_offset == 3 * kWordSize); - // Retval is next to 1st argument. - __ delay_slot()->addiu(A3, A2, Immediate(kWordSize)); - __ Comment("CallToRuntimeStub return"); - - // Mark that the thread is executing Dart code. - __ LoadImmediate(A2, VMTag::kDartTagId); - __ sw(A2, Assembler::VMTagAddress()); - - // Reset exit frame information in Isolate structure. - __ sw(ZR, Address(THR, Thread::top_exit_frame_info_offset())); - - __ LeaveStubFrameAndReturn(); -} - - -// Print the stop message. -DEFINE_LEAF_RUNTIME_ENTRY(void, PrintStopMessage, 1, const char* message) { - OS::Print("Stop message: %s\n", message); -} -END_LEAF_RUNTIME_ENTRY - - -// Input parameters: -// A0 : stop message (const char*). -// Must preserve all registers. -void StubCode::GeneratePrintStopMessageStub(Assembler* assembler) { - __ EnterCallRuntimeFrame(0); - // Call the runtime leaf function. A0 already contains the parameter. - __ CallRuntime(kPrintStopMessageRuntimeEntry, 1); - __ LeaveCallRuntimeFrame(); - __ Ret(); -} - - -// Input parameters: -// RA : return address. -// SP : address of return value. -// T5 : address of the native function to call. -// A2 : address of first argument in argument array. -// A1 : argc_tag including number of arguments and function kind. -static void GenerateCallNativeWithWrapperStub(Assembler* assembler, - Address wrapper) { - const intptr_t thread_offset = NativeArguments::thread_offset(); - const intptr_t argc_tag_offset = NativeArguments::argc_tag_offset(); - const intptr_t argv_offset = NativeArguments::argv_offset(); - const intptr_t retval_offset = NativeArguments::retval_offset(); - - __ SetPrologueOffset(); - __ Comment("CallNativeCFunctionStub"); - __ EnterStubFrame(); - - // Save exit frame information to enable stack walking as we are about - // to transition to native code. - __ sw(FP, Address(THR, Thread::top_exit_frame_info_offset())); - -#if defined(DEBUG) - { - Label ok; - // Check that we are always entering from Dart code. - __ lw(T0, Assembler::VMTagAddress()); - __ BranchEqual(T0, Immediate(VMTag::kDartTagId), &ok); - __ Stop("Not coming from Dart code."); - __ Bind(&ok); - } -#endif - - // Mark that the thread is executing native code. - __ sw(T5, Assembler::VMTagAddress()); - - // Initialize NativeArguments structure and call native function. - // Registers A0, A1, A2, and A3 are used. - - ASSERT(thread_offset == 0 * kWordSize); - // Set thread in NativeArgs. 
- __ mov(A0, THR); - - // There are no native calls to closures, so we do not need to set the tag - // bits kClosureFunctionBit and kInstanceFunctionBit in argc_tag_. - ASSERT(argc_tag_offset == 1 * kWordSize); - // Set argc in NativeArguments: A1 already contains argc. - - ASSERT(argv_offset == 2 * kWordSize); - // Set argv in NativeArguments: A2 already contains argv. - - ASSERT(retval_offset == 3 * kWordSize); - // Set retval in NativeArgs. - __ addiu(A3, FP, Immediate(kCallerSpSlotFromFp * kWordSize)); - - // Passing the structure by value as in runtime calls would require changing - // Dart API for native functions. - // For now, space is reserved on the stack and we pass a pointer to it. - __ addiu(SP, SP, Immediate(-4 * kWordSize)); - __ sw(A3, Address(SP, 3 * kWordSize)); - __ sw(A2, Address(SP, 2 * kWordSize)); - __ sw(A1, Address(SP, 1 * kWordSize)); - __ sw(A0, Address(SP, 0 * kWordSize)); - __ mov(A0, SP); // Pass the pointer to the NativeArguments. - - - __ mov(A1, T5); // Pass the function entrypoint. - __ ReserveAlignedFrameSpace(2 * kWordSize); // Just passing A0, A1. - - // Call native wrapper function or redirection via simulator. - __ lw(T9, wrapper); - __ jalr(T9); - __ Comment("CallNativeCFunctionStub return"); - - // Mark that the thread is executing Dart code. - __ LoadImmediate(A2, VMTag::kDartTagId); - __ sw(A2, Assembler::VMTagAddress()); - - // Reset exit frame information in Isolate structure. - __ sw(ZR, Address(THR, Thread::top_exit_frame_info_offset())); - - __ LeaveStubFrameAndReturn(); -} - - -void StubCode::GenerateCallNoScopeNativeStub(Assembler* assembler) { - GenerateCallNativeWithWrapperStub( - assembler, - Address(THR, Thread::no_scope_native_wrapper_entry_point_offset())); -} - - -void StubCode::GenerateCallAutoScopeNativeStub(Assembler* assembler) { - GenerateCallNativeWithWrapperStub( - assembler, - Address(THR, Thread::auto_scope_native_wrapper_entry_point_offset())); -} - - -// Input parameters: -// RA : return address. -// SP : address of return value. -// T5 : address of the native function to call. -// A2 : address of first argument in argument array. -// A1 : argc_tag including number of arguments and function kind. -void StubCode::GenerateCallBootstrapNativeStub(Assembler* assembler) { - const intptr_t thread_offset = NativeArguments::thread_offset(); - const intptr_t argc_tag_offset = NativeArguments::argc_tag_offset(); - const intptr_t argv_offset = NativeArguments::argv_offset(); - const intptr_t retval_offset = NativeArguments::retval_offset(); - - __ SetPrologueOffset(); - __ Comment("CallNativeCFunctionStub"); - __ EnterStubFrame(); - - // Save exit frame information to enable stack walking as we are about - // to transition to native code. - __ sw(FP, Address(THR, Thread::top_exit_frame_info_offset())); - -#if defined(DEBUG) - { - Label ok; - // Check that we are always entering from Dart code. - __ lw(T0, Assembler::VMTagAddress()); - __ BranchEqual(T0, Immediate(VMTag::kDartTagId), &ok); - __ Stop("Not coming from Dart code."); - __ Bind(&ok); - } -#endif - - // Mark that the thread is executing native code. - __ sw(T5, Assembler::VMTagAddress()); - - // Initialize NativeArguments structure and call native function. - // Registers A0, A1, A2, and A3 are used. - - ASSERT(thread_offset == 0 * kWordSize); - // Set thread in NativeArgs. - __ mov(A0, THR); - - // There are no native calls to closures, so we do not need to set the tag - // bits kClosureFunctionBit and kInstanceFunctionBit in argc_tag_. 
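Each of these native-call stubs materializes the four-word NativeArguments record on the stack and passes its address in A0 rather than passing the struct by value. A C-level model of the record and the call shape (simplified stand-in types; the function is hypothetical, not a VM entry point):

#include <cstdint>
#include <cstdio>

// The 4-word record the stubs build: { thread, argc_tag, argv, retval },
// matching the slot order of the four sw instructions above.
struct NativeArgumentsModel {
  void* thread;        // Slot 0: current thread.
  intptr_t argc_tag;   // Slot 1: argument count plus function-kind bits.
  intptr_t* argv;      // Slot 2: first argument in the caller's frame.
  intptr_t* retval;    // Slot 3: where the native result is stored.
};

// A native entry receives a pointer to the record, so the Dart API for
// natives does not depend on the C ABI for passing structs by value.
void DemoNativeEntry(NativeArgumentsModel* args) {
  *args->retval = args->argv[0] + 1;  // Trivial "native" behavior.
}

int main() {
  intptr_t arg = 41, ret = 0;
  NativeArgumentsModel record = {nullptr, /*argc_tag=*/1, &arg, &ret};
  DemoNativeEntry(&record);  // Mirrors "mov A0, SP; jalr T9" in the stub.
  std::printf("retval = %ld\n", static_cast<long>(ret));
  return 0;
}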
- ASSERT(argc_tag_offset == 1 * kWordSize); - // Set argc in NativeArguments: A1 already contains argc. - - ASSERT(argv_offset == 2 * kWordSize); - // Set argv in NativeArguments: A2 already contains argv. - - ASSERT(retval_offset == 3 * kWordSize); - // Set retval in NativeArgs. - __ addiu(A3, FP, Immediate(kCallerSpSlotFromFp * kWordSize)); - - // Passing the structure by value as in runtime calls would require changing - // Dart API for native functions. - // For now, space is reserved on the stack and we pass a pointer to it. - __ addiu(SP, SP, Immediate(-4 * kWordSize)); - __ sw(A3, Address(SP, 3 * kWordSize)); - __ sw(A2, Address(SP, 2 * kWordSize)); - __ sw(A1, Address(SP, 1 * kWordSize)); - __ sw(A0, Address(SP, 0 * kWordSize)); - __ mov(A0, SP); // Pass the pointer to the NativeArguments. - - __ ReserveAlignedFrameSpace(kWordSize); // Just passing A0. - - // Call native function or redirection via simulator. - - // We defensively always jalr through T9 because it is sometimes required by - // the MIPS ABI. - __ mov(T9, T5); - __ jalr(T9); - __ Comment("CallNativeCFunctionStub return"); - - // Mark that the thread is executing Dart code. - __ LoadImmediate(A2, VMTag::kDartTagId); - __ sw(A2, Assembler::VMTagAddress()); - - // Reset exit frame information in Isolate structure. - __ sw(ZR, Address(THR, Thread::top_exit_frame_info_offset())); - - __ LeaveStubFrameAndReturn(); -} - - -// Input parameters: -// S4: arguments descriptor array. -void StubCode::GenerateCallStaticFunctionStub(Assembler* assembler) { - __ Comment("CallStaticFunctionStub"); - __ EnterStubFrame(); - // Setup space on stack for return value and preserve arguments descriptor. - - __ addiu(SP, SP, Immediate(-2 * kWordSize)); - __ sw(S4, Address(SP, 1 * kWordSize)); - __ sw(ZR, Address(SP, 0 * kWordSize)); - - __ CallRuntime(kPatchStaticCallRuntimeEntry, 0); - __ Comment("CallStaticFunctionStub return"); - - // Get Code object result and restore arguments descriptor array. - __ lw(CODE_REG, Address(SP, 0 * kWordSize)); - __ lw(S4, Address(SP, 1 * kWordSize)); - __ addiu(SP, SP, Immediate(2 * kWordSize)); - - __ lw(T0, FieldAddress(CODE_REG, Code::entry_point_offset())); - - // Remove the stub frame as we are about to jump to the dart function. - __ LeaveStubFrameAndReturn(T0); -} - - -// Called from a static call only when an invalid code has been entered -// (invalid because its function was optimized or deoptimized). -// S4: arguments descriptor array. -void StubCode::GenerateFixCallersTargetStub(Assembler* assembler) { - // Load code pointer to this stub from the thread: - // The one that is passed in, is not correct - it points to the code object - // that needs to be replaced. - __ lw(CODE_REG, Address(THR, Thread::fix_callers_target_code_offset())); - // Create a stub frame as we are pushing some objects on the stack before - // calling into the runtime. - __ EnterStubFrame(); - // Setup space on stack for return value and preserve arguments descriptor. - __ addiu(SP, SP, Immediate(-2 * kWordSize)); - __ sw(S4, Address(SP, 1 * kWordSize)); - __ sw(ZR, Address(SP, 0 * kWordSize)); - __ CallRuntime(kFixCallersTargetRuntimeEntry, 0); - // Get Code object result and restore arguments descriptor array. - __ lw(CODE_REG, Address(SP, 0 * kWordSize)); - __ lw(S4, Address(SP, 1 * kWordSize)); - __ addiu(SP, SP, Immediate(2 * kWordSize)); - - // Jump to the dart function. - __ lw(T0, FieldAddress(CODE_REG, Code::entry_point_offset())); - - // Remove the stub frame. 
- __ LeaveStubFrameAndReturn(T0); -} - - -// Called from object allocate instruction when the allocation stub has been -// disabled. -void StubCode::GenerateFixAllocationStubTargetStub(Assembler* assembler) { - // Load code pointer to this stub from the thread: - // The one that is passed in, is not correct - it points to the code object - // that needs to be replaced. - __ lw(CODE_REG, Address(THR, Thread::fix_allocation_stub_code_offset())); - __ EnterStubFrame(); - // Setup space on stack for return value. - __ addiu(SP, SP, Immediate(-1 * kWordSize)); - __ sw(ZR, Address(SP, 0 * kWordSize)); - __ CallRuntime(kFixAllocationStubTargetRuntimeEntry, 0); - // Get Code object result. - __ lw(CODE_REG, Address(SP, 0 * kWordSize)); - __ addiu(SP, SP, Immediate(1 * kWordSize)); - - // Jump to the dart function. - __ lw(T0, FieldAddress(CODE_REG, Code::entry_point_offset())); - - // Remove the stub frame. - __ LeaveStubFrameAndReturn(T0); -} - - -// Input parameters: -// A1: Smi-tagged argument count, may be zero. -// FP[kParamEndSlotFromFp + 1]: Last argument. -static void PushArgumentsArray(Assembler* assembler) { - __ Comment("PushArgumentsArray"); - // Allocate array to store arguments of caller. - __ LoadObject(A0, Object::null_object()); - // A0: Null element type for raw Array. - // A1: Smi-tagged argument count, may be zero. - __ BranchLink(*StubCode::AllocateArray_entry()); - __ Comment("PushArgumentsArray return"); - // V0: newly allocated array. - // A1: Smi-tagged argument count, may be zero (was preserved by the stub). - __ Push(V0); // Array is in V0 and on top of stack. - __ sll(T1, A1, 1); - __ addu(T1, FP, T1); - __ AddImmediate(T1, kParamEndSlotFromFp * kWordSize); - // T1: address of first argument on stack. - // T2: address of first argument in array. - - Label loop, loop_exit; - __ blez(A1, &loop_exit); - __ delay_slot()->addiu(T2, V0, - Immediate(Array::data_offset() - kHeapObjectTag)); - __ Bind(&loop); - __ lw(T3, Address(T1)); - __ addiu(A1, A1, Immediate(-Smi::RawValue(1))); - __ addiu(T1, T1, Immediate(-kWordSize)); - __ addiu(T2, T2, Immediate(kWordSize)); - __ bgez(A1, &loop); - __ delay_slot()->sw(T3, Address(T2, -kWordSize)); - __ Bind(&loop_exit); -} - - -// Used by eager and lazy deoptimization. Preserve result in V0 if necessary. -// This stub translates optimized frame into unoptimized frame. The optimized -// frame can contain values in registers and on stack, the unoptimized -// frame contains all values on stack. -// Deoptimization occurs in following steps: -// - Push all registers that can contain values. -// - Call C routine to copy the stack and saved registers into temporary buffer. -// - Adjust caller's frame to correct unoptimized frame size. -// - Fill the unoptimized frame. -// - Materialize objects that require allocation (e.g. Double instances). -// GC can occur only after frame is fully rewritten. -// Stack after EnterFrame(...) below: -// +------------------+ -// | Saved PP | <- TOS -// +------------------+ -// | Saved CODE_REG | -// +------------------+ -// | Saved FP | <- FP of stub -// +------------------+ -// | Saved LR | (deoptimization point) -// +------------------+ -// | Saved CODE_REG | -// +------------------+ -// | ... | <- SP of optimized frame -// -// Parts of the code cannot GC, part of the code can GC. 
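Stated as plain control flow, the sequence below runs in three phases, each in its own stub frame. The following schematic compiles and runs on its own; the names and the frame size are descriptive inventions, while the real phases are the kDeoptimize*RuntimeEntry calls in the generated code:

#include <cstdio>

namespace deopt_model {

unsigned CopyFrame() {
  // Phase 1 (no GC allowed): push every CPU/FPU register and let the runtime
  // snapshot the optimized frame; it returns the unoptimized frame size.
  return 64;  // Made-up size in bytes.
}

void FillFrame() {
  // Phase 2 (no GC allowed): rewrite the resized frame with the unoptimized
  // layout; values come from the snapshot taken in phase 1.
}

unsigned MaterializeObjects() {
  // Phase 3 (GC safe again): allocate objects whose creation was deferred,
  // e.g. boxed doubles; returns bytes of materialization args to pop.
  return 0;
}

}  // namespace deopt_model

int main() {
  unsigned frame_size = deopt_model::CopyFrame();
  deopt_model::FillFrame();
  unsigned to_pop = deopt_model::MaterializeObjects();
  std::printf("unoptimized frame: %u bytes, pop: %u bytes\n", frame_size,
              to_pop);
  return 0;
}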
-static void GenerateDeoptimizationSequence(Assembler* assembler,
-                                           DeoptStubKind kind) {
-  const intptr_t kPushedRegistersSize =
-      kNumberOfCpuRegisters * kWordSize + kNumberOfFRegisters * kWordSize;
-
-  __ SetPrologueOffset();
-  __ Comment("GenerateDeoptimizationSequence");
-  // DeoptimizeCopyFrame expects a Dart frame.
-  __ EnterStubFrame(kPushedRegistersSize);
-
-  // The code in this frame may not cause GC. kDeoptimizeCopyFrameRuntimeEntry
-  // and kDeoptimizeFillFrameRuntimeEntry are leaf runtime calls.
-  const intptr_t saved_result_slot_from_fp =
-      kFirstLocalSlotFromFp + 1 - (kNumberOfCpuRegisters - V0);
-  const intptr_t saved_exception_slot_from_fp =
-      kFirstLocalSlotFromFp + 1 - (kNumberOfCpuRegisters - V0);
-  const intptr_t saved_stacktrace_slot_from_fp =
-      kFirstLocalSlotFromFp + 1 - (kNumberOfCpuRegisters - V1);
-  // Result in V0 is preserved as part of pushing all registers below.
-
-  // Push registers in their enumeration order: lowest register number at
-  // lowest address.
-  for (int i = 0; i < kNumberOfCpuRegisters; i++) {
-    const int slot = kNumberOfCpuRegisters - i;
-    Register reg = static_cast<Register>(i);
-    if (reg == CODE_REG) {
-      // Save the original value of CODE_REG pushed before invoking this stub
-      // instead of the value used to call this stub.
-      COMPILE_ASSERT(TMP < CODE_REG);  // Assert TMP is pushed first.
-      __ lw(TMP, Address(FP, kCallerSpSlotFromFp * kWordSize));
-      __ sw(TMP, Address(SP, kPushedRegistersSize - slot * kWordSize));
-    } else {
-      __ sw(reg, Address(SP, kPushedRegistersSize - slot * kWordSize));
-    }
-  }
-  for (int i = 0; i < kNumberOfFRegisters; i++) {
-    // These go below the CPU registers.
-    const int slot = kNumberOfCpuRegisters + kNumberOfFRegisters - i;
-    FRegister reg = static_cast<FRegister>(i);
-    __ swc1(reg, Address(SP, kPushedRegistersSize - slot * kWordSize));
-  }
-
-  __ mov(A0, SP);  // Pass address of saved registers block.
-  bool is_lazy =
-      (kind == kLazyDeoptFromReturn) || (kind == kLazyDeoptFromThrow);
-  __ LoadImmediate(A1, is_lazy ? 1 : 0);
-  __ ReserveAlignedFrameSpace(1 * kWordSize);
-  __ CallRuntime(kDeoptimizeCopyFrameRuntimeEntry, 2);
-  // Result (V0) is stack-size (FP - SP) in bytes, incl. the return address.
-
-  if (kind == kLazyDeoptFromReturn) {
-    // Restore result into T1 temporarily.
-    __ lw(T1, Address(FP, saved_result_slot_from_fp * kWordSize));
-  } else if (kind == kLazyDeoptFromThrow) {
-    // Restore exception into T1 and stacktrace into T2 temporarily.
-    __ lw(T1, Address(FP, saved_exception_slot_from_fp * kWordSize));
-    __ lw(T2, Address(FP, saved_stacktrace_slot_from_fp * kWordSize));
-  }
-
-  __ RestoreCodePointer();
-  __ LeaveDartFrame();
-  __ subu(SP, FP, V0);
-
-  // DeoptimizeFillFrame expects a Dart frame, i.e. EnterDartFrame(0), but there
-  // is no need to set the correct PC marker or load PP, since they get patched.
-  __ EnterStubFrame();
-
-  __ mov(A0, FP);  // Get last FP address.
-  if (kind == kLazyDeoptFromReturn) {
-    __ Push(T1);  // Preserve result as first local.
-  } else if (kind == kLazyDeoptFromThrow) {
-    __ Push(T1);  // Preserve exception as first local.
-    __ Push(T2);  // Preserve stacktrace as second local.
-  }
-  __ ReserveAlignedFrameSpace(1 * kWordSize);
-  __ CallRuntime(kDeoptimizeFillFrameRuntimeEntry, 1);  // Pass last FP in A0.
-  if (kind == kLazyDeoptFromReturn) {
-    // Restore result into T1.
-    __ lw(T1, Address(FP, kFirstLocalSlotFromFp * kWordSize));
-  } else if (kind == kLazyDeoptFromThrow) {
-    // Restore exception into T1 and stacktrace into T2.
- __ lw(T1, Address(FP, kFirstLocalSlotFromFp * kWordSize)); - __ lw(T2, Address(FP, (kFirstLocalSlotFromFp - 1) * kWordSize)); - } - // Code above cannot cause GC. - __ RestoreCodePointer(); - __ LeaveStubFrame(); - - // Frame is fully rewritten at this point and it is safe to perform a GC. - // Materialize any objects that were deferred by FillFrame because they - // require allocation. - // Enter stub frame with loading PP. The caller's PP is not materialized yet. - __ EnterStubFrame(); - if (kind == kLazyDeoptFromReturn) { - __ Push(T1); // Preserve result, it will be GC-d here. - } else if (kind == kLazyDeoptFromThrow) { - __ Push(T1); // Preserve exception, it will be GC-d here. - __ Push(T2); // Preserve stacktrace, it will be GC-d here. - } - __ PushObject(Smi::ZoneHandle()); // Space for the result. - __ CallRuntime(kDeoptimizeMaterializeRuntimeEntry, 0); - // Result tells stub how many bytes to remove from the expression stack - // of the bottom-most frame. They were used as materialization arguments. - __ Pop(T1); - if (kind == kLazyDeoptFromReturn) { - __ Pop(V0); // Restore result. - } else if (kind == kLazyDeoptFromThrow) { - __ Pop(V1); // Restore stacktrace. - __ Pop(V0); // Restore exception. - } - __ LeaveStubFrame(); - // Remove materialization arguments. - __ SmiUntag(T1); - __ addu(SP, SP, T1); - // The caller is responsible for emitting the return instruction. -} - -// V0: result, must be preserved -void StubCode::GenerateDeoptimizeLazyFromReturnStub(Assembler* assembler) { - // Push zap value instead of CODE_REG for lazy deopt. - __ LoadImmediate(TMP, kZapCodeReg); - __ Push(TMP); - // Return address for "call" to deopt stub. - __ LoadImmediate(RA, kZapReturnAddress); - __ lw(CODE_REG, Address(THR, Thread::lazy_deopt_from_return_stub_offset())); - GenerateDeoptimizationSequence(assembler, kLazyDeoptFromReturn); - __ Ret(); -} - - -// V0: exception, must be preserved -// V1: stacktrace, must be preserved -void StubCode::GenerateDeoptimizeLazyFromThrowStub(Assembler* assembler) { - // Push zap value instead of CODE_REG for lazy deopt. - __ LoadImmediate(TMP, kZapCodeReg); - __ Push(TMP); - // Return address for "call" to deopt stub. - __ LoadImmediate(RA, kZapReturnAddress); - __ lw(CODE_REG, Address(THR, Thread::lazy_deopt_from_throw_stub_offset())); - GenerateDeoptimizationSequence(assembler, kLazyDeoptFromThrow); - __ Ret(); -} - - -void StubCode::GenerateDeoptimizeStub(Assembler* assembler) { - GenerateDeoptimizationSequence(assembler, kEagerDeopt); - __ Ret(); -} - - -static void GenerateDispatcherCode(Assembler* assembler, - Label* call_target_function) { - __ Comment("NoSuchMethodDispatch"); - // When lazily generated invocation dispatchers are disabled, the - // miss-handler may return null. - __ BranchNotEqual(T0, Object::null_object(), call_target_function); - __ EnterStubFrame(); - // Load the receiver. - __ lw(A1, FieldAddress(S4, ArgumentsDescriptor::count_offset())); - __ sll(TMP, A1, 1); // A1 is a Smi. - __ addu(TMP, FP, TMP); - __ lw(T6, Address(TMP, kParamEndSlotFromFp * kWordSize)); - - // Push space for the return value. - // Push the receiver. - // Push ICData/MegamorphicCache object. - // Push arguments descriptor array. - // Push original arguments array. - __ addiu(SP, SP, Immediate(-4 * kWordSize)); - __ sw(ZR, Address(SP, 3 * kWordSize)); - __ sw(T6, Address(SP, 2 * kWordSize)); - __ sw(S5, Address(SP, 1 * kWordSize)); - __ sw(S4, Address(SP, 0 * kWordSize)); - - // Adjust arguments count. 
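Both dispatcher paths bump the Smi-tagged argument count when the call site passed a type-argument vector, since that vector occupies one extra slot; the code that follows performs exactly this adjustment. The arithmetic, as a stand-alone hypothetical helper (relying on kSmiTagShift == 1, which this file asserts elsewhere):

#include <cstdint>

// Arithmetic behind "AddImmediate(A1, A1, Smi::RawValue(1))": with
// kSmiTagShift == 1, a Smi stores value << 1, so adding Smi::RawValue(1) == 2
// bumps the untagged count by one. Hypothetical helper, not VM code.
constexpr intptr_t AdjustSmiArgCount(intptr_t smi_count,
                                     intptr_t type_args_len) {
  return (type_args_len != 0) ? smi_count + (1 << 1) : smi_count;
}

static_assert(AdjustSmiArgCount(2 << 1, 1) == 3 << 1,
              "two arguments plus a type-argument vector fill three slots");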
-  __ lw(TMP, FieldAddress(S4, ArgumentsDescriptor::type_args_len_offset()));
-  Label args_count_ok;
-  __ BranchEqual(TMP, Immediate(0), &args_count_ok);
-  __ AddImmediate(A1, A1, Smi::RawValue(1));  // Include the type arguments.
-  __ Bind(&args_count_ok);
-
-  // A1: Smi-tagged arguments array length.
-  PushArgumentsArray(assembler);
-  const intptr_t kNumArgs = 4;
-  __ CallRuntime(kInvokeNoSuchMethodDispatcherRuntimeEntry, kNumArgs);
-  __ lw(V0, Address(SP, 4 * kWordSize));  // Return value.
-  __ addiu(SP, SP, Immediate(5 * kWordSize));
-  __ LeaveStubFrame();
-  __ Ret();
-}
-
-
-void StubCode::GenerateMegamorphicMissStub(Assembler* assembler) {
-  __ EnterStubFrame();
-
-  // Load the receiver.
-  __ lw(T2, FieldAddress(S4, ArgumentsDescriptor::count_offset()));
-  __ sll(T2, T2, 1);  // T2 is a Smi.
-  __ addu(TMP, FP, T2);
-  __ lw(T6, Address(TMP, kParamEndSlotFromFp * kWordSize));
-
-  // Preserve IC data and arguments descriptor.
-  __ addiu(SP, SP, Immediate(-6 * kWordSize));
-  __ sw(S5, Address(SP, 5 * kWordSize));
-  __ sw(S4, Address(SP, 4 * kWordSize));
-
-  // Push space for the return value.
-  // Push the receiver.
-  // Push IC data object.
-  // Push arguments descriptor array.
-  __ sw(ZR, Address(SP, 3 * kWordSize));
-  __ sw(T6, Address(SP, 2 * kWordSize));
-  __ sw(S5, Address(SP, 1 * kWordSize));
-  __ sw(S4, Address(SP, 0 * kWordSize));
-
-  __ CallRuntime(kMegamorphicCacheMissHandlerRuntimeEntry, 3);
-
-  __ lw(T0, Address(SP, 3 * kWordSize));  // Get result function.
-  __ lw(S4, Address(SP, 4 * kWordSize));  // Restore argument descriptor.
-  __ lw(S5, Address(SP, 5 * kWordSize));  // Restore IC data.
-  __ addiu(SP, SP, Immediate(6 * kWordSize));
-
-  __ RestoreCodePointer();
-  __ LeaveStubFrame();
-
-  if (!FLAG_lazy_dispatchers) {
-    Label call_target_function;
-    GenerateDispatcherCode(assembler, &call_target_function);
-    __ Bind(&call_target_function);
-  }
-
-  __ lw(CODE_REG, FieldAddress(T0, Function::code_offset()));
-  __ lw(T2, FieldAddress(T0, Function::entry_point_offset()));
-  __ jr(T2);
-}
-
-
-// Called for inline allocation of arrays.
-// Input parameters:
-// RA: return address.
-// A1: Array length as Smi (must be preserved).
-// A0: array element type (either NULL or an instantiated type).
-// NOTE: A1 cannot be clobbered here as the caller relies on it being saved.
-// The newly allocated object is returned in V0.
-void StubCode::GenerateAllocateArrayStub(Assembler* assembler) {
-  __ Comment("AllocateArrayStub");
-  Label slow_case;
-  // Compute the size to be allocated; it is based on the array length
-  // and is computed as:
-  // RoundedAllocationSize((array_length * kWordSize) + sizeof(RawArray)).
-  __ mov(T3, A1);  // Array length.
-
-  // Check that length is a positive Smi.
-  __ andi(CMPRES1, T3, Immediate(kSmiTagMask));
-  if (FLAG_use_slow_path) {
-    __ b(&slow_case);
-  } else {
-    __ bne(CMPRES1, ZR, &slow_case);
-  }
-  __ bltz(T3, &slow_case);
-
-  // Check for maximum allowed length.
-  const intptr_t max_len =
-      reinterpret_cast<int32_t>(Smi::New(Array::kMaxElements));
-  __ BranchUnsignedGreater(T3, Immediate(max_len), &slow_case);
-
-  const intptr_t cid = kArrayCid;
-  NOT_IN_PRODUCT(__ MaybeTraceAllocation(kArrayCid, T4, &slow_case));
-
-  const intptr_t fixed_size_plus_alignment_padding =
-      sizeof(RawArray) + kObjectAlignment - 1;
-  __ LoadImmediate(T2, fixed_size_plus_alignment_padding);
-  __ sll(T3, T3, 1);  // T3 is a Smi.
-  __ addu(T2, T2, T3);
-  ASSERT(kSmiTagShift == 1);
-  __ LoadImmediate(T3, ~(kObjectAlignment - 1));
-  __ and_(T2, T2, T3);
-
-  // T2: Allocation size.
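In plain C++ the sequence just emitted computes RoundedAllocationSize(sizeof(RawArray) + length * kWordSize). A runnable sketch of that arithmetic, with illustrative 32-bit constants standing in for the real kWordSize, kObjectAlignment, and sizeof(RawArray):

#include <cstdint>
#include <cstdio>

const intptr_t kObjectAlignmentModel = 8;   // Assumed alignment.
const intptr_t kRawArrayHeaderSize = 12;    // Stand-in for sizeof(RawArray).

intptr_t RoundedArraySize(intptr_t smi_length) {
  // "sll T3, T3, 1" doubles the Smi-tagged length: a Smi already stores
  // length << 1, so length << 2 == length * kWordSize on a 32-bit target.
  intptr_t size = kRawArrayHeaderSize + (smi_length << 1);
  // Round up to the object alignment, mirroring "and_(T2, T2, ~(align-1))".
  return (size + kObjectAlignmentModel - 1) & ~(kObjectAlignmentModel - 1);
}

int main() {
  intptr_t smi_len = 3 << 1;  // Smi-tagged length 3.
  // 12-byte header + 3 * 4 bytes of elements = 24, already 8-aligned.
  std::printf("allocation size = %ld bytes\n",
              static_cast<long>(RoundedArraySize(smi_len)));
  return 0;
}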
-
-  Heap::Space space = Heap::kNew;
-  __ lw(T3, Address(THR, Thread::heap_offset()));
-  // Potential new object start.
-  __ lw(T0, Address(T3, Heap::TopOffset(space)));
-
-  __ addu(T1, T0, T2);  // Potential next object start.
-  __ BranchUnsignedLess(T1, T0, &slow_case);  // Branch on unsigned overflow.
-
-  // Check if the allocation fits into the remaining space.
-  // T0: potential new object start.
-  // T1: potential next object start.
-  // T2: allocation size.
-  // T3: heap.
-  __ lw(T4, Address(T3, Heap::EndOffset(space)));
-  __ BranchUnsignedGreaterEqual(T1, T4, &slow_case);
-
-  // Successfully allocated the object(s), now update top to point to
-  // next object start and initialize the object.
-  // T3: heap.
-  __ sw(T1, Address(T3, Heap::TopOffset(space)));
-  __ addiu(T0, T0, Immediate(kHeapObjectTag));
-  NOT_IN_PRODUCT(__ UpdateAllocationStatsWithSize(cid, T2, T4, space));
-
-  // Initialize the tags.
-  // T0: new object start as a tagged pointer.
-  // T1: new object end address.
-  // T2: allocation size.
-  {
-    Label overflow, done;
-    const intptr_t shift = RawObject::kSizeTagPos - kObjectAlignmentLog2;
-
-    __ BranchUnsignedGreater(T2, Immediate(RawObject::SizeTag::kMaxSizeTag),
-                             &overflow);
-    __ b(&done);
-    __ delay_slot()->sll(T2, T2, shift);
-    __ Bind(&overflow);
-    __ mov(T2, ZR);
-    __ Bind(&done);
-
-    // Get the class index and insert it into the tags.
-    // T2: size and bit tags.
-    __ LoadImmediate(TMP, RawObject::ClassIdTag::encode(cid));
-    __ or_(T2, T2, TMP);
-    __ sw(T2, FieldAddress(T0, Array::tags_offset()));  // Store tags.
-  }
-
-  // T0: new object start as a tagged pointer.
-  // T1: new object end address.
-  // Store the type argument field.
-  __ StoreIntoObjectNoBarrier(
-      T0, FieldAddress(T0, Array::type_arguments_offset()), A0);
-
-  // Set the length field.
-  __ StoreIntoObjectNoBarrier(T0, FieldAddress(T0, Array::length_offset()), A1);
-
-  __ LoadObject(T7, Object::null_object());
-  // Initialize all array elements to raw_null.
-  // T0: new object start as a tagged pointer.
-  // T1: new object end address.
-  // T2: iterator which initially points to the start of the variable
-  // data area to be initialized.
-  // T7: null.
-  __ AddImmediate(T2, T0, sizeof(RawArray) - kHeapObjectTag);
-
-  Label done;
-  Label init_loop;
-  __ Bind(&init_loop);
-  __ BranchUnsignedGreaterEqual(T2, T1, &done);
-  __ sw(T7, Address(T2, 0));
-  __ b(&init_loop);
-  __ delay_slot()->addiu(T2, T2, Immediate(kWordSize));
-  __ Bind(&done);
-
-  __ Ret();  // Returns the newly allocated object in V0.
-  __ delay_slot()->mov(V0, T0);
-
-  // Unable to allocate the array using the fast inline code, just call
-  // into the runtime.
-  __ Bind(&slow_case);
-  // Create a stub frame as we are pushing some objects on the stack before
-  // calling into the runtime.
-  __ EnterStubFrame();
-  // Setup space on stack for return value.
-  // Push array length as Smi and element type.
-  __ addiu(SP, SP, Immediate(-3 * kWordSize));
-  __ sw(ZR, Address(SP, 2 * kWordSize));
-  __ sw(A1, Address(SP, 1 * kWordSize));
-  __ sw(A0, Address(SP, 0 * kWordSize));
-  __ CallRuntime(kAllocateArrayRuntimeEntry, 2);
-  __ Comment("AllocateArrayStub return");
-  // Pop arguments; the result is in V0.
-  __ lw(V0, Address(SP, 2 * kWordSize));
-  __ lw(A1, Address(SP, 1 * kWordSize));
-  __ lw(A0, Address(SP, 0 * kWordSize));
-  __ addiu(SP, SP, Immediate(3 * kWordSize));
-
-  __ LeaveStubFrameAndReturn();
-}
-
-
-// Called when invoking Dart code from C++ (VM code).
-// Input parameters:
-// RA : points to return address.
-// A0 : code object of the Dart function to call.
-// A1 : arguments descriptor array.
-// A2 : arguments array.
-// A3 : current thread.
-void StubCode::GenerateInvokeDartCodeStub(Assembler* assembler) {
-  // Save frame pointer coming in.
-  __ Comment("InvokeDartCodeStub");
-  __ EnterFrame();
-
-  // Push code object to PC marker slot.
-  __ lw(TMP, Address(A3, Thread::invoke_dart_code_stub_offset()));
-  __ Push(TMP);
-
-  // Save new context and C++ ABI callee-saved registers.
-
-  // The saved vm tag, top resource, and top exit frame info.
-  const intptr_t kPreservedSlots = 3;
-  const intptr_t kPreservedRegSpace =
-      kWordSize *
-      (kAbiPreservedCpuRegCount + kAbiPreservedFpuRegCount + kPreservedSlots);
-
-  __ addiu(SP, SP, Immediate(-kPreservedRegSpace));
-  for (int i = S0; i <= S7; i++) {
-    Register r = static_cast<Register>(i);
-    const intptr_t slot = i - S0 + kPreservedSlots;
-    __ sw(r, Address(SP, slot * kWordSize));
-  }
-
-  for (intptr_t i = kAbiFirstPreservedFpuReg; i <= kAbiLastPreservedFpuReg;
-       i++) {
-    FRegister r = static_cast<FRegister>(i);
-    const intptr_t slot = kAbiPreservedCpuRegCount + kPreservedSlots + i -
-                          kAbiFirstPreservedFpuReg;
-    __ swc1(r, Address(SP, slot * kWordSize));
-  }
-
-  // We now load the pool pointer (PP) with a GC-safe value as we are about
-  // to invoke Dart code.
-  __ LoadImmediate(PP, 0);
-
-  // Set up THR, which caches the current thread in Dart code.
-  if (THR != A3) {
-    __ mov(THR, A3);
-  }
-
-  // Save the current VMTag on the stack.
-  __ lw(T1, Assembler::VMTagAddress());
-  __ sw(T1, Address(SP, 2 * kWordSize));
-
-  // Mark that the thread is executing Dart code.
-  __ LoadImmediate(T0, VMTag::kDartTagId);
-  __ sw(T0, Assembler::VMTagAddress());
-
-  // Save top resource and top exit frame info. Use T0 as a temporary register.
-  // StackFrameIterator reads the top exit frame info saved in this frame.
-  __ lw(T0, Address(THR, Thread::top_resource_offset()));
-  __ sw(ZR, Address(THR, Thread::top_resource_offset()));
-  __ sw(T0, Address(SP, 1 * kWordSize));
-  __ lw(T0, Address(THR, Thread::top_exit_frame_info_offset()));
-  __ sw(ZR, Address(THR, Thread::top_exit_frame_info_offset()));
-  // kExitLinkSlotFromEntryFp must be kept in sync with the code below.
-  ASSERT(kExitLinkSlotFromEntryFp == -24);
-  __ sw(T0, Address(SP, 0 * kWordSize));
-
-  // After the call, the stack pointer is restored to this location.
-  // Pushed: S0-S7, F20-F31, T0, T0, T1 = 23 slots.
-
-  // Load arguments descriptor array into S4, which is passed to Dart code.
-  __ lw(S4, Address(A1, VMHandles::kOffsetOfRawPtrInHandle));
-
-  // No need to check for type args, disallowed by DartEntry::InvokeFunction.
-  // Load number of arguments into T1.
-  __ lw(T1, FieldAddress(S4, ArgumentsDescriptor::count_offset()));
-  __ SmiUntag(T1);
-
-  // Compute address of 'arguments array' data area into A2.
-  __ lw(A2, Address(A2, VMHandles::kOffsetOfRawPtrInHandle));
-
-  // Set up arguments for the Dart call.
-  Label push_arguments;
-  Label done_push_arguments;
-  __ beq(T1, ZR, &done_push_arguments);  // Check if there are arguments.
-  __ delay_slot()->addiu(A2, A2,
-                         Immediate(Array::data_offset() - kHeapObjectTag));
-  __ mov(A1, ZR);
-  __ Bind(&push_arguments);
-  __ lw(A3, Address(A2));
-  __ Push(A3);
-  __ addiu(A1, A1, Immediate(1));
-  __ BranchSignedLess(A1, T1, &push_arguments);
-  __ delay_slot()->addiu(A2, A2, Immediate(kWordSize));
-
-  __ Bind(&done_push_arguments);
-
-  // Call the Dart code entrypoint.
-  // We are calling into Dart code here, so there is no need to call through
-  // T9 to match the ABI.
-  __ lw(CODE_REG, Address(A0, VMHandles::kOffsetOfRawPtrInHandle));
-  __ lw(A0, FieldAddress(CODE_REG, Code::entry_point_offset()));
-  __ jalr(A0);  // S4 is the arguments descriptor array.
-  __ Comment("InvokeDartCodeStub return");
-
-  // Get rid of arguments pushed on the stack.
-  __ AddImmediate(SP, FP, kExitLinkSlotFromEntryFp * kWordSize);
-
-
-  // Restore the current VMTag from the stack.
-  __ lw(T1, Address(SP, 2 * kWordSize));
-  __ sw(T1, Assembler::VMTagAddress());
-
-  // Restore the saved top resource and top exit frame info back into the
-  // Isolate structure. Uses T0 as a temporary register for this.
-  __ lw(T0, Address(SP, 1 * kWordSize));
-  __ sw(T0, Address(THR, Thread::top_resource_offset()));
-  __ lw(T0, Address(SP, 0 * kWordSize));
-  __ sw(T0, Address(THR, Thread::top_exit_frame_info_offset()));
-
-  // Restore C++ ABI callee-saved registers.
-  for (int i = S0; i <= S7; i++) {
-    Register r = static_cast<Register>(i);
-    const intptr_t slot = i - S0 + kPreservedSlots;
-    __ lw(r, Address(SP, slot * kWordSize));
-  }
-
-  for (intptr_t i = kAbiFirstPreservedFpuReg; i <= kAbiLastPreservedFpuReg;
-       i++) {
-    FRegister r = static_cast<FRegister>(i);
-    const intptr_t slot = kAbiPreservedCpuRegCount + kPreservedSlots + i -
-                          kAbiFirstPreservedFpuReg;
-    __ lwc1(r, Address(SP, slot * kWordSize));
-  }
-
-  __ addiu(SP, SP, Immediate(kPreservedRegSpace));
-
-  // Restore the frame pointer and return.
-  __ LeaveFrameAndReturn();
-}
-
-
-// Called for inline allocation of contexts.
-// Input:
-// T1: number of context variables.
-// Output:
-// V0: newly allocated RawContext object.
-void StubCode::GenerateAllocateContextStub(Assembler* assembler) {
-  __ Comment("AllocateContext");
-  if (FLAG_inline_alloc) {
-    Label slow_case;
-    // First compute the rounded instance size.
-    // T1: number of context variables.
-    intptr_t fixed_size_plus_alignment_padding =
-        sizeof(RawContext) + kObjectAlignment - 1;
-    __ LoadImmediate(T2, fixed_size_plus_alignment_padding);
-    __ sll(T0, T1, 2);
-    __ addu(T2, T2, T0);
-    ASSERT(kSmiTagShift == 1);
-    __ LoadImmediate(T0, ~(kObjectAlignment - 1));
-    __ and_(T2, T2, T0);
-
-    NOT_IN_PRODUCT(__ MaybeTraceAllocation(kContextCid, T4, &slow_case));
-    // Now allocate the object.
-    // T1: number of context variables.
-    // T2: object size.
-    const intptr_t cid = kContextCid;
-    Heap::Space space = Heap::kNew;
-    __ lw(T5, Address(THR, Thread::heap_offset()));
-    __ lw(V0, Address(T5, Heap::TopOffset(space)));
-    __ addu(T3, T2, V0);
-
-    // Check if the allocation fits into the remaining space.
-    // V0: potential new object.
-    // T1: number of context variables.
-    // T2: object size.
-    // T3: potential next object start.
-    // T5: heap.
-    __ lw(CMPRES1, Address(T5, Heap::EndOffset(space)));
-    if (FLAG_use_slow_path) {
-      __ b(&slow_case);
-    } else {
-      __ BranchUnsignedGreaterEqual(T3, CMPRES1, &slow_case);
-    }
-
-    // Successfully allocated the object, now update top to point to
-    // next object start and initialize the object.
-    // V0: new object.
-    // T1: number of context variables.
-    // T2: object size.
-    // T3: next object start.
-    // T5: heap.
-    __ sw(T3, Address(T5, Heap::TopOffset(space)));
-    __ addiu(V0, V0, Immediate(kHeapObjectTag));
-    NOT_IN_PRODUCT(__ UpdateAllocationStatsWithSize(cid, T2, T5, space));
-
-    // Calculate the size tag.
-    // V0: new object.
-    // T1: number of context variables.
-    // T2: object size.
-    const intptr_t shift = RawObject::kSizeTagPos - kObjectAlignmentLog2;
-    __ LoadImmediate(TMP, RawObject::SizeTag::kMaxSizeTag);
-    __ sltu(CMPRES1, TMP, T2);  // CMPRES1 = T2 > TMP ? 1 : 0.
-    __ movn(T2, ZR, CMPRES1);   // T2 = CMPRES1 != 0 ? 0 : T2.
-    __ sll(TMP, T2, shift);     // TMP = T2 << shift.
-    __ movz(T2, TMP, CMPRES1);  // T2 = CMPRES1 == 0 ? TMP : T2.
-
-    // Get the class index and insert it into the tags.
-    // T2: size and bit tags.
-    __ LoadImmediate(TMP, RawObject::ClassIdTag::encode(cid));
-    __ or_(T2, T2, TMP);
-    __ sw(T2, FieldAddress(V0, Context::tags_offset()));
-
-    // Setup number of context variables field.
-    // V0: new object.
-    // T1: number of context variables as integer value (not object).
-    __ sw(T1, FieldAddress(V0, Context::num_variables_offset()));
-
-    __ LoadObject(T7, Object::null_object());
-
-    // Initialize the context variables.
-    // V0: new object.
-    // T1: number of context variables.
-    Label loop, loop_exit;
-    __ blez(T1, &loop_exit);
-    // Setup the parent field.
-    __ delay_slot()->sw(T7, FieldAddress(V0, Context::parent_offset()));
-    __ AddImmediate(T3, V0, Context::variable_offset(0) - kHeapObjectTag);
-    __ sll(T1, T1, 2);
-    __ Bind(&loop);
-    __ addiu(T1, T1, Immediate(-kWordSize));
-    __ addu(T4, T3, T1);
-    __ bgtz(T1, &loop);
-    __ delay_slot()->sw(T7, Address(T4));
-    __ Bind(&loop_exit);
-
-    // Done allocating and initializing the context.
-    // V0: new object.
-    __ Ret();
-
-    __ Bind(&slow_case);
-  }
-  // Create a stub frame as we are pushing some objects on the stack before
-  // calling into the runtime.
-  __ EnterStubFrame();
-  // Setup space on stack for return value.
-  __ SmiTag(T1);
-  __ addiu(SP, SP, Immediate(-2 * kWordSize));
-  __ LoadObject(TMP, Object::null_object());
-  __ sw(TMP, Address(SP, 1 * kWordSize));  // Store null.
-  __ sw(T1, Address(SP, 0 * kWordSize));
-  __ CallRuntime(kAllocateContextRuntimeEntry, 1);  // Allocate context.
-  __ lw(V0, Address(SP, 1 * kWordSize));  // Get the new context.
-  __ addiu(SP, SP, Immediate(2 * kWordSize));  // Pop argument and return.
-
-  // V0: new object
-  // Restore the frame pointer.
-  __ LeaveStubFrameAndReturn();
-}
-
-
-// Helper stub to implement Assembler::StoreIntoObject.
-// Input parameters:
-// T0: Address (i.e. object) being stored into.
-void StubCode::GenerateUpdateStoreBufferStub(Assembler* assembler) {
-  // Save values being destroyed.
-  __ Comment("UpdateStoreBufferStub");
-  __ addiu(SP, SP, Immediate(-3 * kWordSize));
-  __ sw(T3, Address(SP, 2 * kWordSize));
-  __ sw(T2, Address(SP, 1 * kWordSize));
-  __ sw(T1, Address(SP, 0 * kWordSize));
-
-  Label add_to_buffer;
-  // Check whether this object has already been remembered. Skip adding to the
-  // store buffer if the object is in the store buffer already.
-  // Spilled: T1, T2, T3.
-  // T0: Address being stored.
-  __ lw(T2, FieldAddress(T0, Object::tags_offset()));
-  __ andi(CMPRES1, T2, Immediate(1 << RawObject::kRememberedBit));
-  __ beq(CMPRES1, ZR, &add_to_buffer);
-  __ lw(T1, Address(SP, 0 * kWordSize));
-  __ lw(T2, Address(SP, 1 * kWordSize));
-  __ lw(T3, Address(SP, 2 * kWordSize));
-  __ addiu(SP, SP, Immediate(3 * kWordSize));
-  __ Ret();
-
-  __ Bind(&add_to_buffer);
-  // Atomically set the remembered bit of the object header.
-  Label retry;
-  __ Bind(&retry);
-  __ ll(T2, FieldAddress(T0, Object::tags_offset()));
-  __ ori(T2, T2, Immediate(1 << RawObject::kRememberedBit));
-  __ sc(T2, FieldAddress(T0, Object::tags_offset()));
-  // T2 = 1 on success, 0 on failure.
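The ll/ori/sc sequence above is MIPS's form of an atomic read-modify-write; in portable C++11 the same retry loop can be written with a weak compare-exchange, which, like sc, is allowed to fail spuriously. The bit position below is illustrative, not RawObject's actual layout:

#include <atomic>
#include <cstdint>
#include <cstdio>

const uint32_t kRememberedBitModel = 1u << 5;  // Assumed bit position.

// Atomically OR the remembered bit into an object's tags word.
void SetRememberedBit(std::atomic<uint32_t>* tags) {
  uint32_t old_tags = tags->load(std::memory_order_relaxed);
  // On failure, old_tags is reloaded with the current value; retry until our
  // updated word is stored over an unchanged original, exactly like ll/sc.
  while (!tags->compare_exchange_weak(old_tags,
                                      old_tags | kRememberedBitModel,
                                      std::memory_order_relaxed)) {
  }
}

int main() {
  std::atomic<uint32_t> tags(0x10);
  SetRememberedBit(&tags);
  std::printf("tags = %#x\n", static_cast<unsigned>(tags.load()));
  return 0;
}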
- __ beq(T2, ZR, &retry); - - // Load the StoreBuffer block out of the thread. Then load top_ out of the - // StoreBufferBlock and add the address to the pointers_. - __ lw(T1, Address(THR, Thread::store_buffer_block_offset())); - __ lw(T2, Address(T1, StoreBufferBlock::top_offset())); - __ sll(T3, T2, 2); - __ addu(T3, T1, T3); - __ sw(T0, Address(T3, StoreBufferBlock::pointers_offset())); - - // Increment top_ and check for overflow. - // T2: top_ - // T1: StoreBufferBlock - Label L; - __ addiu(T2, T2, Immediate(1)); - __ sw(T2, Address(T1, StoreBufferBlock::top_offset())); - __ addiu(CMPRES1, T2, Immediate(-StoreBufferBlock::kSize)); - // Restore values. - __ lw(T1, Address(SP, 0 * kWordSize)); - __ lw(T2, Address(SP, 1 * kWordSize)); - __ lw(T3, Address(SP, 2 * kWordSize)); - __ beq(CMPRES1, ZR, &L); - __ delay_slot()->addiu(SP, SP, Immediate(3 * kWordSize)); - __ Ret(); - - // Handle overflow: Call the runtime leaf function. - __ Bind(&L); - // Setup frame, push callee-saved registers. - - __ EnterCallRuntimeFrame(1 * kWordSize); - __ mov(A0, THR); - __ CallRuntime(kStoreBufferBlockProcessRuntimeEntry, 1); - __ Comment("UpdateStoreBufferStub return"); - // Restore callee-saved registers, tear down frame. - __ LeaveCallRuntimeFrame(); - __ Ret(); -} - - -// Called for inline allocation of objects. -// Input parameters: -// RA : return address. -// SP + 0 : type arguments object (only if class is parameterized). -void StubCode::GenerateAllocationStubForClass(Assembler* assembler, - const Class& cls) { - __ Comment("AllocationStubForClass"); - // The generated code is different if the class is parameterized. - const bool is_cls_parameterized = cls.NumTypeArguments() > 0; - ASSERT(!is_cls_parameterized || - (cls.type_arguments_field_offset() != Class::kNoTypeArguments)); - // kInlineInstanceSize is a constant used as a threshold for determining - // when the object initialization should be done as a loop or as - // straight line code. - const int kInlineInstanceSize = 12; - const intptr_t instance_size = cls.instance_size(); - ASSERT(instance_size > 0); - if (is_cls_parameterized) { - __ lw(T1, Address(SP, 0 * kWordSize)); - // T1: type arguments. - } - Isolate* isolate = Isolate::Current(); - if (FLAG_inline_alloc && Heap::IsAllocatableInNewSpace(instance_size) && - !cls.TraceAllocation(isolate)) { - Label slow_case; - // Allocate the object and update top to point to - // next object start and initialize the allocated object. - // T1: instantiated type arguments (if is_cls_parameterized). - Heap::Space space = Heap::kNew; - __ lw(T5, Address(THR, Thread::heap_offset())); - __ lw(T2, Address(T5, Heap::TopOffset(space))); - __ LoadImmediate(T4, instance_size); - __ addu(T3, T2, T4); - // Check if the allocation fits into the remaining space. - // T2: potential new object start. - // T3: potential next object start. - // T5: heap. - __ lw(CMPRES1, Address(T5, Heap::EndOffset(space))); - if (FLAG_use_slow_path) { - __ b(&slow_case); - } else { - __ BranchUnsignedGreaterEqual(T3, CMPRES1, &slow_case); - } - // Successfully allocated the object(s), now update top to point to - // next object start and initialize the object. - __ sw(T3, Address(T5, Heap::TopOffset(space))); - NOT_IN_PRODUCT(__ UpdateAllocationStats(cls.id(), T5, space)); - - // T2: new object start. - // T3: next object start. - // T1: new object type arguments (if is_cls_parameterized). - // Set the tags. 
- uint32_t tags = 0; - tags = RawObject::SizeTag::update(instance_size, tags); - ASSERT(cls.id() != kIllegalCid); - tags = RawObject::ClassIdTag::update(cls.id(), tags); - __ LoadImmediate(T0, tags); - __ sw(T0, Address(T2, Instance::tags_offset())); - - __ LoadObject(T7, Object::null_object()); - - // Initialize the remaining words of the object. - // T2: new object start. - // T3: next object start. - // T1: new object type arguments (if is_cls_parameterized). - // First try inlining the initialization without a loop. - if (instance_size < (kInlineInstanceSize * kWordSize)) { - // Check if the object contains any non-header fields. - // Small objects are initialized using a consecutive set of writes. - for (intptr_t current_offset = Instance::NextFieldOffset(); - current_offset < instance_size; current_offset += kWordSize) { - __ sw(T7, Address(T2, current_offset)); - } - } else { - __ addiu(T4, T2, Immediate(Instance::NextFieldOffset())); - // Loop until the whole object is initialized. - // T2: new object. - // T3: next object start. - // T4: next word to be initialized. - // T1: new object type arguments (if is_cls_parameterized). - Label loop, loop_exit; - __ BranchUnsignedGreaterEqual(T4, T3, &loop_exit); - __ Bind(&loop); - __ addiu(T4, T4, Immediate(kWordSize)); - __ bne(T4, T3, &loop); - __ delay_slot()->sw(T7, Address(T4, -kWordSize)); - __ Bind(&loop_exit); - } - if (is_cls_parameterized) { - // T1: new object type arguments. - // Set the type arguments in the new object. - __ sw(T1, Address(T2, cls.type_arguments_field_offset())); - } - // Done allocating and initializing the instance. - // T2: new object still missing its heap tag. - __ Ret(); - __ delay_slot()->addiu(V0, T2, Immediate(kHeapObjectTag)); - - __ Bind(&slow_case); - } - // If is_cls_parameterized: - // T1: new object type arguments (instantiated or not). - // Create a stub frame as we are pushing some objects on the stack before - // calling into the runtime. - __ EnterStubFrame(); // Uses pool pointer to pass cls to runtime. - __ LoadObject(TMP, cls); - - __ addiu(SP, SP, Immediate(-3 * kWordSize)); - // Space on stack for return value. - __ LoadObject(T7, Object::null_object()); - __ sw(T7, Address(SP, 2 * kWordSize)); - __ sw(TMP, Address(SP, 1 * kWordSize)); // Class of object to be allocated. - - if (is_cls_parameterized) { - // Push type arguments of object to be allocated and of instantiator. - __ sw(T1, Address(SP, 0 * kWordSize)); - } else { - // Push null type arguments. - __ sw(T7, Address(SP, 0 * kWordSize)); - } - __ CallRuntime(kAllocateObjectRuntimeEntry, 2); // Allocate object. - __ Comment("AllocationStubForClass return"); - // Pop result (newly allocated object). - __ lw(V0, Address(SP, 2 * kWordSize)); - __ addiu(SP, SP, Immediate(3 * kWordSize)); // Pop arguments. - // V0: new object - // Restore the frame pointer and return. - __ LeaveStubFrameAndReturn(RA); -} - - -// Called for invoking "dynamic noSuchMethod(Invocation invocation)" function -// from the entry code of a dart function after an error in passed argument -// name or number is detected. -// Input parameters: -// RA : return address. -// SP : address of last argument. -// S4: arguments descriptor array. -void StubCode::GenerateCallClosureNoSuchMethodStub(Assembler* assembler) { - __ EnterStubFrame(); - - // Load the receiver. - __ lw(A1, FieldAddress(S4, ArgumentsDescriptor::count_offset())); - __ sll(TMP, A1, 1); // A1 is a Smi. 
- __ addu(TMP, FP, TMP); - __ lw(T6, Address(TMP, kParamEndSlotFromFp * kWordSize)); - - // Push space for the return value. - // Push the receiver. - // Push arguments descriptor array. - const intptr_t kNumArgs = 3; - __ addiu(SP, SP, Immediate(-kNumArgs * kWordSize)); - __ sw(ZR, Address(SP, 2 * kWordSize)); - __ sw(T6, Address(SP, 1 * kWordSize)); - __ sw(S4, Address(SP, 0 * kWordSize)); - - // Adjust arguments count. - __ lw(TMP, FieldAddress(S4, ArgumentsDescriptor::type_args_len_offset())); - Label args_count_ok; - __ BranchEqual(TMP, Immediate(0), &args_count_ok); - __ AddImmediate(A1, A1, Smi::RawValue(1)); // Include the type arguments. - __ Bind(&args_count_ok); - - // A1: Smi-tagged arguments array length. - PushArgumentsArray(assembler); - - __ CallRuntime(kInvokeClosureNoSuchMethodRuntimeEntry, kNumArgs); - // noSuchMethod on closures always throws an error, so it will never return. - __ break_(0); -} - - -// T0: function object. -// S5: inline cache data object. -// Cannot use function object from ICData as it may be the inlined -// function and not the top-scope function. -void StubCode::GenerateOptimizedUsageCounterIncrement(Assembler* assembler) { - __ Comment("OptimizedUsageCounterIncrement"); - Register ic_reg = S5; - Register func_reg = T0; - if (FLAG_trace_optimized_ic_calls) { - __ EnterStubFrame(); - __ addiu(SP, SP, Immediate(-4 * kWordSize)); - __ sw(T0, Address(SP, 3 * kWordSize)); - __ sw(S5, Address(SP, 2 * kWordSize)); - __ sw(ic_reg, Address(SP, 1 * kWordSize)); // Argument. - __ sw(func_reg, Address(SP, 0 * kWordSize)); // Argument. - __ CallRuntime(kTraceICCallRuntimeEntry, 2); - __ lw(S5, Address(SP, 2 * kWordSize)); - __ lw(T0, Address(SP, 3 * kWordSize)); - __ addiu(SP, SP, Immediate(4 * kWordSize)); // Discard argument; - __ LeaveStubFrame(); - } - __ lw(T7, FieldAddress(func_reg, Function::usage_counter_offset())); - __ addiu(T7, T7, Immediate(1)); - __ sw(T7, FieldAddress(func_reg, Function::usage_counter_offset())); -} - - -// Loads function into 'temp_reg'. -void StubCode::GenerateUsageCounterIncrement(Assembler* assembler, - Register temp_reg) { - if (FLAG_optimization_counter_threshold >= 0) { - __ Comment("UsageCounterIncrement"); - Register ic_reg = S5; - Register func_reg = temp_reg; - ASSERT(temp_reg == T0); - __ Comment("Increment function counter"); - __ lw(func_reg, FieldAddress(ic_reg, ICData::owner_offset())); - __ lw(T1, FieldAddress(func_reg, Function::usage_counter_offset())); - __ addiu(T1, T1, Immediate(1)); - __ sw(T1, FieldAddress(func_reg, Function::usage_counter_offset())); - } -} - - -// Note: S5 must be preserved. -// Attempt a quick Smi operation for known operations ('kind'). The ICData -// must have been primed with a Smi/Smi check that will be used for counting -// the invocations. -static void EmitFastSmiOp(Assembler* assembler, - Token::Kind kind, - intptr_t num_args, - Label* not_smi_or_overflow) { - __ Comment("Fast Smi op"); - ASSERT(num_args == 2); - __ lw(T0, Address(SP, 0 * kWordSize)); // Left. - __ lw(T1, Address(SP, 1 * kWordSize)); // Right. - __ or_(CMPRES1, T0, T1); - __ andi(CMPRES1, CMPRES1, Immediate(kSmiTagMask)); - __ bne(CMPRES1, ZR, not_smi_or_overflow); - switch (kind) { - case Token::kADD: { - __ AdduDetectOverflow(V0, T1, T0, CMPRES1); // Add. - __ bltz(CMPRES1, not_smi_or_overflow); // Fall through on overflow. - break; - } - case Token::kSUB: { - __ SubuDetectOverflow(V0, T1, T0, CMPRES1); // Subtract. - __ bltz(CMPRES1, not_smi_or_overflow); // Fall through on overflow. 
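The kADD/kSUB arms above work directly on the tagged operands: adding two Smi-tagged words adds the untagged values and keeps the tag bit clear, and the *DetectOverflow helpers leave a negative value in CMPRES1 on overflow. A hedged sketch of the same check, not the VM's helper:

    #include <cstdint>
    #include <cstdio>

    // Checked addition on Smi-tagged operands: both have tag bit 0 clear, so
    // the raw sum is the tagged sum. Overflow is flagged when the operands
    // share a sign that differs from the result's.
    bool AddTaggedDetectOverflow(int32_t left, int32_t right, int32_t* out) {
      int32_t sum = static_cast<int32_t>(
          static_cast<uint32_t>(left) + static_cast<uint32_t>(right));
      bool overflow = ((left ^ sum) & (right ^ sum)) < 0;
      *out = sum;
      return !overflow;
    }

    int main() {
      int32_t result = 0;
      bool ok = AddTaggedDetectOverflow(3 << 1, 4 << 1, &result);  // Smi 3 + Smi 4.
      std::printf("ok=%d untagged=%d\n", ok, result >> 1);         // ok=1 untagged=7
      return 0;
    }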
- break;
- }
- case Token::kEQ: {
- Label true_label, done;
- __ beq(T1, T0, &true_label);
- __ LoadObject(V0, Bool::False());
- __ b(&done);
- __ Bind(&true_label);
- __ LoadObject(V0, Bool::True());
- __ Bind(&done);
- break;
- }
- default:
- UNIMPLEMENTED();
- }
- // S5: IC data object (preserved).
- __ lw(T0, FieldAddress(S5, ICData::ic_data_offset()));
- // T0: ic_data_array with check entries: classes and target functions.
- __ AddImmediate(T0, Array::data_offset() - kHeapObjectTag);
-// T0: points directly to the first ic data array element.
-#if defined(DEBUG)
- // Check that first entry is for Smi/Smi.
- Label error, ok;
- const int32_t imm_smi_cid = reinterpret_cast<int32_t>(Smi::New(kSmiCid));
- __ lw(T4, Address(T0));
- __ BranchNotEqual(T4, Immediate(imm_smi_cid), &error);
- __ lw(T4, Address(T0, kWordSize));
- __ BranchEqual(T4, Immediate(imm_smi_cid), &ok);
- __ Bind(&error);
- __ Stop("Incorrect IC data");
- __ Bind(&ok);
-#endif
- if (FLAG_optimization_counter_threshold >= 0) {
- // Update counter, ignore overflow.
- const intptr_t count_offset = ICData::CountIndexFor(num_args) * kWordSize;
- __ lw(T4, Address(T0, count_offset));
- __ AddImmediate(T4, T4, Smi::RawValue(1));
- __ sw(T4, Address(T0, count_offset));
- }
-
- __ Ret();
-}
-
-
-// Generate inline cache check for 'num_args'.
-// RA: return address
-// S5: Inline cache data object.
-// Control flow:
-// - If receiver is null -> jump to IC miss.
-// - If receiver is Smi -> load Smi class.
-// - If receiver is not-Smi -> load receiver's class.
-// - Check if 'num_args' (including receiver) match any IC data group.
-// - Match found -> jump to target.
-// - Match not found -> jump to IC miss.
-void StubCode::GenerateNArgsCheckInlineCacheStub(
- Assembler* assembler,
- intptr_t num_args,
- const RuntimeEntry& handle_ic_miss,
- Token::Kind kind,
- bool optimized) {
- __ Comment("NArgsCheckInlineCacheStub");
- ASSERT(num_args == 1 || num_args == 2);
-#if defined(DEBUG)
- {
- Label ok;
- // Check that the IC data array has NumArgsTested() == num_args.
- // 'NumArgsTested' is stored in the least significant bits of 'state_bits'.
- __ lw(T0, FieldAddress(S5, ICData::state_bits_offset()));
- ASSERT(ICData::NumArgsTestedShift() == 0); // No shift needed.
- __ andi(T0, T0, Immediate(ICData::NumArgsTestedMask()));
- __ BranchEqual(T0, Immediate(num_args), &ok);
- __ Stop("Incorrect stub for IC data");
- __ Bind(&ok);
- }
-#endif // DEBUG
-
-
- Label stepping, done_stepping;
- if (FLAG_support_debugger && !optimized) {
- __ Comment("Check single stepping");
- __ LoadIsolate(T0);
- __ lbu(T0, Address(T0, Isolate::single_step_offset()));
- __ BranchNotEqual(T0, Immediate(0), &stepping);
- __ Bind(&done_stepping);
- }
-
- Label not_smi_or_overflow;
- if (kind != Token::kILLEGAL) {
- EmitFastSmiOp(assembler, kind, num_args, &not_smi_or_overflow);
- }
- __ Bind(&not_smi_or_overflow);
-
- __ Comment("Extract ICData initial values and receiver cid");
- // Load argument descriptor into S4.
- __ lw(S4, FieldAddress(S5, ICData::arguments_descriptor_offset()));
- // Preserve return address, since RA is needed for subroutine call.
- __ mov(T2, RA);
- // Loop that checks if there is an IC data match.
- Label loop, found, miss;
- // S5: IC data object (preserved).
- __ lw(T0, FieldAddress(S5, ICData::ic_data_offset()));
- // T0: ic_data_array with check entries: classes and target functions.
- __ AddImmediate(T0, Array::data_offset() - kHeapObjectTag);
- // T0: points directly to the first ic data array element.
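The loop that follows scans this flat array: for each check group it compares the cached class id(s) against the receiver's, stops at a kIllegalCid sentinel, and on a hit loads the group's target. A rough model, assuming the one-argument entry shape [cid, target, count] implied by the offsets used in this file:

    #include <cstdint>
    #include <vector>

    // Rough model of the one-argument IC lookup; the VM's array is flat, with
    // TestEntryLengthFor(1) == 3 words per group. A struct stands in here.
    struct IcEntry {
      intptr_t receiver_cid;  // Smi-tagged in the VM; untagged in this sketch.
      void* target;           // Cached target function.
      intptr_t count;         // Call counter.
    };

    constexpr intptr_t kIllegalCid = 0;  // Sentinel terminating the array.

    void* LookupOneArg(const std::vector<IcEntry>& ic_data, intptr_t cid) {
      for (const IcEntry& entry : ic_data) {
        if (entry.receiver_cid == kIllegalCid) break;        // End of checks.
        if (entry.receiver_cid == cid) return entry.target;  // IC hit.
      }
      return nullptr;  // The stub would fall through to handle_ic_miss here.
    }

    int main() {
      std::vector<IcEntry> ic_data = {{5, &ic_data, 2}, {kIllegalCid, nullptr, 0}};
      return LookupOneArg(ic_data, 5) == nullptr;  // 0 on hit.
    }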
- - // Get the receiver's class ID (first read number of arguments from - // arguments descriptor array and then access the receiver from the stack). - __ lw(T1, FieldAddress(S4, ArgumentsDescriptor::count_offset())); - __ sll(T5, T1, 1); // T1 (argument_count - 1) is smi. - __ addu(T5, T5, SP); - __ lw(T3, Address(T5, -kWordSize)); - __ LoadTaggedClassIdMayBeSmi(T3, T3); - - if (num_args == 2) { - __ lw(T5, Address(T5, -2 * kWordSize)); - __ LoadTaggedClassIdMayBeSmi(T5, T5); - } - - const intptr_t entry_size = ICData::TestEntryLengthFor(num_args) * kWordSize; - // T1: argument_count (smi). - // T3: receiver's class ID (smi). - // T5: first argument's class ID (smi). - - // We unroll the generic one that is generated once more than the others. - const bool optimize = kind == Token::kILLEGAL; - - __ Comment("ICData loop"); - __ Bind(&loop); - for (int unroll = optimize ? 4 : 2; unroll >= 0; unroll--) { - __ lw(T4, Address(T0, 0)); - if (num_args == 1) { - __ beq(T3, T4, &found); // IC hit. - } else { - ASSERT(num_args == 2); - Label update; - __ bne(T3, T4, &update); // Continue. - __ lw(T4, Address(T0, kWordSize)); - __ beq(T5, T4, &found); // IC hit. - __ Bind(&update); - } - - __ AddImmediate(T0, entry_size); // Next entry. - if (unroll == 0) { - __ BranchNotEqual(T4, Immediate(Smi::RawValue(kIllegalCid)), - &loop); // Done? - } else { - __ BranchEqual(T4, Immediate(Smi::RawValue(kIllegalCid)), - &miss); // Done? - } - } - - __ Bind(&miss); - __ Comment("IC miss"); - // Restore return address. - __ mov(RA, T2); - - // Compute address of arguments (first read number of arguments from - // arguments descriptor array and then compute address on the stack). - // T1: argument_count (smi). - __ addiu(T1, T1, Immediate(Smi::RawValue(-1))); - __ sll(T1, T1, 1); // T1 is Smi. - __ addu(T1, SP, T1); - // T1: address of receiver. - // Create a stub frame as we are pushing some objects on the stack before - // calling into the runtime. - __ EnterStubFrame(); - // Preserve IC data object and arguments descriptor array and - // setup space on stack for result (target code object). - int num_slots = num_args + 4; - __ addiu(SP, SP, Immediate(-num_slots * kWordSize)); - __ sw(S5, Address(SP, (num_slots - 1) * kWordSize)); - __ sw(S4, Address(SP, (num_slots - 2) * kWordSize)); - __ sw(ZR, Address(SP, (num_slots - 3) * kWordSize)); - // Push call arguments. - for (intptr_t i = 0; i < num_args; i++) { - __ lw(TMP, Address(T1, -i * kWordSize)); - __ sw(TMP, Address(SP, (num_slots - i - 4) * kWordSize)); - } - // Pass IC data object. - __ sw(S5, Address(SP, (num_slots - num_args - 4) * kWordSize)); - __ CallRuntime(handle_ic_miss, num_args + 1); - __ Comment("NArgsCheckInlineCacheStub return"); - // Pop returned function object into T3. - // Restore arguments descriptor array and IC data array. - __ lw(T3, Address(SP, (num_slots - 3) * kWordSize)); - __ lw(S4, Address(SP, (num_slots - 2) * kWordSize)); - __ lw(S5, Address(SP, (num_slots - 1) * kWordSize)); - // Remove the call arguments pushed earlier, including the IC data object - // and the arguments descriptor array. - __ addiu(SP, SP, Immediate(num_slots * kWordSize)); - __ RestoreCodePointer(); - __ LeaveStubFrame(); - - Label call_target_function; - if (!FLAG_lazy_dispatchers) { - __ mov(T0, T3); - GenerateDispatcherCode(assembler, &call_target_function); - } else { - __ b(&call_target_function); - } - - __ Bind(&found); - __ mov(RA, T2); // Restore return address if found. 
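For reference, the miss path above laid out num_args + 4 words: the preserved ICData and arguments descriptor, a result slot, the copied call arguments, and the ICData again as the handler's last argument. A schematic of that slice for num_args == 1 (field names are descriptive, not VM identifiers):

    #include <cstddef>

    // Schematic of the num_slots == num_args + 4 words the miss path pushes
    // for num_args == 1, lowest stack slot first.
    struct MissFrameSlice {
      void* ic_data_argument;   // Slot 0: ICData passed to the miss handler.
      void* receiver;           // Slot 1: copied call argument.
      void* result_slot;        // Slot 2: filled with the target function.
      void* args_descriptor;    // Slot 3: S4, preserved across the call.
      void* ic_data_preserved;  // Slot 4: S5, preserved across the call.
    };

    static_assert(sizeof(MissFrameSlice) == 5 * sizeof(void*),
                  "five word-sized slots when num_args == 1");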
- __ Comment("Update caller's counter"); - // T0: Pointer to an IC data check group. - const intptr_t target_offset = ICData::TargetIndexFor(num_args) * kWordSize; - const intptr_t count_offset = ICData::CountIndexFor(num_args) * kWordSize; - __ lw(T3, Address(T0, target_offset)); - - if (FLAG_optimization_counter_threshold >= 0) { - // Update counter, ignore overflow. - __ lw(T4, Address(T0, count_offset)); - __ AddImmediate(T4, T4, Smi::RawValue(1)); - __ sw(T4, Address(T0, count_offset)); - } - - __ Comment("Call target"); - __ Bind(&call_target_function); - // T0 <- T3: Target function. - __ mov(T0, T3); - Label is_compiled; - __ lw(T4, FieldAddress(T0, Function::entry_point_offset())); - __ lw(CODE_REG, FieldAddress(T0, Function::code_offset())); - __ jr(T4); - - // Call single step callback in debugger. - if (FLAG_support_debugger && !optimized) { - __ Bind(&stepping); - __ EnterStubFrame(); - __ addiu(SP, SP, Immediate(-2 * kWordSize)); - __ sw(S5, Address(SP, 1 * kWordSize)); // Preserve IC data. - __ sw(RA, Address(SP, 0 * kWordSize)); // Return address. - __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0); - __ lw(RA, Address(SP, 0 * kWordSize)); - __ lw(S5, Address(SP, 1 * kWordSize)); - __ addiu(SP, SP, Immediate(2 * kWordSize)); - __ RestoreCodePointer(); - __ LeaveStubFrame(); - __ b(&done_stepping); - } -} - - -// Use inline cache data array to invoke the target or continue in inline -// cache miss handler. Stub for 1-argument check (receiver class). -// RA: Return address. -// S5: Inline cache data object. -// Inline cache data object structure: -// 0: function-name -// 1: N, number of arguments checked. -// 2 .. (length - 1): group of checks, each check containing: -// - N classes. -// - 1 target function. -void StubCode::GenerateOneArgCheckInlineCacheStub(Assembler* assembler) { - GenerateUsageCounterIncrement(assembler, T0); - GenerateNArgsCheckInlineCacheStub( - assembler, 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL); -} - - -void StubCode::GenerateTwoArgsCheckInlineCacheStub(Assembler* assembler) { - GenerateUsageCounterIncrement(assembler, T0); - GenerateNArgsCheckInlineCacheStub(assembler, 2, - kInlineCacheMissHandlerTwoArgsRuntimeEntry, - Token::kILLEGAL); -} - - -void StubCode::GenerateSmiAddInlineCacheStub(Assembler* assembler) { - GenerateUsageCounterIncrement(assembler, T0); - GenerateNArgsCheckInlineCacheStub( - assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kADD); -} - - -void StubCode::GenerateSmiSubInlineCacheStub(Assembler* assembler) { - GenerateUsageCounterIncrement(assembler, T0); - GenerateNArgsCheckInlineCacheStub( - assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kSUB); -} - - -void StubCode::GenerateSmiEqualInlineCacheStub(Assembler* assembler) { - GenerateUsageCounterIncrement(assembler, T0); - GenerateNArgsCheckInlineCacheStub( - assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kEQ); -} - - -void StubCode::GenerateOneArgOptimizedCheckInlineCacheStub( - Assembler* assembler) { - GenerateOptimizedUsageCounterIncrement(assembler); - GenerateNArgsCheckInlineCacheStub(assembler, 1, - kInlineCacheMissHandlerOneArgRuntimeEntry, - Token::kILLEGAL, true /* optimized */); -} - - -void StubCode::GenerateTwoArgsOptimizedCheckInlineCacheStub( - Assembler* assembler) { - GenerateOptimizedUsageCounterIncrement(assembler); - GenerateNArgsCheckInlineCacheStub(assembler, 2, - kInlineCacheMissHandlerTwoArgsRuntimeEntry, - Token::kILLEGAL, true /* optimized */); -} - - -// Intermediary stub between a 
static call and its target. ICData contains -// the target function and the call count. -// S5: ICData -void StubCode::GenerateZeroArgsUnoptimizedStaticCallStub(Assembler* assembler) { - GenerateUsageCounterIncrement(assembler, T0); - __ Comment("UnoptimizedStaticCallStub"); -#if defined(DEBUG) - { - Label ok; - // Check that the IC data array has NumArgsTested() == 0. - // 'NumArgsTested' is stored in the least significant bits of 'state_bits'. - __ lw(T0, FieldAddress(S5, ICData::state_bits_offset())); - ASSERT(ICData::NumArgsTestedShift() == 0); // No shift needed. - __ andi(T0, T0, Immediate(ICData::NumArgsTestedMask())); - __ beq(T0, ZR, &ok); - __ Stop("Incorrect IC data for unoptimized static call"); - __ Bind(&ok); - } -#endif // DEBUG - - // Check single stepping. - Label stepping, done_stepping; - if (FLAG_support_debugger) { - __ LoadIsolate(T0); - __ lbu(T0, Address(T0, Isolate::single_step_offset())); - __ BranchNotEqual(T0, Immediate(0), &stepping); - __ Bind(&done_stepping); - } - - // S5: IC data object (preserved). - __ lw(T0, FieldAddress(S5, ICData::ic_data_offset())); - // T0: ic_data_array with entries: target functions and count. - __ AddImmediate(T0, Array::data_offset() - kHeapObjectTag); - // T0: points directly to the first ic data array element. - const intptr_t target_offset = ICData::TargetIndexFor(0) * kWordSize; - const intptr_t count_offset = ICData::CountIndexFor(0) * kWordSize; - - if (FLAG_optimization_counter_threshold >= 0) { - // Increment count for this call, ignore overflow. - __ lw(T4, Address(T0, count_offset)); - __ AddImmediate(T4, T4, Smi::RawValue(1)); - __ sw(T4, Address(T0, count_offset)); - } - - // Load arguments descriptor into S4. - __ lw(S4, FieldAddress(S5, ICData::arguments_descriptor_offset())); - - // Get function and call it, if possible. - __ lw(T0, Address(T0, target_offset)); - __ lw(CODE_REG, FieldAddress(T0, Function::code_offset())); - __ lw(T4, FieldAddress(T0, Function::entry_point_offset())); - __ jr(T4); - - // Call single step callback in debugger. - if (FLAG_support_debugger) { - __ Bind(&stepping); - __ EnterStubFrame(); - __ addiu(SP, SP, Immediate(-2 * kWordSize)); - __ sw(S5, Address(SP, 1 * kWordSize)); // Preserve IC data. - __ sw(RA, Address(SP, 0 * kWordSize)); // Return address. - __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0); - __ lw(RA, Address(SP, 0 * kWordSize)); - __ lw(S5, Address(SP, 1 * kWordSize)); - __ addiu(SP, SP, Immediate(2 * kWordSize)); - __ RestoreCodePointer(); - __ LeaveStubFrame(); - __ b(&done_stepping); - } -} - - -void StubCode::GenerateOneArgUnoptimizedStaticCallStub(Assembler* assembler) { - GenerateUsageCounterIncrement(assembler, T0); - GenerateNArgsCheckInlineCacheStub( - assembler, 1, kStaticCallMissHandlerOneArgRuntimeEntry, Token::kILLEGAL); -} - - -void StubCode::GenerateTwoArgsUnoptimizedStaticCallStub(Assembler* assembler) { - GenerateUsageCounterIncrement(assembler, T0); - GenerateNArgsCheckInlineCacheStub( - assembler, 2, kStaticCallMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL); -} - - -// Stub for compiling a function and jumping to the compiled code. -// S5: IC-Data (for methods). -// S4: Arguments descriptor. -// T0: Function. -void StubCode::GenerateLazyCompileStub(Assembler* assembler) { - __ EnterStubFrame(); - __ addiu(SP, SP, Immediate(-3 * kWordSize)); - __ sw(S5, Address(SP, 2 * kWordSize)); // Preserve IC data object. - __ sw(S4, Address(SP, 1 * kWordSize)); // Preserve args descriptor array. - __ sw(T0, Address(SP, 0 * kWordSize)); // Pass function. 
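With the function pushed, the stub is about to ask the runtime to compile it, and it will then tail-jump to the freshly installed entry point. The control flow, reduced to a hedged function-pointer sketch with stand-in names:

    #include <cstdio>

    // Function-pointer model of the lazy-compile pattern: preserve caller
    // state (elided here), have the runtime install code, then jump to it.
    using Entry = int (*)(int);

    struct Func {
      Entry entry_point = nullptr;  // Null until first call compiles it.
    };

    int CompiledBody(int x) { return x + 1; }

    // Stand-in for kCompileFunctionRuntimeEntry.
    void CompileFunction(Func* f) { f->entry_point = &CompiledBody; }

    int LazyCompileAndCall(Func* f, int arg) {
      if (f->entry_point == nullptr) CompileFunction(f);
      return f->entry_point(arg);  // The stub jumps (jr T2) rather than calls.
    }

    int main() {
      Func f;
      std::printf("%d\n", LazyCompileAndCall(&f, 41));  // 42
      return 0;
    }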
- __ CallRuntime(kCompileFunctionRuntimeEntry, 1); - __ lw(T0, Address(SP, 0 * kWordSize)); // Restore function. - __ lw(S4, Address(SP, 1 * kWordSize)); // Restore args descriptor array. - __ lw(S5, Address(SP, 2 * kWordSize)); // Restore IC data array. - __ addiu(SP, SP, Immediate(3 * kWordSize)); - __ LeaveStubFrame(); - - __ lw(CODE_REG, FieldAddress(T0, Function::code_offset())); - __ lw(T2, FieldAddress(T0, Function::entry_point_offset())); - __ jr(T2); -} - - -// S5: Contains an ICData. -void StubCode::GenerateICCallBreakpointStub(Assembler* assembler) { - __ Comment("ICCallBreakpoint stub"); - __ EnterStubFrame(); - __ addiu(SP, SP, Immediate(-2 * kWordSize)); - __ sw(S5, Address(SP, 1 * kWordSize)); - __ sw(ZR, Address(SP, 0 * kWordSize)); - - __ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0); - - __ lw(S5, Address(SP, 1 * kWordSize)); - __ lw(CODE_REG, Address(SP, 0 * kWordSize)); - __ addiu(SP, SP, Immediate(2 * kWordSize)); - __ LeaveStubFrame(); - __ lw(T0, FieldAddress(CODE_REG, Code::entry_point_offset())); - __ jr(T0); -} - - -void StubCode::GenerateRuntimeCallBreakpointStub(Assembler* assembler) { - __ Comment("RuntimeCallBreakpoint stub"); - __ EnterStubFrame(); - __ addiu(SP, SP, Immediate(-1 * kWordSize)); - __ sw(ZR, Address(SP, 0 * kWordSize)); - - __ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0); - - __ lw(CODE_REG, Address(SP, 0 * kWordSize)); - __ addiu(SP, SP, Immediate(3 * kWordSize)); - __ LeaveStubFrame(); - __ lw(T0, FieldAddress(CODE_REG, Code::entry_point_offset())); - __ jr(T0); -} - - -// Called only from unoptimized code. All relevant registers have been saved. -// RA: return address. -void StubCode::GenerateDebugStepCheckStub(Assembler* assembler) { - // Check single stepping. - Label stepping, done_stepping; - __ LoadIsolate(T0); - __ lbu(T0, Address(T0, Isolate::single_step_offset())); - __ BranchNotEqual(T0, Immediate(0), &stepping); - __ Bind(&done_stepping); - - __ Ret(); - - // Call single step callback in debugger. - __ Bind(&stepping); - __ EnterStubFrame(); - __ addiu(SP, SP, Immediate(-1 * kWordSize)); - __ sw(RA, Address(SP, 0 * kWordSize)); // Return address. - __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0); - __ lw(RA, Address(SP, 0 * kWordSize)); - __ addiu(SP, SP, Immediate(1 * kWordSize)); - __ LeaveStubFrame(); - __ b(&done_stepping); -} - - -// Used to check class and type arguments. Arguments passed in registers: -// RA: return address. -// A0: instance (must be preserved). -// A1: instantiator type arguments (only if n == 4, can be raw_null). -// A2: function type arguments (only if n == 4, can be raw_null). -// A3: SubtypeTestCache. -// Result in V0: null -> not found, otherwise result (true or false). -static void GenerateSubtypeNTestCacheStub(Assembler* assembler, int n) { - __ Comment("SubtypeNTestCacheStub"); - ASSERT((n == 1) || (n == 2) || (n == 4)); - if (n > 1) { - __ LoadClass(T0, A0); - // Compute instance type arguments into T1. - Label has_no_type_arguments; - __ LoadObject(T1, Object::null_object()); - __ lw(T2, FieldAddress( - T0, Class::type_arguments_field_offset_in_words_offset())); - __ BranchEqual(T2, Immediate(Class::kNoTypeArguments), - &has_no_type_arguments); - __ sll(T2, T2, 2); - __ addu(T2, A0, T2); // T2 <- A0 + T2 * 4 - __ lw(T1, FieldAddress(T2, 0)); - __ Bind(&has_no_type_arguments); - } - __ LoadClassId(T0, A0); - // A0: instance. - // A1: instantiator type arguments (only if n == 4, can be raw_null). - // A2: function type arguments (only if n == 4, can be raw_null). 
- // A3: SubtypeTestCache.
- // T0: instance class id.
- // T1: instance type arguments (null if none), used only if n > 1.
- __ lw(T2, FieldAddress(A3, SubtypeTestCache::cache_offset()));
- __ AddImmediate(T2, Array::data_offset() - kHeapObjectTag);
-
- __ LoadObject(T7, Object::null_object());
- Label loop, found, not_found, next_iteration;
- // T0: instance class id.
- // T1: instance type arguments (still null if closure).
- // T2: Entry start.
- // T7: null.
- __ SmiTag(T0);
- __ BranchNotEqual(T0, Immediate(Smi::RawValue(kClosureCid)), &loop);
- __ lw(T1, FieldAddress(A0, Closure::function_type_arguments_offset()));
- __ bne(T1, T7, &not_found); // Cache cannot be used for generic closures.
- __ lw(T1, FieldAddress(A0, Closure::instantiator_type_arguments_offset()));
- __ lw(T0, FieldAddress(A0, Closure::function_offset()));
- // T0: instance class id as Smi or function.
- __ Bind(&loop);
- __ lw(T3,
- Address(T2, kWordSize * SubtypeTestCache::kInstanceClassIdOrFunction));
- __ beq(T3, T7, &not_found);
- if (n == 1) {
- __ beq(T3, T0, &found);
- } else {
- __ bne(T3, T0, &next_iteration);
- __ lw(T3,
- Address(T2, kWordSize * SubtypeTestCache::kInstanceTypeArguments));
- if (n == 2) {
- __ beq(T3, T1, &found);
- } else {
- __ bne(T3, T1, &next_iteration);
- __ lw(T3, Address(T2, kWordSize *
- SubtypeTestCache::kInstantiatorTypeArguments));
- __ bne(T3, A1, &next_iteration);
- __ lw(T3,
- Address(T2, kWordSize * SubtypeTestCache::kFunctionTypeArguments));
- __ beq(T3, A2, &found);
- }
- }
- __ Bind(&next_iteration);
- __ b(&loop);
- __ delay_slot()->addiu(
- T2, T2, Immediate(kWordSize * SubtypeTestCache::kTestEntryLength));
- // Fall through to not found.
- __ Bind(&not_found);
- __ Ret();
- __ delay_slot()->mov(V0, T7);
-
- __ Bind(&found);
- __ Ret();
- __ delay_slot()->lw(V0,
- Address(T2, kWordSize * SubtypeTestCache::kTestResult));
-}
-
-
-// Used to check class and type arguments. Arguments passed in registers:
-// RA: return address.
-// A0: instance (must be preserved).
-// A1: unused.
-// A2: unused.
-// A3: SubtypeTestCache.
-// Result in V0: null -> not found, otherwise result (true or false).
-void StubCode::GenerateSubtype1TestCacheStub(Assembler* assembler) {
- GenerateSubtypeNTestCacheStub(assembler, 1);
-}
-
-
-// Used to check class and type arguments. Arguments passed in registers:
-// RA: return address.
-// A0: instance (must be preserved).
-// A1: unused.
-// A2: unused.
-// A3: SubtypeTestCache.
-// Result in V0: null -> not found, otherwise result (true or false).
-void StubCode::GenerateSubtype2TestCacheStub(Assembler* assembler) {
- GenerateSubtypeNTestCacheStub(assembler, 2);
-}
-
-
-// Used to check class and type arguments. Arguments passed in registers:
-// RA: return address.
-// A0: instance (must be preserved).
-// A1: instantiator type arguments (can be raw_null).
-// A2: function type arguments (can be raw_null).
-// A3: SubtypeTestCache.
-// Result in V0: null -> not found, otherwise result (true or false).
-void StubCode::GenerateSubtype4TestCacheStub(Assembler* assembler) {
- GenerateSubtypeNTestCacheStub(assembler, 4);
-}
-
-
-// Return the current stack pointer address, used to do stack alignment
-// checks.
-void StubCode::GenerateGetCStackPointerStub(Assembler* assembler) {
- __ Ret();
- __ delay_slot()->mov(V0, SP);
-}
-
-
-// Jump to the exception or error handler.
-// RA: return address.
-// A0: program_counter.
-// A1: stack_pointer.
-// A2: frame_pointer.
-// A3: thread.
-// Does not return.
-void StubCode::GenerateJumpToFrameStub(Assembler* assembler) { - ASSERT(kExceptionObjectReg == V0); - ASSERT(kStackTraceObjectReg == V1); - __ mov(FP, A2); // Frame_pointer. - __ mov(THR, A3); // Thread. - // Set tag. - __ LoadImmediate(A2, VMTag::kDartTagId); - __ sw(A2, Assembler::VMTagAddress()); - // Clear top exit frame. - __ sw(ZR, Address(THR, Thread::top_exit_frame_info_offset())); - // Restore pool pointer. - __ RestoreCodePointer(); - __ LoadPoolPointer(); - __ jr(A0); // Jump to the program counter. - __ delay_slot()->mov(SP, A1); // Stack pointer. -} - - -// Run an exception handler. Execution comes from JumpToFrame -// stub or from the simulator. -// -// The arguments are stored in the Thread object. -// Does not return. -void StubCode::GenerateRunExceptionHandlerStub(Assembler* assembler) { - __ lw(A0, Address(THR, Thread::resume_pc_offset())); - __ LoadImmediate(A2, 0); - - // Load the exception from the current thread. - Address exception_addr(THR, Thread::active_exception_offset()); - __ lw(V0, exception_addr); - __ sw(A2, exception_addr); - - // Load the stacktrace from the current thread. - Address stacktrace_addr(THR, Thread::active_stacktrace_offset()); - __ lw(V1, stacktrace_addr); - - __ jr(A0); // Jump to continuation point. - __ delay_slot()->sw(A2, stacktrace_addr); -} - - -// Deoptimize a frame on the call stack before rewinding. -// The arguments are stored in the Thread object. -// No result. -void StubCode::GenerateDeoptForRewindStub(Assembler* assembler) { - // Push zap value instead of CODE_REG. - __ LoadImmediate(TMP, kZapCodeReg); - __ Push(TMP); - - // Load the deopt pc into RA. - __ lw(RA, Address(THR, Thread::resume_pc_offset())); - GenerateDeoptimizationSequence(assembler, kEagerDeopt); - - // After we have deoptimized, jump to the correct frame. - __ EnterStubFrame(); - __ CallRuntime(kRewindPostDeoptRuntimeEntry, 0); - __ LeaveStubFrame(); - __ break_(0); -} - - -// Calls to the runtime to optimize the given function. -// T0: function to be reoptimized. -// S4: argument descriptor (preserved). -void StubCode::GenerateOptimizeFunctionStub(Assembler* assembler) { - __ Comment("OptimizeFunctionStub"); - __ EnterStubFrame(); - __ addiu(SP, SP, Immediate(-3 * kWordSize)); - __ sw(S4, Address(SP, 2 * kWordSize)); - // Setup space on stack for return value. - __ sw(ZR, Address(SP, 1 * kWordSize)); - __ sw(T0, Address(SP, 0 * kWordSize)); - __ CallRuntime(kOptimizeInvokedFunctionRuntimeEntry, 1); - __ Comment("OptimizeFunctionStub return"); - __ lw(T0, Address(SP, 1 * kWordSize)); // Get Function object - __ lw(S4, Address(SP, 2 * kWordSize)); // Restore argument descriptor. - __ addiu(SP, SP, Immediate(3 * kWordSize)); // Discard argument. - - __ lw(CODE_REG, FieldAddress(T0, Function::code_offset())); - __ lw(T1, FieldAddress(T0, Function::entry_point_offset())); - __ LeaveStubFrameAndReturn(T1); - __ break_(0); -} - - -// Does identical check (object references are equal or not equal) with special -// checks for boxed numbers. -// Returns: CMPRES1 is zero if equal, non-zero otherwise. -// Note: A Mint cannot contain a value that would fit in Smi, a Bigint -// cannot contain a value that fits in Mint or Smi. -static void GenerateIdenticalWithNumberCheckStub(Assembler* assembler, - const Register left, - const Register right, - const Register temp1, - const Register temp2) { - __ Comment("IdenticalWithNumberCheckStub"); - Label reference_compare, done, check_mint, check_bigint; - // If any of the arguments is Smi do reference compare. 
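The masking below is the standard Smi test: tag bit 0 clear means Smi, set means heap object, so `andi` with kSmiTagMask isolates exactly that bit. In plain C++ terms:

    #include <cstdint>
    #include <cstdio>

    constexpr intptr_t kSmiTagMask = 1;  // Low bit: 0 for Smi, 1 for heap object.

    bool IsSmi(intptr_t raw) { return (raw & kSmiTagMask) == 0; }

    int main() {
      intptr_t smi = 7 << 1;          // Tagged small integer 7.
      intptr_t heap_object = 0x1001;  // Some tagged pointer (illustrative).
      std::printf("%d %d\n", IsSmi(smi), IsSmi(heap_object));  // 1 0
      return 0;
    }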
- __ andi(temp1, left, Immediate(kSmiTagMask)); - __ beq(temp1, ZR, &reference_compare); - __ andi(temp1, right, Immediate(kSmiTagMask)); - __ beq(temp1, ZR, &reference_compare); - - // Value compare for two doubles. - __ LoadImmediate(temp1, kDoubleCid); - __ LoadClassId(temp2, left); - __ bne(temp1, temp2, &check_mint); - __ LoadClassId(temp2, right); - __ subu(CMPRES1, temp1, temp2); - __ bne(CMPRES1, ZR, &done); - - // Double values bitwise compare. - __ lw(temp1, FieldAddress(left, Double::value_offset() + 0 * kWordSize)); - __ lw(temp2, FieldAddress(right, Double::value_offset() + 0 * kWordSize)); - __ subu(CMPRES1, temp1, temp2); - __ bne(CMPRES1, ZR, &done); - __ lw(temp1, FieldAddress(left, Double::value_offset() + 1 * kWordSize)); - __ lw(temp2, FieldAddress(right, Double::value_offset() + 1 * kWordSize)); - __ b(&done); - __ delay_slot()->subu(CMPRES1, temp1, temp2); - - __ Bind(&check_mint); - __ LoadImmediate(temp1, kMintCid); - __ LoadClassId(temp2, left); - __ bne(temp1, temp2, &check_bigint); - __ LoadClassId(temp2, right); - __ subu(CMPRES1, temp1, temp2); - __ bne(CMPRES1, ZR, &done); - - __ lw(temp1, FieldAddress(left, Mint::value_offset() + 0 * kWordSize)); - __ lw(temp2, FieldAddress(right, Mint::value_offset() + 0 * kWordSize)); - __ subu(CMPRES1, temp1, temp2); - __ bne(CMPRES1, ZR, &done); - __ lw(temp1, FieldAddress(left, Mint::value_offset() + 1 * kWordSize)); - __ lw(temp2, FieldAddress(right, Mint::value_offset() + 1 * kWordSize)); - __ b(&done); - __ delay_slot()->subu(CMPRES1, temp1, temp2); - - __ Bind(&check_bigint); - __ LoadImmediate(temp1, kBigintCid); - __ LoadClassId(temp2, left); - __ bne(temp1, temp2, &reference_compare); - __ LoadClassId(temp2, right); - __ subu(CMPRES1, temp1, temp2); - __ bne(CMPRES1, ZR, &done); - - __ EnterStubFrame(); - __ ReserveAlignedFrameSpace(2 * kWordSize); - __ sw(left, Address(SP, 1 * kWordSize)); - __ sw(right, Address(SP, 0 * kWordSize)); - __ mov(A0, left); - __ mov(A1, right); - __ CallRuntime(kBigintCompareRuntimeEntry, 2); - __ Comment("IdenticalWithNumberCheckStub return"); - // Result in V0, 0 means equal. - __ LeaveStubFrame(); - __ b(&done); - __ delay_slot()->mov(CMPRES1, V0); - - __ Bind(&reference_compare); - __ subu(CMPRES1, left, right); - __ Bind(&done); - // A branch or test after this comparison will check CMPRES1 == ZR. -} - - -// Called only from unoptimized code. All relevant registers have been saved. -// RA: return address. -// SP + 4: left operand. -// SP + 0: right operand. -// Returns: CMPRES1 is zero if equal, non-zero otherwise. -void StubCode::GenerateUnoptimizedIdenticalWithNumberCheckStub( - Assembler* assembler) { - // Check single stepping. - Label stepping, done_stepping; - if (FLAG_support_debugger) { - __ LoadIsolate(T0); - __ lbu(T0, Address(T0, Isolate::single_step_offset())); - __ BranchNotEqual(T0, Immediate(0), &stepping); - __ Bind(&done_stepping); - } - - const Register temp1 = T2; - const Register temp2 = T3; - const Register left = T1; - const Register right = T0; - __ lw(left, Address(SP, 1 * kWordSize)); - __ lw(right, Address(SP, 0 * kWordSize)); - GenerateIdenticalWithNumberCheckStub(assembler, left, right, temp1, temp2); - __ Ret(); - - // Call single step callback in debugger. - if (FLAG_support_debugger) { - __ Bind(&stepping); - __ EnterStubFrame(); - __ addiu(SP, SP, Immediate(-1 * kWordSize)); - __ sw(RA, Address(SP, 0 * kWordSize)); // Return address. 
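The number-check helper above compares doubles (and Mints) as raw bit patterns, one 32-bit word at a time, which is what identical() requires; equivalently, on a 64-bit view:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // identical() on doubles is bit-pattern equality, which the 32-bit stub
    // implements as two word compares; a 64-bit equivalent:
    bool IdenticalDoubles(double a, double b) {
      uint64_t raw_a, raw_b;
      std::memcpy(&raw_a, &a, sizeof(raw_a));
      std::memcpy(&raw_b, &b, sizeof(raw_b));
      return raw_a == raw_b;
    }

    int main() {
      std::printf("%d\n", IdenticalDoubles(0.0, -0.0));  // 0: sign bits differ.
      return 0;
    }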
- __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0); - __ lw(RA, Address(SP, 0 * kWordSize)); - __ addiu(SP, SP, Immediate(1 * kWordSize)); - __ RestoreCodePointer(); - __ LeaveStubFrame(); - __ b(&done_stepping); - } -} - - -// Called from optimized code only. -// SP + 4: left operand. -// SP + 0: right operand. -// Returns: CMPRES1 is zero if equal, non-zero otherwise. -void StubCode::GenerateOptimizedIdenticalWithNumberCheckStub( - Assembler* assembler) { - const Register temp1 = T2; - const Register temp2 = T3; - const Register left = T1; - const Register right = T0; - __ lw(left, Address(SP, 1 * kWordSize)); - __ lw(right, Address(SP, 0 * kWordSize)); - GenerateIdenticalWithNumberCheckStub(assembler, left, right, temp1, temp2); - __ Ret(); -} - - -// Called from megamorphic calls. -// T0: receiver -// S5: MegamorphicCache (preserved) -// Passed to target: -// CODE_REG: target Code object -// S4: arguments descriptor -void StubCode::GenerateMegamorphicCallStub(Assembler* assembler) { - __ LoadTaggedClassIdMayBeSmi(T0, T0); - // T0: class ID of the receiver (smi). - __ lw(S4, FieldAddress(S5, MegamorphicCache::arguments_descriptor_offset())); - __ lw(T2, FieldAddress(S5, MegamorphicCache::buckets_offset())); - __ lw(T1, FieldAddress(S5, MegamorphicCache::mask_offset())); - // T2: cache buckets array. - // T1: mask. - __ LoadImmediate(TMP, MegamorphicCache::kSpreadFactor); - __ mult(TMP, T0); - __ mflo(T3); - // T3: probe. - - Label loop, update, call_target_function; - __ b(&loop); - - __ Bind(&update); - __ addiu(T3, T3, Immediate(Smi::RawValue(1))); - __ Bind(&loop); - __ and_(T3, T3, T1); - const intptr_t base = Array::data_offset(); - // T3 is smi tagged, but table entries are two words, so LSL 2. - __ sll(TMP, T3, 2); - __ addu(TMP, T2, TMP); - __ lw(T4, FieldAddress(TMP, base)); - - ASSERT(kIllegalCid == 0); - __ beq(T4, ZR, &call_target_function); - __ bne(T4, T0, &update); - - __ Bind(&call_target_function); - // Call the target found in the cache. For a class id match, this is a - // proper target for the given name and arguments descriptor. If the - // illegal class id was found, the target is a cache miss handler that can - // be invoked as a normal Dart function. - __ sll(T1, T3, 2); - __ addu(T1, T2, T1); - __ lw(T0, FieldAddress(T1, base + kWordSize)); - - __ lw(T1, FieldAddress(T0, Function::entry_point_offset())); - __ lw(CODE_REG, FieldAddress(T0, Function::code_offset())); - __ jr(T1); -} - - -// Called from switchable IC calls. -// T0: receiver -// S5: ICData (preserved) -// Passed to target: -// CODE_REG: target Code object -// S4: arguments descriptor -void StubCode::GenerateICCallThroughFunctionStub(Assembler* assembler) { - Label loop, found, miss; - __ lw(T6, FieldAddress(S5, ICData::ic_data_offset())); - __ lw(S4, FieldAddress(S5, ICData::arguments_descriptor_offset())); - __ AddImmediate(T6, T6, Array::data_offset() - kHeapObjectTag); - // T6: first IC entry. - __ LoadTaggedClassIdMayBeSmi(T1, T0); - // T1: receiver cid as Smi - - __ Bind(&loop); - __ lw(T2, Address(T6, 0)); - __ beq(T1, T2, &found); - ASSERT(Smi::RawValue(kIllegalCid) == 0); - __ beq(T2, ZR, &miss); - - const intptr_t entry_length = ICData::TestEntryLengthFor(1) * kWordSize; - __ AddImmediate(T6, entry_length); // Next entry. 
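Unlike the linear IC scan here, the megamorphic stub earlier probes a hashed power-of-two bucket table: the receiver's cid is multiplied by a spread factor, masked, and probed linearly until a hit or an empty (kIllegalCid) slot. A compact model of that lookup; kSpreadFactor's value is assumed for this sketch:

    #include <cstdint>
    #include <cstdio>

    struct Bucket {
      intptr_t cid;  // 0 stands in for kIllegalCid (empty slot).
      void* target;
    };

    void* Probe(const Bucket* table, intptr_t mask, intptr_t cid) {
      const intptr_t kSpreadFactor = 7;  // Assumed value, for illustration.
      intptr_t index = (cid * kSpreadFactor) & mask;
      while (true) {
        if (table[index].cid == cid) return table[index].target;  // Hit.
        if (table[index].cid == 0) return nullptr;  // Empty: miss handler.
        index = (index + 1) & mask;                 // Linear probing.
      }
    }

    int main() {
      Bucket table[8] = {};
      table[(42 * 7) & 7] = {42, table};  // Install a target for cid 42.
      std::printf("hit=%d\n", Probe(table, 7, 42) != nullptr);  // hit=1
      return 0;
    }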
- __ b(&loop); - - __ Bind(&found); - const intptr_t target_offset = ICData::TargetIndexFor(1) * kWordSize; - __ lw(T0, Address(T6, target_offset)); - __ lw(T1, FieldAddress(T0, Function::entry_point_offset())); - __ lw(CODE_REG, FieldAddress(T0, Function::code_offset())); - __ jr(T1); - - __ Bind(&miss); - __ LoadIsolate(T2); - __ lw(CODE_REG, Address(T2, Isolate::ic_miss_code_offset())); - __ lw(T1, FieldAddress(CODE_REG, Code::entry_point_offset())); - __ jr(T1); -} - - -void StubCode::GenerateICCallThroughCodeStub(Assembler* assembler) { - Label loop, found, miss; - __ lw(T6, FieldAddress(S5, ICData::ic_data_offset())); - __ lw(S4, FieldAddress(S5, ICData::arguments_descriptor_offset())); - __ AddImmediate(T6, T6, Array::data_offset() - kHeapObjectTag); - // T6: first IC entry. - __ LoadTaggedClassIdMayBeSmi(T1, T0); - // T1: receiver cid as Smi - - __ Bind(&loop); - __ lw(T2, Address(T6, 0)); - __ beq(T1, T2, &found); - ASSERT(Smi::RawValue(kIllegalCid) == 0); - __ beq(T2, ZR, &miss); - - const intptr_t entry_length = ICData::TestEntryLengthFor(1) * kWordSize; - __ AddImmediate(T6, entry_length); // Next entry. - __ b(&loop); - - __ Bind(&found); - const intptr_t code_offset = ICData::CodeIndexFor(1) * kWordSize; - const intptr_t entry_offset = ICData::EntryPointIndexFor(1) * kWordSize; - __ lw(T1, Address(T6, entry_offset)); - __ lw(CODE_REG, Address(T6, code_offset)); - __ jr(T1); - - __ Bind(&miss); - __ LoadIsolate(T2); - __ lw(CODE_REG, Address(T2, Isolate::ic_miss_code_offset())); - __ lw(T1, FieldAddress(CODE_REG, Code::entry_point_offset())); - __ jr(T1); -} - - -// Called from switchable IC calls. -// T0: receiver -// S5: SingleTargetCache -void StubCode::GenerateUnlinkedCallStub(Assembler* assembler) { - __ EnterStubFrame(); - __ Push(T0); // Preserve receiver. - - __ Push(ZR); // Result slot. - __ Push(T0); // Arg0: Receiver - __ Push(S5); // Arg1: UnlinkedCall - __ CallRuntime(kUnlinkedCallRuntimeEntry, 2); - __ Drop(2); - __ Pop(S5); // result = IC - - __ Pop(T0); // Restore receiver. - __ LeaveStubFrame(); - - __ lw(CODE_REG, Address(THR, Thread::ic_lookup_through_code_stub_offset())); - __ lw(T1, FieldAddress(CODE_REG, Code::checked_entry_point_offset())); - __ jr(T1); -} - - -// Called from switchable IC calls. -// T0: receiver -// S5: SingleTargetCache -// Passed to target: -// CODE_REG: target Code object -void StubCode::GenerateSingleTargetCallStub(Assembler* assembler) { - Label miss; - __ LoadClassIdMayBeSmi(T1, T0); - __ lhu(T2, FieldAddress(S5, SingleTargetCache::lower_limit_offset())); - __ lhu(T3, FieldAddress(S5, SingleTargetCache::upper_limit_offset())); - - __ BranchUnsignedLess(T1, T2, &miss); - __ BranchUnsignedGreater(T1, T3, &miss); - - __ lw(T1, FieldAddress(S5, SingleTargetCache::entry_point_offset())); - __ lw(CODE_REG, FieldAddress(S5, SingleTargetCache::target_offset())); - __ jr(T1); - - __ Bind(&miss); - __ EnterStubFrame(); - __ Push(T0); // Preserve receiver. - - __ Push(ZR); // Result slot. - __ Push(T0); // Arg0: Receiver - __ CallRuntime(kSingleTargetMissRuntimeEntry, 1); - __ Drop(1); - __ Pop(S5); // result = IC - - __ Pop(T0); // Restore receiver. - __ LeaveStubFrame(); - - __ lw(CODE_REG, Address(THR, Thread::ic_lookup_through_code_stub_offset())); - __ lw(T1, FieldAddress(CODE_REG, Code::checked_entry_point_offset())); - __ jr(T1); -} - - -// Called from the monomorphic checked entry. 
-// T0: receiver -void StubCode::GenerateMonomorphicMissStub(Assembler* assembler) { - __ lw(CODE_REG, Address(THR, Thread::monomorphic_miss_stub_offset())); - __ EnterStubFrame(); - __ Push(T0); // Preserve receiver. - - __ Push(ZR); // Result slot. - __ Push(T0); // Arg0: Receiver - __ CallRuntime(kMonomorphicMissRuntimeEntry, 1); - __ Drop(1); - __ Pop(S5); // result = IC - - __ Pop(T0); // Restore receiver. - __ LeaveStubFrame(); - - __ lw(CODE_REG, Address(THR, Thread::ic_lookup_through_code_stub_offset())); - __ lw(T1, FieldAddress(CODE_REG, Code::checked_entry_point_offset())); - __ jr(T1); -} - - -void StubCode::GenerateFrameAwaitingMaterializationStub(Assembler* assembler) { - __ break_(0); -} - - -void StubCode::GenerateAsynchronousGapMarkerStub(Assembler* assembler) { - __ break_(0); -} - -} // namespace dart - -#endif // defined TARGET_ARCH_MIPS diff --git a/runtime/vm/stub_code_mips_test.cc b/runtime/vm/stub_code_mips_test.cc deleted file mode 100644 index 13e31017f1e..00000000000 --- a/runtime/vm/stub_code_mips_test.cc +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file -// for details. All rights reserved. Use of this source code is governed by a -// BSD-style license that can be found in the LICENSE file. - -#include "vm/globals.h" -#if defined(TARGET_ARCH_MIPS) - -#include "vm/isolate.h" -#include "vm/dart_entry.h" -#include "vm/native_entry.h" -#include "vm/native_entry_test.h" -#include "vm/object.h" -#include "vm/runtime_entry.h" -#include "vm/stub_code.h" -#include "vm/symbols.h" -#include "vm/unit_test.h" - -#define __ assembler-> - -namespace dart { - -static Function* CreateFunction(const char* name) { - const String& class_name = - String::Handle(Symbols::New(Thread::Current(), "ownerClass")); - const Script& script = Script::Handle(); - const Library& lib = Library::Handle(Library::New(class_name)); - const Class& owner_class = Class::Handle( - Class::New(lib, class_name, script, TokenPosition::kNoSource)); - const String& function_name = - String::ZoneHandle(Symbols::New(Thread::Current(), name)); - Function& function = Function::ZoneHandle(Function::New( - function_name, RawFunction::kRegularFunction, true, false, false, false, - false, owner_class, TokenPosition::kNoSource)); - return &function; -} - - -// Test calls to stub code which calls into the runtime. -static void GenerateCallToCallRuntimeStub(Assembler* assembler, int length) { - const int argc = 2; - const Smi& smi_length = Smi::ZoneHandle(Smi::New(length)); - __ EnterDartFrame(0); - __ PushObject(Object::null_object()); // Push Null object for return value. - __ PushObject(smi_length); // Push argument 1: length. - __ PushObject(Object::null_object()); // Push argument 2: type arguments. - ASSERT(kAllocateArrayRuntimeEntry.argument_count() == argc); - __ CallRuntime(kAllocateArrayRuntimeEntry, argc); - __ addiu(SP, SP, Immediate(argc * kWordSize)); - __ Pop(V0); // Pop return value from return slot. 
- __ LeaveDartFrameAndReturn();
-}
-
-
-TEST_CASE(CallRuntimeStubCode) {
- extern const Function& RegisterFakeFunction(const char* name,
- const Code& code);
- const int length = 10;
- const char* kName = "Test_CallRuntimeStubCode";
- Assembler _assembler_;
- GenerateCallToCallRuntimeStub(&_assembler_, length);
- const Code& code = Code::Handle(Code::FinalizeCode(
- *CreateFunction("Test_CallRuntimeStubCode"), &_assembler_));
- const Function& function = RegisterFakeFunction(kName, code);
- Array& result = Array::Handle();
- result ^= DartEntry::InvokeFunction(function, Object::empty_array());
- EXPECT_EQ(length, result.Length());
-}
-
-
-// Test calls to stub code which calls into a leaf runtime entry.
-static void GenerateCallToCallLeafRuntimeStub(Assembler* assembler,
- const char* value1,
- const char* value2) {
- const Bigint& bigint1 =
- Bigint::ZoneHandle(Bigint::NewFromCString(value1, Heap::kOld));
- const Bigint& bigint2 =
- Bigint::ZoneHandle(Bigint::NewFromCString(value2, Heap::kOld));
- __ EnterDartFrame(0);
- __ ReserveAlignedFrameSpace(0);
- __ LoadObject(A0, bigint1); // Set up argument 1 bigint1.
- __ LoadObject(A1, bigint2); // Set up argument 2 bigint2.
- __ CallRuntime(kBigintCompareRuntimeEntry, 2);
- __ SmiTag(V0);
- __ LeaveDartFrameAndReturn(); // Return value is in V0.
-}
-
-
-TEST_CASE(CallLeafRuntimeStubCode) {
- extern const Function& RegisterFakeFunction(const char* name,
- const Code& code);
- const char* value1 = "0xAAABBCCDDAABBCCDD";
- const char* value2 = "0xAABBCCDDAABBCCDD";
- const char* kName = "Test_CallLeafRuntimeStubCode";
- Assembler _assembler_;
- GenerateCallToCallLeafRuntimeStub(&_assembler_, value1, value2);
- const Code& code = Code::Handle(Code::FinalizeCode(
- *CreateFunction("Test_CallLeafRuntimeStubCode"), &_assembler_));
- const Function& function = RegisterFakeFunction(kName, code);
- Smi& result = Smi::Handle();
- result ^= DartEntry::InvokeFunction(function, Object::empty_array());
- EXPECT_EQ(1, result.Value());
-}
-
-} // namespace dart
-
-#endif // defined TARGET_ARCH_MIPS
diff --git a/runtime/vm/unit_test.h b/runtime/vm/unit_test.h
index 44300b9f6f4..bb275c231e0 100644
--- a/runtime/vm/unit_test.h
+++ b/runtime/vm/unit_test.h
@@ -162,11 +162,9 @@
 }
 
 
-#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_MIPS) || \
- defined(TARGET_ARCH_ARM64)
-#if defined(HOST_ARCH_ARM) || defined(HOST_ARCH_MIPS) || \
- defined(HOST_ARCH_ARM64)
-// Running on actual ARM or MIPS hardware, execute code natively.
+#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
+#if defined(HOST_ARCH_ARM) || defined(HOST_ARCH_ARM64)
+// Running on actual ARM hardware, execute code natively.
 #define EXECUTE_TEST_CODE_INT32(name, entry) reinterpret_cast<name>(entry)()
 #define EXECUTE_TEST_CODE_INT64(name, entry) reinterpret_cast<name>(entry)()
 #define EXECUTE_TEST_CODE_INT64_LL(name, entry, long_arg0, long_arg1) \
@@ -182,7 +180,7 @@
 #define EXECUTE_TEST_CODE_INT32_INTPTR(name, entry, pointer_arg) \
 reinterpret_cast<name>(entry)(pointer_arg)
 #else
-// Not running on ARM or MIPS hardware, call simulator to execute code.
+// Not running on ARM hardware, call simulator to execute code.
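These EXECUTE_TEST_CODE_* macros either cast the generated entry point to a function pointer and call it directly (on matching hardware) or route the raw address through the architecture simulator. The shape of that dispatch, as a reduced sketch in which RUNNING_NATIVELY and SimulatorCall are stand-ins for the HOST_ARCH_* checks and Simulator::Current()->Call(...):

    #include <cstdint>

    using uword = uintptr_t;

    int64_t SimulatorCall(uword entry);  // Assumed provided by a simulator.

    inline int32_t ExecuteInt32(uword entry) {
    #if defined(RUNNING_NATIVELY)
      // Native host: the generated code runs directly.
      return reinterpret_cast<int32_t (*)()>(entry)();
    #else
      // Simulated host: an interpreter loop executes the code instead.
      return static_cast<int32_t>(SimulatorCall(entry));
    #endif
    }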
#if defined(ARCH_IS_64_BIT)
 #define EXECUTE_TEST_CODE_INT64(name, entry) \
 static_cast<int64_t>( \
@@ -232,8 +230,8 @@
 Utils::Low32Bits(bit_cast<int64_t, double>(double_arg)), \
 Utils::High32Bits(bit_cast<int64_t, double>(double_arg)), 0, 0, false, \
 true))
-#endif // defined(HOST_ARCH_ARM) || defined(HOST_ARCH_MIPS)
-#endif // defined(TARGET_ARCH_{ARM, ARM64, MIPS})
+#endif // defined(HOST_ARCH_ARM)
+#endif // defined(TARGET_ARCH_{ARM, ARM64})
 
 
 inline Dart_Handle NewString(const char* str) {
diff --git a/runtime/vm/vm_sources.gypi b/runtime/vm/vm_sources.gypi
index f982d8c347c..d870c59b319 100644
--- a/runtime/vm/vm_sources.gypi
+++ b/runtime/vm/vm_sources.gypi
@@ -25,9 +25,6 @@
 'assembler_ia32.cc',
 'assembler_ia32.h',
 'assembler_ia32_test.cc',
- 'assembler_mips.cc',
- 'assembler_mips.h',
- 'assembler_mips_test.cc',
 'assembler_test.cc',
 'assembler_x64.cc',
 'assembler_x64.h',
@@ -98,8 +95,6 @@
 'code_patcher_dbc.cc',
 'code_patcher_ia32.cc',
 'code_patcher_ia32_test.cc',
- 'code_patcher_mips.cc',
- 'code_patcher_mips_test.cc',
 'code_patcher_x64.cc',
 'code_patcher_x64_test.cc',
 'compilation_trace.cc',
@@ -114,14 +109,12 @@
 'constants_arm.h',
 'constants_arm64.h',
 'constants_ia32.h',
- 'constants_mips.h',
 'constants_x64.h',
 'cpu.h',
 'cpu_arm.cc',
 'cpu_arm64.cc',
 'cpu_dbc.cc',
 'cpu_ia32.cc',
- 'cpu_mips.cc',
 'cpu_test.cc',
 'cpu_x64.cc',
 'cpuid.h',
@@ -154,7 +147,6 @@
 'debugger_arm64.cc',
 'debugger_dbc.cc',
 'debugger_ia32.cc',
- 'debugger_mips.cc',
 'debugger_x64.cc',
 'deferred_objects.cc',
 'deferred_objects.h',
@@ -166,7 +158,6 @@
 'disassembler_arm64.cc',
 'disassembler_dbc.cc',
 'disassembler_ia32.cc',
- 'disassembler_mips.cc',
 'disassembler_test.cc',
 'disassembler_x64.cc',
 'double_conversion.cc',
@@ -197,7 +188,6 @@
 'flow_graph_compiler_arm64.cc',
 'flow_graph_compiler_dbc.cc',
 'flow_graph_compiler_ia32.cc',
- 'flow_graph_compiler_mips.cc',
 'flow_graph_compiler_x64.cc',
 'flow_graph_inliner.cc',
 'flow_graph_inliner.h',
@@ -242,9 +232,6 @@
 'instructions_ia32.cc',
 'instructions_ia32.h',
 'instructions_ia32_test.cc',
- 'instructions_mips.cc',
- 'instructions_mips.h',
- 'instructions_mips_test.cc',
 'instructions_x64.cc',
 'instructions_x64.h',
 'instructions_x64_test.cc',
@@ -254,7 +241,6 @@
 'intermediate_language_arm64.cc',
 'intermediate_language_dbc.cc',
 'intermediate_language_ia32.cc',
- 'intermediate_language_mips.cc',
 'intermediate_language_test.cc',
 'intermediate_language_x64.cc',
 'intrinsifier.cc',
@@ -263,7 +249,6 @@
 'intrinsifier_arm64.cc',
 'intrinsifier_dbc.cc',
 'intrinsifier_ia32.cc',
- 'intrinsifier_mips.cc',
 'intrinsifier_x64.cc',
 'isolate.cc',
 'isolate.h',
@@ -294,7 +279,6 @@
 'malloc_hooks_arm.cc',
 'malloc_hooks_arm64.cc',
 'malloc_hooks_ia32.cc',
- 'malloc_hooks_mips.cc',
 'malloc_hooks_x64.cc',
 'malloc_hooks.h',
 'malloc_hooks_test.cc',
@@ -340,7 +324,6 @@
 'object_id_ring.cc',
 'object_id_ring.h',
 'object_id_ring_test.cc',
- 'object_mips_test.cc',
 'object_reload.cc',
 'object_service.cc',
 'object_set.h',
@@ -438,7 +421,6 @@
 'runtime_entry_arm64.cc',
 'runtime_entry_dbc.cc',
 'runtime_entry_ia32.cc',
- 'runtime_entry_mips.cc',
 'runtime_entry.cc',
 'runtime_entry_x64.cc',
 'safepoint.cc',
@@ -473,8 +455,6 @@
 'simulator_arm64.h',
 'simulator_dbc.cc',
 'simulator_dbc.h',
- 'simulator_mips.cc',
- 'simulator_mips.h',
 'snapshot.cc',
 'snapshot.h',
 'snapshot_ids.h',
@@ -488,7 +468,6 @@
 'stack_frame_arm.h',
 'stack_frame_arm64.h',
 'stack_frame_ia32.h',
- 'stack_frame_mips.h',
 'stack_frame_test.cc',
 'stack_frame_x64.h',
 'stack_trace.cc',
@@ -504,8 +483,6 @@
 'stub_code_dbc.cc',
 'stub_code_ia32.cc',
 'stub_code_ia32_test.cc',
- 'stub_code_mips.cc',
- 
'stub_code_mips_test.cc', 'stub_code_x64.cc', 'stub_code_x64_test.cc', 'symbols.cc', diff --git a/sdk/bin/dart b/sdk/bin/dart index 7dd1e92f9aa..f50184bf44d 100755 --- a/sdk/bin/dart +++ b/sdk/bin/dart @@ -30,8 +30,8 @@ then DIRS=$( ls "$OUT_DIR" ) # list of possible configurations in decreasing desirability CONFIGS=("ReleaseX64" "ReleaseIA32" "DebugX64" "DebugIA32" - "ReleaseARM" "ReleaseARM64" "ReleaseARMV5TE" "ReleaseMIPS" - "DebugARM" "DebugARM64" "DebugARMV5TE" "DebugMIPS") + "ReleaseARM" "ReleaseARM64" "ReleaseARMV5TE" + "DebugARM" "DebugARM64" "DebugARMV5TE") DART_CONFIGURATION="None" for CONFIG in ${CONFIGS[*]} do diff --git a/sdk/bin/pub b/sdk/bin/pub index 20e9d3d8970..5dda6113ec7 100755 --- a/sdk/bin/pub +++ b/sdk/bin/pub @@ -47,8 +47,8 @@ then DIRS=$( ls "$OUT_DIR" ) # list of possible configurations in decreasing desirability CONFIGS=("ReleaseX64" "ReleaseIA32" "DebugX64" "DebugIA32" - "ReleaseARM" "ReleaseARM64" "ReleaseARMV5TE" "ReleaseMIPS" - "DebugARM" "DebugARM64" "DebugARMV5TE" "DebugMIPS") + "ReleaseARM" "ReleaseARM64" "ReleaseARMV5TE" + "DebugARM" "DebugARM64" "DebugARMV5TE") DART_CONFIGURATION="None" for CONFIG in ${CONFIGS[*]} do diff --git a/tests/co19/co19-runtime.status b/tests/co19/co19-runtime.status index 3e59b361a8f..275a10ffcf2 100644 --- a/tests/co19/co19-runtime.status +++ b/tests/co19/co19-runtime.status @@ -55,14 +55,14 @@ LibTest/core/List/List_class_A01_t02: Pass, Slow [ ($runtime == vm || $runtime == dart_precompiled) && ($arch != x64 && $arch != simarm64 && $arch != arm64 && $arch != simdbc64 && $arch != simdbc) ] LibTest/core/int/operator_left_shift_A01_t02: Fail # co19 issue 129 -[ ($compiler == none || $compiler == precompiler) && ($runtime == vm || $runtime == dart_precompiled) && ($arch == mips || $arch == arm64) ] +[ ($compiler == none || $compiler == precompiler) && ($runtime == vm || $runtime == dart_precompiled) && $arch == arm64 ] # These tests take too much memory (300 MB) for our 1 GB test machine. # co19 issue 673. http://code.google.com/p/co19/issues/detail?id=673 LibTest/core/List/List_class_A01_t02: Skip # co19 issue 673 LibTest/collection/ListMixin/ListMixin_class_A01_t02: Skip # co19 issue 673 LibTest/collection/ListBase/ListBase_class_A01_t02: Skip # co19 issue 673 -[ ($runtime == vm || $runtime == dart_precompiled) && ($arch == simarm || $arch == simarmv6 || $arch == simarmv5te || $arch == simmips || $arch == simarm64 || $arch == simdbc || $arch == simdbc64) ] +[ ($runtime == vm || $runtime == dart_precompiled) && ($arch == simarm || $arch == simarmv6 || $arch == simarmv5te || $arch == simarm64 || $arch == simdbc || $arch == simdbc64) ] LibTest/collection/DoubleLinkedQueue/DoubleLinkedQueue_class_A01_t01: Skip # Timeout LibTest/collection/IterableBase/IterableBase_class_A01_t02: Skip # Timeout LibTest/collection/IterableMixin/IterableMixin_class_A02_t01: Skip # Timeout diff --git a/tests/isolate/isolate.status b/tests/isolate/isolate.status index d0a7b583107..d4124efb3bc 100644 --- a/tests/isolate/isolate.status +++ b/tests/isolate/isolate.status @@ -12,9 +12,6 @@ isolate_stress_test: Skip # Issue 12588: Uses dart:html. This should be able to [ $runtime != vm || $mode == product || $compiler == app_jit ] checked_test: Skip # Unsupported. -[ ($runtime == vm || $runtime == dart_precompiled) && $arch == mips && $mode == debug ] -mandel_isolate_test: Skip # Uses 600 MB Ram on our 1 GB test device. 
- [ $compiler == none || $compiler == precompiler || $compiler == app_jit ] compile_time_error_test/01: Skip # Issue 12587 ping_test: Skip # Resolve test issues diff --git a/tests/language/language.status b/tests/language/language.status index e9b969fbfd5..c184c3e1259 100644 --- a/tests/language/language.status +++ b/tests/language/language.status @@ -144,9 +144,6 @@ async_await_test: Skip # Issue 26198 [ $compiler == none && $runtime == drt ] disassemble_test: Pass, Fail # Issue 18122 -[ ($runtime == vm || $runtime == dart_precompiled) && $arch == mips && $mode == debug ] -large_class_declaration_test: SkipSlow # Times out. Issue 20352 - [ ($runtime == vm || $runtime == flutter || $runtime == dart_precompiled) && $arch == arm64 ] large_class_declaration_test: SkipSlow # Uses too much memory. closure_cycles_test: Pass, Slow @@ -154,12 +151,11 @@ closure_cycles_test: Pass, Slow [ $compiler == none && ($runtime == dartium || $runtime == drt) && $mode == debug ] large_class_declaration_test: SkipSlow # Times out. Issue 20352 -[ ($runtime == vm || $runtime == dart_precompiled) && ($arch == simmips || $arch == mips) ] +[ $runtime == vm || $runtime == dart_precompiled ] vm/load_to_load_unaligned_forwarding_vm_test: Pass, Crash # Unaligned offset. Issue 22151 vm/unaligned_float_access_literal_index_test: Pass, Crash # Unaligned offset. Issue 22151 vm/unaligned_float_access_literal_index_test: Pass, Crash # Unaligned offset. Issue 22151 - [ $compiler == none && ($runtime == dartium || $runtime == drt) ] issue23244_test: Fail # Issue 23244 diff --git a/tests/standalone/io/test_extension_test.dart b/tests/standalone/io/test_extension_test.dart index c2c09486235..ebef743459e 100644 --- a/tests/standalone/io/test_extension_test.dart +++ b/tests/standalone/io/test_extension_test.dart @@ -57,11 +57,9 @@ String getArchFromBuildDir(String buildDirectory) { if (buildDirectory.endsWith('SIMARM64')) return ''; if (buildDirectory.endsWith('SIMDBC')) return ''; if (buildDirectory.endsWith('SIMDBC64')) return ''; - if (buildDirectory.endsWith('SIMMIPS')) return ''; if (buildDirectory.endsWith('ARM')) return '-arm'; if (buildDirectory.endsWith('ARM64')) return '-arm64'; if (buildDirectory.endsWith('IA32')) return '-ia32'; - if (buildDirectory.endsWith('MIPS')) return '-mips'; if (buildDirectory.endsWith('X64')) return '-x64'; return 'unknown'; } diff --git a/tests/standalone/standalone.status b/tests/standalone/standalone.status index 0f7505f96bc..16330cc7027 100644 --- a/tests/standalone/standalone.status +++ b/tests/standalone/standalone.status @@ -119,31 +119,16 @@ io/file_stream_test: Skip # Issue 26109 io/file_typed_data_test: Skip # Issue 26109 io/file_input_stream_test: Skip # Issue 26109 -[ $runtime != vm || $arch == arm || $arch == arm64 || $arch == mips || ($system == windows && $mode == debug) ] +[ $runtime != vm || $arch == arm || $arch == arm64 || ($system == windows && $mode == debug) ] fragmentation_test: Skip # VM test uses too much memory for small systems. 
-[ $arch == simarm || $arch == simarmv6 || $arch == simarmv5te || $arch == simmips ] +[ $arch == simarm || $arch == simarmv6 || $arch == simarmv5te ] out_of_memory_test: Skip # passes on Mac, crashes on Linux oom_error_stacktrace_test: Skip # Fails on Linux -[ $arch == simmips ] -io/socket_bind_test: Pass, Fail # Issue 28315 -io/http_server_response_test: Pass, Crash # Issue 29012 - [ ($arch == simarm || $arch == simdbc || $arch == simdbc64) && $mode == debug && $checked ] io/web_socket_test: Pass, Fail # Issue 26814 -[ $arch == mips ] -io/file_stat_test: Fail # Issue 17440 -io/process_sync_test: Skip # Starts 10 dart subprocesses, uses too much memory. -io/signals_test: Skip # Starts 10 dart subprocesses, uses too much memory -io/socket_source_address_test: Fail # Issue 22597 - -[ $arch == mips && $mode == debug ] -io/web_socket_test: SkipSlow # Times out. Issue 20352 -io/test_runner_test: Skip # Flakily times out in a subtest. Issue 201351 -io/http_client_stays_alive_test: Skip # Timing dependent test, MIPS machine too slow. - [ $compiler == none && $runtime == dartium && ! $checked ] assert_test: Fail # Issue 14651. diff --git a/third_party/pkg_tested/pkg_tested.status b/third_party/pkg_tested/pkg_tested.status index 9a584763d11..958daf2287f 100644 --- a/third_party/pkg_tested/pkg_tested.status +++ b/third_party/pkg_tested/pkg_tested.status @@ -23,7 +23,7 @@ pub/*: SkipByDesign pub/test/run/app_can_read_from_stdin_test: Fail # Issue 19448 pub/test/run/forwards_signal_posix_test: SkipByDesign -[ $runtime == vm && ($mode == debug || $arch == mips || $arch == simmips || $arch == simarm || $arch == simarmv6 || $arch == simarmv5te || $arch == simarm64 || $builder_tag == asan) ] -dart_style/test/command_line_test: Skip # The test controller does not take into account that tests take much longer in debug mode or on simulators/mips. -dart_style/test/formatter_test: Skip # The test controller does not take into account that tests take much longer in debug mode or on simulators/mips. +[ $runtime == vm && ($mode == debug || $arch == simarm || $arch == simarmv6 || $arch == simarmv5te || $arch == simarm64 || $builder_tag == asan) ] +dart_style/test/command_line_test: Skip # The test controller does not take into account that tests take much longer in debug mode or on simulators. +dart_style/test/formatter_test: Skip # The test controller does not take into account that tests take much longer in debug mode or on simulators. diff --git a/third_party/tcmalloc/README.dart b/third_party/tcmalloc/README.dart index bf326f93d79..f4705a9092f 100644 --- a/third_party/tcmalloc/README.dart +++ b/third_party/tcmalloc/README.dart @@ -21,5 +21,5 @@ To roll tcmalloc forward: . Update the DEPS file with the new git hash. -. Build and run tests for Debug, Release, and Product builds for ia32, x64, mips +. Build and run tests for Debug, Release, and Product builds for ia32, x64, and arm for Linux and any other OSs that are supported. 
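The tool changes that follow delete 'mips' and 'simmips' from every architecture table in tools/build.py and tools/gn.py. The surviving host-CPU mapping from gn.py's HostCpuForArch, restated as a sketch (C++ here for consistency with the rest of the examples in this patch):

    #include <set>
    #include <string>

    // Post-removal host-CPU table: 32-bit and simulator targets build with an
    // x86 host toolchain, the remaining configurations with x64.
    std::string HostCpuForArch(const std::string& arch) {
      static const std::set<std::string> x86_hosted = {
          "ia32",     "arm",        "armv6",  "armv5te",  "simarm",
          "simarmv6", "simarmv5te", "simdbc", "armsimdbc"};
      static const std::set<std::string> x64_hosted = {
          "x64", "arm64", "simarm64", "simdbc64", "armsimdbc64"};
      if (x86_hosted.count(arch) != 0) return "x86";
      if (x64_hosted.count(arch) != 0) return "x64";
      return "";  // Unknown architecture.
    }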
diff --git a/tools/build.py b/tools/build.py index fe971d360ba..0c780f37076 100755 --- a/tools/build.py +++ b/tools/build.py @@ -63,7 +63,7 @@ def BuildOptions(): result.add_option("-a", "--arch", help='Target architectures (comma-separated).', metavar='[all,ia32,x64,simarm,arm,simarmv6,armv6,simarmv5te,armv5te,' - 'simmips,mips,simarm64,arm64,simdbc,armsimdbc]', + 'simarm64,arm64,simdbc,armsimdbc]', default=utils.GuessArchitecture()) result.add_option("--os", help='Target OSs (comma-separated).', @@ -117,7 +117,7 @@ def ProcessOptions(options, args): return False for arch in options.arch: archs = ['ia32', 'x64', 'simarm', 'arm', 'simarmv6', 'armv6', - 'simarmv5te', 'armv5te', 'simmips', 'mips', 'simarm64', 'arm64', + 'simarmv5te', 'armv5te', 'simarm64', 'arm64', 'simdbc', 'simdbc64', 'armsimdbc', 'armsimdbc64'] if not arch in archs: print "Unknown arch %s" % arch @@ -135,7 +135,7 @@ def ProcessOptions(options, args): print ("Cross-compilation to %s is not supported on host os %s." % (os_name, HOST_OS)) return False - if not arch in ['ia32', 'x64', 'arm', 'armv6', 'armv5te', 'arm64', 'mips', + if not arch in ['ia32', 'x64', 'arm', 'armv6', 'armv5te', 'arm64', 'simdbc', 'simdbc64']: print ("Cross-compilation to %s is not supported for architecture %s." % (os_name, arch)) @@ -175,8 +175,6 @@ def GetToolchainPrefix(target_os, arch, options): if arch == 'arm64': return (DEFAULT_ARM_CROSS_COMPILER_PATH + "/aarch64-linux-gnu") - # TODO(zra): Find default MIPS Linux cross-compiler. - return None @@ -484,13 +482,6 @@ def BuildNinjaCommand(options, target, target_os, mode, arch): filter_xcodebuild_output = False def BuildOneConfig(options, target, target_os, mode, arch): global filter_xcodebuild_output - if arch.startswith('mips'): - bold = '\033[1m' - reset = '\033[0m' - print(bold + "Warning: MIPS architectures are unlikely to be supported in " - "upcoming releases. Please consider using another architecture " - "and/or file an issue explaining your specific use of and need for " - "MIPS support." 
+ reset) start_time = time.time() args = [] build_config = utils.GetBuildConf(mode, arch, target_os) diff --git a/tools/gardening/lib/src/buildbot_data.dart b/tools/gardening/lib/src/buildbot_data.dart index 31fd89649d8..a061ab1fe2e 100644 --- a/tools/gardening/lib/src/buildbot_data.dart +++ b/tools/gardening/lib/src/buildbot_data.dart @@ -96,18 +96,6 @@ const List buildGroups = const [ 'vm tests', 'checked vm tests', ]), - const BuildSubgroup(shardNames: const [ - 'vm-linux-debug-simmips-be', - ], testSteps: const [ - 'vm tests', - 'checked vm tests', - ]), - const BuildSubgroup(shardNames: const [ - 'vm-linux-release-simmips-be', - ], testSteps: const [ - 'vm tests', - 'checked vm tests', - ]), const BuildSubgroup(shardNames: const [ 'vm-linux-debug-simarm-be', ], testSteps: const [ diff --git a/tools/gardening/lib/src/shard_data.dart b/tools/gardening/lib/src/shard_data.dart index d80fcfaf10e..5553b941ee1 100644 --- a/tools/gardening/lib/src/shard_data.dart +++ b/tools/gardening/lib/src/shard_data.dart @@ -22,8 +22,6 @@ const Map<String, List<String>> shardGroups = const { 'vm-win-release-x64-be', 'vm-win-debug-ia32-be', 'vm-win-release-ia32-be', - 'vm-linux-debug-simmips-be', - 'vm-linux-release-simmips-be', 'vm-linux-debug-simarm-be', 'vm-linux-release-simarm-be', 'vm-linux-release-simarm64-be', @@ -175,8 +173,6 @@ const Map<String, List<String>> shardGroups = const { 'vm-win-release-x64-dev', 'vm-win-debug-ia32-dev', 'vm-win-release-ia32-dev', - 'vm-linux-debug-simmips-dev', - 'vm-linux-release-simmips-dev', 'vm-linux-debug-simarm-dev', 'vm-linux-release-simarm-dev', 'vm-linux-release-simarm64-dev', @@ -356,8 +352,6 @@ const Map<String, List<String>> shardGroups = const { 'vm-win-release-x64-stable', 'vm-win-debug-ia32-stable', 'vm-win-release-ia32-stable', - 'vm-linux-debug-simmips-stable', - 'vm-linux-release-simmips-stable', 'vm-linux-debug-simarm-stable', 'vm-linux-release-simarm-stable', 'vm-linux-release-simarm64-stable', diff --git a/tools/gn.py b/tools/gn.py index d609560f58b..54e48e9bd2f 100755 --- a/tools/gn.py +++ b/tools/gn.py @@ -75,8 +75,8 @@ def ToCommandLine(gn_args): def HostCpuForArch(arch): - if arch in ['ia32', 'arm', 'armv6', 'armv5te', 'mips', - 'simarm', 'simarmv6', 'simarmv5te', 'simmips', 'simdbc', + if arch in ['ia32', 'arm', 'armv6', 'armv5te', + 'simarm', 'simarmv6', 'simarmv5te', 'simdbc', 'armsimdbc']: return 'x86' if arch in ['x64', 'arm64', 'simarm64', 'simdbc64', 'armsimdbc64']: @@ -84,12 +84,10 @@ def TargetCpuForArch(arch, target_os): - if arch in ['ia32', 'simarm', 'simarmv6', 'simarmv5te', 'simmips']: + if arch in ['ia32', 'simarm', 'simarmv6', 'simarmv5te']: return 'x86' if arch in ['simarm64']: return 'x64' - if arch == 'mips': - return 'mipsel' if arch == 'simdbc': return 'arm' if target_os == 'android' else 'x86' if arch == 'simdbc64': @@ -127,14 +125,12 @@ def DontUseClang(args, target_os, host_cpu, target_cpu): # We don't have clang on Windows. return (target_os == 'win' # TODO(zra): Experiment with using clang for the arm cross-builds. - or (target_os == 'linux' - and (target_cpu.startswith('arm') or - target_cpu.startswith('mips')) + or (target_os == 'linux' and target_cpu.startswith('arm')) # TODO(zra): Only use clang when a sanitizer build is specified until # clang bugs in tcmalloc inline assembly for ia32 are fixed.
or (target_os == 'linux' and host_cpu == 'x86' - and not UseSanitizer(args)))) + and not UseSanitizer(args))) def ToGnArgs(args, mode, arch, target_os): @@ -146,14 +142,6 @@ def ToGnArgs(args, mode, arch, target_os): else: gn_args['target_os'] = target_os - if arch.startswith('mips'): - bold = '\033[1m' - reset = '\033[0m' - print(bold + "Warning: MIPS architectures are unlikely to be supported in " - "upcoming releases. Please consider using another architecture " - "and/or file an issue explaining your specific use of and need for " - "MIPS support." + reset) - gn_args['dart_target_arch'] = arch gn_args['target_cpu'] = TargetCpuForArch(arch, target_os) gn_args['host_cpu'] = HostCpuForArch(arch) @@ -272,7 +260,7 @@ def ProcessOptions(args): return False for arch in args.arch: archs = ['ia32', 'x64', 'simarm', 'arm', 'simarmv6', 'armv6', - 'simarmv5te', 'armv5te', 'simmips', 'mips', 'simarm64', 'arm64', + 'simarmv5te', 'armv5te', 'simarm64', 'arm64', 'simdbc', 'simdbc64', 'armsimdbc', 'armsimdbc64'] if not arch in archs: print "Unknown arch %s" % arch @@ -290,7 +278,7 @@ def ProcessOptions(args): print ("Cross-compilation to %s is not supported on host os %s." % (os_name, HOST_OS)) return False - if not arch in ['ia32', 'x64', 'arm', 'armv6', 'armv5te', 'arm64', 'mips', + if not arch in ['ia32', 'x64', 'arm', 'armv6', 'armv5te', 'arm64', 'simdbc', 'simdbc64']: print ("Cross-compilation to %s is not supported for architecture %s." % (os_name, arch)) @@ -323,7 +311,7 @@ def parse_args(args): type=str, help='Target architectures (comma-separated).', metavar='[all,ia32,x64,simarm,arm,simarmv6,armv6,simarmv5te,armv5te,' - 'simmips,mips,simarm64,arm64,simdbc,armsimdbc]', + 'simarm64,arm64,simdbc,armsimdbc]', default='x64') common_group.add_argument('--mode', '-m', type=str, diff --git a/tools/gyp/configurations.gypi b/tools/gyp/configurations.gypi index 77d05b331bb..3d02bc66990 100644 --- a/tools/gyp/configurations.gypi +++ b/tools/gyp/configurations.gypi @@ -24,8 +24,6 @@ ['"<(target_arch)"=="simarmv6"', { 'dart_target_arch': 'SIMARMV6', }], ['"<(target_arch)"=="simarmv5te"', { 'dart_target_arch': 'SIMARMV5TE', }], ['"<(target_arch)"=="simarm64"', { 'dart_target_arch': 'SIMARM64', }], - ['"<(target_arch)"=="mips"', { 'dart_target_arch': 'MIPS', }], - ['"<(target_arch)"=="simmips"', { 'dart_target_arch': 'SIMMIPS', }], ['"<(target_arch)"=="simdbc"', { 'dart_target_arch': 'SIMDBC', }], ['"<(target_arch)"=="simdbc64"', { 'dart_target_arch': 'SIMDBC64', }], [ 'OS=="linux"', { 'dart_target_os': 'Linux', } ], @@ -120,20 +118,6 @@ ], }, - 'Dart_simmips_Base': { - 'abstract': 1, - 'defines': [ - 'TARGET_ARCH_MIPS', - ] - }, - - 'Dart_mips_Base': { - 'abstract': 1, - 'defines': [ - 'TARGET_ARCH_MIPS', - ], - }, - 'Dart_simdbc_Base': { 'abstract': 1, 'defines': [ @@ -336,36 +320,6 @@ ], }, - 'DebugSIMMIPS': { - 'inherit_from': [ - 'Dart_Base', 'Dart_simmips_Base', 'Dart_Debug', - 'Dart_<(dart_target_os)_Base', - 'Dart_<(dart_target_os)_simmips_Base', - 'Dart_<(dart_target_os)_Debug', - ], - 'defines': [ - 'DEBUG', - ], - }, - - 'ReleaseSIMMIPS': { - 'inherit_from': [ - 'Dart_Base', 'Dart_simmips_Base', 'Dart_Release', - 'Dart_<(dart_target_os)_Base', - 'Dart_<(dart_target_os)_simmips_Base', - 'Dart_<(dart_target_os)_Release', - ], - }, - - 'ProductSIMMIPS': { - 'inherit_from': [ - 'Dart_Base', 'Dart_simmips_Base', 'Dart_Product', - 'Dart_<(dart_target_os)_Base', - 'Dart_<(dart_target_os)_simmips_Base', - 'Dart_<(dart_target_os)_Product', - ], - }, - 'DebugSIMDBC': { 'inherit_from': [ 'Dart_Base', 
'Dart_simdbc_Base', 'Dart_Debug', @@ -458,7 +412,7 @@ ], }, - # ARM and MIPS hardware configurations are only for Linux and Android. + # ARM hardware configurations are only for Linux and Android. 'DebugXARM': { 'inherit_from': [ 'Dart_Base', 'Dart_arm_Base', 'Dart_Debug', @@ -675,62 +629,6 @@ ], }, - 'DebugXMIPS': { - 'inherit_from': [ - 'Dart_Base', 'Dart_mips_Base', 'Dart_Debug', - 'Dart_Linux_Base', - 'Dart_Linux_xmips_Base', - 'Dart_Linux_xmips_Debug', - 'Dart_Linux_Debug', - ], - }, - - 'ReleaseXMIPS': { - 'inherit_from': [ - 'Dart_Base', 'Dart_mips_Base', 'Dart_Release', - 'Dart_Linux_Base', - 'Dart_Linux_xmips_Base', - 'Dart_Linux_xmips_Release', - 'Dart_Linux_Release', - ], - }, - - 'ProductXMIPS': { - 'inherit_from': [ - 'Dart_Base', 'Dart_mips_Base', 'Dart_Product', - 'Dart_Linux_Base', - 'Dart_Linux_xmips_Base', - 'Dart_Linux_Product', - ], - }, - - 'DebugMIPS': { - 'inherit_from': [ - 'Dart_Base', 'Dart_mips_Base', 'Dart_Debug', - 'Dart_Linux_Base', - 'Dart_Linux_mips_Base', - 'Dart_Linux_Debug', - ], - }, - - 'ReleaseMIPS': { - 'inherit_from': [ - 'Dart_Base', 'Dart_mips_Base', 'Dart_Release', - 'Dart_Linux_Base', - 'Dart_Linux_mips_Base', - 'Dart_Linux_Release', - ], - }, - - 'ProductMIPS': { - 'inherit_from': [ - 'Dart_Base', 'Dart_mips_Base', 'Dart_Product', - 'Dart_Linux_Base', - 'Dart_Linux_mips_Base', - 'Dart_Linux_Product', - ], - }, - # Android configurations. The configuration names explicitly include # 'Android' because we are cross-building from Linux, and, when building # the standalone VM, we cannot inspect the gyp built-in 'OS' variable to diff --git a/tools/gyp/configurations_make.gypi b/tools/gyp/configurations_make.gypi index 93d08114add..523dfab6672 100644 --- a/tools/gyp/configurations_make.gypi +++ b/tools/gyp/configurations_make.gypi @@ -257,91 +257,6 @@ ], }, - 'Dart_Linux_simmips_Base': { - 'abstract': 1, - 'cflags': [ - '-O3', - '-m32', - '-msse2', - '-mfpmath=sse', - ], - 'ldflags': [ - '-m32', - ], - }, - - # MIPS cross-build - 'Dart_Linux_xmips_Base': { - 'abstract': 1, - 'target_conditions': [ - ['_toolset=="target"', { - 'cflags': [ - '-EL', - '-march=mips32', - '-mhard-float', - '-fno-strict-overflow', - ], - 'ldflags': [ - '-EL', - ], - }], - ['_toolset=="host"',{ - 'cflags': [ - '-O3', - '-m32', - '-msse2', - '-mfpmath=sse', - ], - 'ldflags': [ - '-m32', - ], - }]] - }, - - # These flags are needed for tcmalloc to be able to collect stack traces - # for heap profiling on mips. - 'Dart_Linux_xmips_Debug': { - 'abstract': 1, - 'target_conditions': [ - ['_toolset=="target"', { - 'cflags!': [ - '-fno-exceptions', - ], - 'cflags': [ - '-fexceptions', - '-funwind-tables', - ], - }], - ], - }, - - # These flags are needed for tcmalloc to be able to collect stack traces - # for heap profiling on mips. 
- 'Dart_Linux_xmips_Release': { - 'abstract': 1, - 'target_conditions': [ - ['_toolset=="target"', { - 'cflags!': [ - '-fno-exceptions', - ], - 'cflags': [ - '-fexceptions', - '-funwind-tables', - ], - }], - ], - }, - - # MIPS native build - 'Dart_Linux_mips_Base': { - 'abstract': 1, - 'cflags': [ - '-march=mips32', - '-mhard-float', - '-fno-strict-overflow', - ], - }, - 'Dart_Linux_Debug': { 'abstract': 1, 'cflags': [ diff --git a/tools/gyp/configurations_msvs.gypi b/tools/gyp/configurations_msvs.gypi index 10e3c472f9d..530b70152f8 100644 --- a/tools/gyp/configurations_msvs.gypi +++ b/tools/gyp/configurations_msvs.gypi @@ -38,9 +38,6 @@ 'Dart_Win_simarm64_Base': { 'abstract': 1, }, - 'Dart_Win_simmips_Base': { - 'abstract': 1, - }, 'Dart_Win_simdbc_Base': { 'abstract': 1, }, diff --git a/tools/gyp/configurations_xcode.gypi b/tools/gyp/configurations_xcode.gypi index d0e87707c54..5ce3b688dea 100644 --- a/tools/gyp/configurations_xcode.gypi +++ b/tools/gyp/configurations_xcode.gypi @@ -74,9 +74,6 @@ 'Dart_Macos_simarm64_Base': { 'abstract': 1, }, - 'Dart_Macos_simmips_Base': { - 'abstract': 1, - }, 'Dart_Macos_simdbc_Base': { 'abstract': 1, }, diff --git a/tools/ninja.py b/tools/ninja.py index d7b6ee8b80d..e334564e41f 100755 --- a/tools/ninja.py +++ b/tools/ninja.py @@ -40,7 +40,7 @@ def BuildOptions(): result.add_option("-a", "--arch", help='Target architectures (comma-separated).', metavar='[all,ia32,x64,simarm,arm,simarmv6,armv6,simarmv5te,armv5te,' - 'simmips,mips,simarm64,arm64,simdbc,armsimdbc]', + 'simarm64,arm64,simdbc,armsimdbc]', default=utils.GuessArchitecture()) result.add_option("--os", help='Target OSs (comma-separated).', @@ -76,7 +76,7 @@ def ProcessOptions(options, args): return False for arch in options.arch: archs = ['ia32', 'x64', 'simarm', 'arm', 'simarmv6', 'armv6', - 'simarmv5te', 'armv5te', 'simmips', 'mips', 'simarm64', 'arm64', + 'simarmv5te', 'armv5te', 'simarm64', 'arm64', 'simdbc', 'simdbc64', 'armsimdbc', 'armsimdbc64'] if not arch in archs: print "Unknown arch %s" % arch @@ -94,7 +94,7 @@ def ProcessOptions(options, args): print ("Cross-compilation to %s is not supported on host os %s." % (os_name, HOST_OS)) return False - if not arch in ['ia32', 'x64', 'arm', 'armv6', 'armv5te', 'arm64', 'mips', + if not arch in ['ia32', 'x64', 'arm', 'armv6', 'armv5te', 'arm64', 'simdbc', 'simdbc64']: print ("Cross-compilation to %s is not supported for architecture %s." % (os_name, arch)) @@ -225,13 +225,6 @@ def EnsureGomaStarted(out_dir): # Returns a tuple (build_config, command to run, whether goma is used) def BuildOneConfig(options, targets, target_os, mode, arch): - if arch.startswith('mips'): - bold = '\033[1m' - reset = '\033[0m' - print(bold + "Warning: MIPS architectures are unlikely to be supported in " - "upcoming releases. Please consider using another architecture " - "and/or file an issue explaining your specific use of and need for " - "MIPS support." + reset) build_config = utils.GetBuildConf(mode, arch, target_os) out_dir = utils.GetBuildRoot(HOST_OS, mode, arch, target_os) using_goma = False diff --git a/tools/sdks/README b/tools/sdks/README index 2ceb6a8a192..480627f0ead 100644 --- a/tools/sdks/README +++ b/tools/sdks/README @@ -18,8 +18,7 @@ processor architecture. To upload new versions of these tar files, use the "upload_to_google_storage" tool in depot_tools, and download the new stable SDKs from the dartlang.org -web page. The mips and arm executables must be copied from the machines that +web page. 
The arm executables must be copied from the machines that build them, and stripped. Because these SDKs are used for the presubmit dartfmt check on changed files, they may need to be updated often when dartfmt is changing rapidly. - diff --git a/tools/testing/dart/compiler_configuration.dart b/tools/testing/dart/compiler_configuration.dart index 4cfdbb933f0..7f00973ed2c 100644 --- a/tools/testing/dart/compiler_configuration.dart +++ b/tools/testing/dart/compiler_configuration.dart @@ -722,16 +722,12 @@ class PrecompilerCompilerConfiguration extends CompilerConfiguration { break; case Architecture.ia32: case Architecture.simarm: - case Architecture.simmips: ccFlags = "-m32"; break; case Architecture.arm: case Architecture.arm64: ccFlags = null; break; - case Architecture.mips: - ccFlags = "-EL"; - break; default: throw "Architecture not supported: ${arch.name}"; } diff --git a/tools/testing/dart/configuration.dart b/tools/testing/dart/configuration.dart index b5cd52bc3ae..4556d85d80b 100644 --- a/tools/testing/dart/configuration.dart +++ b/tools/testing/dart/configuration.dart @@ -443,8 +443,6 @@ class Architecture { static const simarmv6 = const Architecture._('simarmv6'); static const simarmv5te = const Architecture._('simarmv5te'); static const simarm64 = const Architecture._('simarm64'); - static const mips = const Architecture._('mips'); - static const simmips = const Architecture._('simmips'); static const simdbc = const Architecture._('simdbc'); static const simdbc64 = const Architecture._('simdbc64'); @@ -461,8 +459,6 @@ class Architecture { simarmv6, simarmv5te, simarm64, - mips, - simmips, simdbc, simdbc64 ], key: (Architecture architecture) => architecture.name); diff --git a/tools/testing/dart/options.dart b/tools/testing/dart/options.dart index ca6f80cd13d..49f32a273f9 100644 --- a/tools/testing/dart/options.dart +++ b/tools/testing/dart/options.dart @@ -163,7 +163,6 @@ all ia32, x64 arm, armv6, armv5te, arm64, simarm, simarmv6, simarmv5te, simarm64, -mips, simmips simdbc, simdbc64''', abbr: 'a', values: ['all']..addAll(Architecture.names), diff --git a/tools/testing/dart/runtime_configuration.dart b/tools/testing/dart/runtime_configuration.dart index 73b9a8657ab..ec859c25399 100644 --- a/tools/testing/dart/runtime_configuration.dart +++ b/tools/testing/dart/runtime_configuration.dart @@ -181,8 +181,6 @@ class DartVmRuntimeConfiguration extends RuntimeConfiguration { case Architecture.armv6: case Architecture.simarmv5te: case Architecture.armv5te: - case Architecture.simmips: - case Architecture.mips: case Architecture.simarm64: case Architecture.simdbc: case Architecture.simdbc64: diff --git a/tools/testing/dart/status_reporter.dart b/tools/testing/dart/status_reporter.dart index b6fd12df8da..accb0439e2b 100644 --- a/tools/testing/dart/status_reporter.dart +++ b/tools/testing/dart/status_reporter.dart @@ -16,7 +16,7 @@ final _combinations = { { 'runtimes': ['vm'], 'modes': ['debug', 'release'], - 'archs': ['ia32', 'x64', 'simarm', 'simmips'], + 'archs': ['ia32', 'x64', 'simarm'], 'compiler': 'none' }, { diff --git a/tools/utils.py b/tools/utils.py index 64a185494c3..62b07ca2056 100644 --- a/tools/utils.py +++ b/tools/utils.py @@ -76,8 +76,6 @@ def GuessArchitecture(): return 'arm' elif os_id.startswith('aarch64'): return 'arm64' - elif os_id.startswith('mips'): - return 'mips' elif '64' in os_id: return 'x64' elif (not os_id) or (not re.match('(x|i[3-6])86', os_id) is None): @@ -252,11 +250,9 @@ ARCH_FAMILY = { 'armv6': 'arm', 'armv5te': 'arm', 'arm64': 'arm', - 'mips':
'mips', 'simarm': 'ia32', 'simarmv6': 'ia32', 'simarmv5te': 'ia32', - 'simmips': 'ia32', 'simarm64': 'ia32', 'simdbc': 'ia32', 'simdbc64': 'ia32', @@ -620,9 +616,7 @@ def CheckedInSdkExecutable(): name = 'dart.exe' elif GuessOS() == 'linux': arch = GuessArchitecture() - if arch == 'mips': - name = 'dart-mips' - elif arch == 'arm': + if arch == 'arm': name = 'dart-arm' elif arch == 'arm64': name = 'dart-arm64' diff --git a/utils/tests/peg/peg.status b/utils/tests/peg/peg.status index 1b56c58f618..0dbf2daf945 100644 --- a/utils/tests/peg/peg.status +++ b/utils/tests/peg/peg.status @@ -13,9 +13,3 @@ [ $arch == simarm64 ] *: Skip - -[ $arch == mips ] -*: Skip - -[ $arch == simmips ] -*: Skip
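With the MIPS configurations stripped from the build scripts, status files, and buildbot data above, a final sanity pass is to sweep the tree for leftovers. A rough sketch of such a check in Python; the directory argument and the search patterns are illustrative, not part of this patch:

    import os

    NEEDLES = ('simmips', 'TARGET_ARCH_MIPS')  # crude patterns; extend as needed

    def find_stragglers(root):
        # Walk `root` and report files that still mention the removed architecture.
        hits = []
        for dirpath, _, filenames in os.walk(root):
            for name in filenames:
                path = os.path.join(dirpath, name)
                try:
                    with open(path) as f:
                        text = f.read()
                except (IOError, UnicodeDecodeError):
                    continue
                if any(needle in text for needle in NEEDLES):
                    hits.append(path)
        return hits

    for path in find_stragglers('tools'):
        print('still mentions MIPS: %s' % path)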