- integrated ZScript backend

This commit is contained in:
Christoph Oelckers 2020-04-07 20:14:24 +02:00
parent 091d90aba5
commit 4d44682603
84 changed files with 66278 additions and 1 deletion

View file

@@ -0,0 +1,112 @@
# Embedded (vendored) build of the AsmJit JIT assembler as a static library.
cmake_minimum_required(VERSION 2.8.7)
#make_release_only()
# NOTE: every file in ASMJIT_SRCS is C++ (.cpp), so CXX must be one of the
# enabled languages; the original `project(asmjit C)` only enabled a C
# compiler and relied on the parent project having enabled CXX already.
# C is kept in the list for backward compatibility.
project(asmjit C CXX)
set(ASMJITNAME asmjit)
# Embedded/static build: no DLL export/import decoration is compiled in.
add_definitions(-DASMJIT_BUILD_EMBED)
add_definitions(-DASMJIT_STATIC)
if(MSVC)
  # Name the debug archive "asmjitd" so it can coexist with the release one.
  set(CMAKE_DEBUG_POSTFIX "d")
  add_definitions(-D_CRT_NONSTDC_NO_DEPRECATE)
endif()
if(APPLE)
  # Suppress stdlib.h:334:6: warning: pointer is missing a nullability type specifier (_Nonnull, _Nullable, or _Null_unspecified)
  add_definitions(-Wno-nullability-completeness)
endif()
include_directories(${CMAKE_CURRENT_SOURCE_DIR})
# Public headers; installed verbatim when header installation is enabled.
set(ASMJIT_PUBLIC_HDRS
  asmjit/arm.h
  asmjit/asmjit.h
  asmjit/asmjit_apibegin.h
  asmjit/asmjit_apiend.h
  asmjit/asmjit_build.h
  asmjit/base.h
  asmjit/base/arch.h
  asmjit/base/assembler.h
  asmjit/base/codebuilder.h
  asmjit/base/codecompiler.h
  asmjit/base/codeemitter.h
  asmjit/base/codeholder.h
  asmjit/base/constpool.h
  asmjit/base/cpuinfo.h
  asmjit/base/func.h
  asmjit/base/globals.h
  asmjit/base/inst.h
  asmjit/base/logging.h
  asmjit/base/misc_p.h
  asmjit/base/operand.h
  asmjit/base/osutils.h
  asmjit/base/regalloc_p.h
  asmjit/base/runtime.h
  asmjit/base/simdtypes.h
  asmjit/base/string.h
  asmjit/base/utils.h
  asmjit/base/vmem.h
  asmjit/base/zone.h
  asmjit/x86.h
  asmjit/x86/x86assembler.h
  asmjit/x86/x86builder.h
  asmjit/x86/x86compiler.h
  asmjit/x86/x86emitter.h
  asmjit/x86/x86globals.h
  asmjit/x86/x86inst.h
  asmjit/x86/x86instimpl_p.h
  asmjit/x86/x86internal_p.h
  asmjit/x86/x86logging_p.h
  asmjit/x86/x86misc.h
  asmjit/x86/x86operand.h
  asmjit/x86/x86regalloc_p.h
)
# Library sources (architecture-independent base + X86/X64 backend).
set(ASMJIT_SRCS
  asmjit/base/arch.cpp
  asmjit/base/assembler.cpp
  asmjit/base/codebuilder.cpp
  asmjit/base/codecompiler.cpp
  asmjit/base/codeemitter.cpp
  asmjit/base/codeholder.cpp
  asmjit/base/constpool.cpp
  asmjit/base/cpuinfo.cpp
  asmjit/base/func.cpp
  asmjit/base/globals.cpp
  asmjit/base/inst.cpp
  asmjit/base/logging.cpp
  asmjit/base/operand.cpp
  asmjit/base/osutils.cpp
  asmjit/base/regalloc.cpp
  asmjit/base/runtime.cpp
  asmjit/base/string.cpp
  asmjit/base/utils.cpp
  asmjit/base/vmem.cpp
  asmjit/base/zone.cpp
  asmjit/x86/x86assembler.cpp
  asmjit/x86/x86builder.cpp
  asmjit/x86/x86compiler.cpp
  asmjit/x86/x86inst.cpp
  asmjit/x86/x86instimpl.cpp
  asmjit/x86/x86internal.cpp
  asmjit/x86/x86operand.cpp
  asmjit/x86/x86operand_regs.cpp
  asmjit/x86/x86regalloc.cpp
)
# Headers are listed in the target so IDE generators show them in the project.
add_library(${ASMJITNAME} STATIC ${ASMJIT_SRCS} ${ASMJIT_PUBLIC_HDRS})
set_target_properties(${ASMJITNAME} PROPERTIES OUTPUT_NAME asmjit)
# Optional install rules; the embedding project can opt out via SKIP_INSTALL_*.
if(NOT SKIP_INSTALL_LIBRARIES AND NOT SKIP_INSTALL_ALL)
  install(TARGETS ${ASMJITNAME}
          RUNTIME DESTINATION bin
          ARCHIVE DESTINATION lib
          LIBRARY DESTINATION lib)
endif()
if(NOT SKIP_INSTALL_HEADERS AND NOT SKIP_INSTALL_ALL)
  install(FILES ${ASMJIT_PUBLIC_HDRS} DESTINATION include)
endif()

View file

@@ -0,0 +1,21 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_ARM_H
#define _ASMJIT_ARM_H
// [Dependencies]
// Umbrella header for the ARM backend: pulls in the architecture-independent
// base layer first, then the public ARM emitter headers. Include this (or let
// asmjit.h include it when ASMJIT_BUILD_ARM is defined) instead of the
// individual ./arm/* headers.
#include "./base.h"
#include "./arm/armassembler.h"
#include "./arm/armbuilder.h"
#include "./arm/armcompiler.h"
#include "./arm/arminst.h"
#include "./arm/armoperand.h"
// [Guard]
#endif // _ASMJIT_ARM_H

View file

@@ -0,0 +1,47 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_ASMJIT_H
#define _ASMJIT_ASMJIT_H
// ============================================================================
// [asmjit_mainpage]
// ============================================================================
// Doxygen main page and API group definitions for the generated docs.
//! \mainpage
//!
//! AsmJit - Complete x86/x64 JIT and Remote Assembler for C++.
//!
//! Introduction provided by the project page at https://github.com/asmjit/asmjit.
//! \defgroup asmjit_base AsmJit Base API (architecture independent)
//!
//! \brief Backend Neutral API.
//! \defgroup asmjit_x86 AsmJit X86/X64 API
//!
//! \brief X86/X64 Backend API.
//! \defgroup asmjit_arm AsmJit ARM32/ARM64 API
//!
//! \brief ARM32/ARM64 Backend API.
// [Dependencies]
// The base layer is always included; backend umbrella headers are pulled in
// only for the backends selected at build time via ASMJIT_BUILD_* macros.
#include "./base.h"
// [X86/X64]
#if defined(ASMJIT_BUILD_X86)
#include "./x86.h"
#endif // ASMJIT_BUILD_X86
// [ARM32/ARM64]
#if defined(ASMJIT_BUILD_ARM)
#include "./arm.h"
#endif // ASMJIT_BUILD_ARM
// [Guard]
#endif // _ASMJIT_ASMJIT_H

View file

@@ -0,0 +1,117 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
//
// This header opens an AsmJit "API scope": it installs keyword fallbacks for
// pre-C++11 compilers, silences compiler-specific warnings, and defines
// helper macros used by the AsmJit headers that follow. It is intentionally
// not include-guarded; every inclusion must be balanced by a matching
// include of asmjit_apiend.h, which undoes everything done here (the
// ASMJIT_UNDEF_* marker macros record what needs undoing).
// [Dependencies]
#if !defined(_ASMJIT_BUILD_H)
# include "./build.h"
#endif // !_ASMJIT_BUILD_H
// [Guard]
// Detect unbalanced begin/end pairs: a second begin without an intervening
// asmjit_apiend.h is a hard error.
#if !defined(ASMJIT_API_SCOPE)
# define ASMJIT_API_SCOPE
#else
# error "[asmjit] api-scope is already active, previous scope not closed by asmjit_apiend.h?"
#endif // ASMJIT_API_SCOPE
// ============================================================================
// [C++ Support]
// ============================================================================
// Map C++11 keywords to no-ops / C++03 equivalents on compilers that lack
// them (feature flags come from build.h). Each shim defines a matching
// ASMJIT_UNDEF_* marker so asmjit_apiend.h knows to remove it.
// [NoExcept]
#if !ASMJIT_CC_HAS_NOEXCEPT && !defined(noexcept)
# define noexcept ASMJIT_NOEXCEPT
# define ASMJIT_UNDEF_NOEXCEPT
#endif // !ASMJIT_CC_HAS_NOEXCEPT && !noexcept
// [NullPtr]
#if !ASMJIT_CC_HAS_NULLPTR && !defined(nullptr)
# define nullptr NULL
# define ASMJIT_UNDEF_NULLPTR
#endif // !ASMJIT_CC_HAS_NULLPTR && !nullptr
// [Override]
#if !ASMJIT_CC_HAS_OVERRIDE && !defined(override)
# define override
# define ASMJIT_UNDEF_OVERRIDE
#endif // !ASMJIT_CC_HAS_OVERRIDE && !override
// ============================================================================
// [Compiler Support]
// ============================================================================
// Push the current diagnostic state and disable warnings that AsmJit's
// headers would otherwise trigger; asmjit_apiend.h pops the state back.
// [Clang]
#if ASMJIT_CC_CLANG
# pragma clang diagnostic push
# pragma clang diagnostic ignored "-Wc++11-extensions"
# pragma clang diagnostic ignored "-Wconstant-logical-operand"
# pragma clang diagnostic ignored "-Wunnamed-type-template-args"
#endif // ASMJIT_CC_CLANG
// [GCC]
#if ASMJIT_CC_GCC
# pragma GCC diagnostic push
#endif // ASMJIT_CC_GCC
// [MSC]
#if ASMJIT_CC_MSC
# pragma warning(push)
# pragma warning(disable: 4127) // conditional expression is constant
# pragma warning(disable: 4201) // nameless struct/union
# pragma warning(disable: 4244) // '+=' : conversion from 'int' to 'x', possible loss of data
# pragma warning(disable: 4251) // struct needs to have dll-interface to be used by clients of struct ...
# pragma warning(disable: 4275) // non dll-interface struct ... used as base for dll-interface struct
# pragma warning(disable: 4355) // this used in base member initializer list
# pragma warning(disable: 4480) // specifying underlying type for enum
# pragma warning(disable: 4800) // forcing value to bool 'true' or 'false'
// Pre-VS2015 CRTs only provide the underscore-prefixed printf variants.
# if _MSC_VER < 1900
#  if !defined(vsnprintf)
#   define ASMJIT_UNDEF_VSNPRINTF
#   define vsnprintf _vsnprintf
#  endif // !vsnprintf
#  if !defined(snprintf)
#   define ASMJIT_UNDEF_SNPRINTF
#   define snprintf _snprintf
#  endif // !snprintf
# endif
#endif // ASMJIT_CC_MSC
// ============================================================================
// [Custom Macros]
// ============================================================================
// [ASMJIT_NON...]
// ASMJIT_NONCONSTRUCTIBLE disables default construction, copy construction
// and copy assignment; ASMJIT_NONCOPYABLE disables only the copy operations.
// With '= delete' support the functions are deleted; otherwise they are
// declared private and left undefined (a link error if ever used).
#if ASMJIT_CC_HAS_DELETE_FUNCTION
#define ASMJIT_NONCONSTRUCTIBLE(...) \
private: \
  __VA_ARGS__() = delete; \
  __VA_ARGS__(const __VA_ARGS__& other) = delete; \
  __VA_ARGS__& operator=(const __VA_ARGS__& other) = delete; \
public:
#define ASMJIT_NONCOPYABLE(...) \
private: \
  __VA_ARGS__(const __VA_ARGS__& other) = delete; \
  __VA_ARGS__& operator=(const __VA_ARGS__& other) = delete; \
public:
#else
#define ASMJIT_NONCONSTRUCTIBLE(...) \
private: \
  inline __VA_ARGS__(); \
  inline __VA_ARGS__(const __VA_ARGS__& other); \
  inline __VA_ARGS__& operator=(const __VA_ARGS__& other); \
public:
#define ASMJIT_NONCOPYABLE(...) \
private: \
  inline __VA_ARGS__(const __VA_ARGS__& other); \
  inline __VA_ARGS__& operator=(const __VA_ARGS__& other); \
public:
#endif // ASMJIT_CC_HAS_DELETE_FUNCTION
// [ASMJIT_ENUM]
// Declares an enum with an explicit uint32_t underlying type where the
// compiler supports it (MSVC >= 2005 here), otherwise a plain enum.
#if defined(_MSC_VER) && _MSC_VER >= 1400
# define ASMJIT_ENUM(NAME) enum NAME : uint32_t
#else
# define ASMJIT_ENUM(NAME) enum NAME
#endif

View file

@@ -0,0 +1,74 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
//
// Closes the AsmJit "API scope" opened by asmjit_apibegin.h: removes the
// keyword fallbacks it installed (guided by the ASMJIT_UNDEF_* markers),
// restores the compiler's diagnostic state, and undefines the helper macros.
// Intentionally not include-guarded — one inclusion per begin/end pair.
// [Guard]
#if defined(ASMJIT_API_SCOPE)
# undef ASMJIT_API_SCOPE
#else
# error "[asmjit] api-scope not active, forgot to include asmjit_apibegin.h?"
#endif // ASMJIT_API_SCOPE
// ============================================================================
// [C++ Support]
// ============================================================================
// Remove the keyword shims only if asmjit_apibegin.h defined them.
// [NoExcept]
#if defined(ASMJIT_UNDEF_NOEXCEPT)
# undef noexcept
# undef ASMJIT_UNDEF_NOEXCEPT
#endif // ASMJIT_UNDEF_NOEXCEPT
// [NullPtr]
#if defined(ASMJIT_UNDEF_NULLPTR)
# undef nullptr
# undef ASMJIT_UNDEF_NULLPTR
#endif // ASMJIT_UNDEF_NULLPTR
// [Override]
#if defined(ASMJIT_UNDEF_OVERRIDE)
# undef override
# undef ASMJIT_UNDEF_OVERRIDE
#endif // ASMJIT_UNDEF_OVERRIDE
// ============================================================================
// [Compiler Support]
// ============================================================================
// Pop the diagnostic state pushed by asmjit_apibegin.h.
// [Clang]
#if ASMJIT_CC_CLANG
# pragma clang diagnostic pop
#endif // ASMJIT_CC_CLANG
// [GCC]
#if ASMJIT_CC_GCC
# pragma GCC diagnostic pop
#endif // ASMJIT_CC_GCC
// [MSC]
#if ASMJIT_CC_MSC
# pragma warning(pop)
// Remove the pre-VS2015 printf-name shims if they were installed.
# if _MSC_VER < 1900
#  if defined(ASMJIT_UNDEF_VSNPRINTF)
#   undef vsnprintf
#   undef ASMJIT_UNDEF_VSNPRINTF
#  endif // ASMJIT_UNDEF_VSNPRINTF
#  if defined(ASMJIT_UNDEF_SNPRINTF)
#   undef snprintf
#   undef ASMJIT_UNDEF_SNPRINTF
#  endif // ASMJIT_UNDEF_SNPRINTF
# endif
#endif // ASMJIT_CC_MSC
// ============================================================================
// [Custom Macros]
// ============================================================================
// Helper macros defined by asmjit_apibegin.h are scope-local; drop them.
// [ASMJIT_NON...]
#undef ASMJIT_NONCONSTRUCTIBLE
#undef ASMJIT_NONCOPYABLE
// [ASMJIT_ENUM]
#undef ASMJIT_ENUM

View file

@@ -0,0 +1,949 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_BUILD_H
#define _ASMJIT_BUILD_H
// ============================================================================
// [asmjit::Build - Configuration]
// ============================================================================
// AsmJit is by default compiled only for a host processor for the purpose of
// JIT code generation. Both Assembler and CodeCompiler emitters are compiled
// by default. Preprocessor macros can be used to change the default behavior.
// External Config File
// --------------------
//
// Define in case your configuration is generated in an external file to be
// included.
#if defined(ASMJIT_CONFIG_FILE)
# include ASMJIT_CONFIG_FILE
#endif // ASMJIT_CONFIG_FILE
// AsmJit Static Builds and Embedding
// ----------------------------------
//
// These definitions can be used to enable static library build. Embed is used
// when AsmJit's source code is embedded directly in another project, implies
// static build as well.
//
// #define ASMJIT_EMBED // Asmjit is embedded (implies ASMJIT_STATIC).
// #define ASMJIT_STATIC // Define to enable static-library build.
// AsmJit Build Modes
// ------------------
//
// These definitions control the build mode and tracing support. The build mode
// should be auto-detected at compile time, but it's possible to override it in
// case that the auto-detection fails.
//
// Tracing is a feature that is never compiled by default and it's only used to
// debug AsmJit itself.
//
// #define ASMJIT_DEBUG // Define to enable debug-mode.
// #define ASMJIT_RELEASE // Define to enable release-mode.
// AsmJit Build Backends
// ---------------------
//
// These definitions control which backends to compile. If none of these is
// defined AsmJit will use host architecture by default (for JIT code generation).
//
// #define ASMJIT_BUILD_X86 // Define to enable X86 and X64 code-generation.
// #define ASMJIT_BUILD_ARM // Define to enable ARM32 and ARM64 code-generation.
// #define ASMJIT_BUILD_HOST // Define to enable host instruction set.
// AsmJit Build Features
// ---------------------
//
// Flags can be defined to disable standard features. These are handy especially
// when building AsmJit statically and some features are not needed or unwanted
// (like CodeCompiler).
//
// AsmJit features are enabled by default.
// #define ASMJIT_DISABLE_COMPILER // Disable CodeCompiler (completely).
// #define ASMJIT_DISABLE_LOGGING // Disable logging and formatting (completely).
// #define ASMJIT_DISABLE_TEXT // Disable everything that contains text
// // representation (instructions, errors, ...).
// #define ASMJIT_DISABLE_VALIDATION // Disable Validation (completely).
// Prevent compile-time errors caused by misconfiguration.
#if defined(ASMJIT_DISABLE_TEXT) && !defined(ASMJIT_DISABLE_LOGGING)
# error "[asmjit] ASMJIT_DISABLE_TEXT requires ASMJIT_DISABLE_LOGGING to be defined."
#endif // ASMJIT_DISABLE_TEXT && !ASMJIT_DISABLE_LOGGING
// Detect ASMJIT_DEBUG and ASMJIT_RELEASE if not forced from outside.
#if !defined(ASMJIT_DEBUG) && !defined(ASMJIT_RELEASE)
# if !defined(NDEBUG)
# define ASMJIT_DEBUG
# else
# define ASMJIT_RELEASE
# endif
#endif
// ASMJIT_EMBED implies ASMJIT_STATIC.
#if defined(ASMJIT_EMBED) && !defined(ASMJIT_STATIC)
# define ASMJIT_STATIC
#endif
// ============================================================================
// [asmjit::Build - VERSION]
// ============================================================================
// [@VERSION{@]
#define ASMJIT_VERSION_MAJOR 1
#define ASMJIT_VERSION_MINOR 0
#define ASMJIT_VERSION_PATCH 0
#define ASMJIT_VERSION_STRING "1.0.0"
// [@VERSION}@]
// ============================================================================
// [asmjit::Build - WIN32]
// ============================================================================
// [@WIN32_CRT_NO_DEPRECATE{@]
#if defined(_MSC_VER) && defined(ASMJIT_EXPORTS)
# if !defined(_CRT_SECURE_NO_DEPRECATE)
# define _CRT_SECURE_NO_DEPRECATE
# endif
# if !defined(_CRT_SECURE_NO_WARNINGS)
# define _CRT_SECURE_NO_WARNINGS
# endif
#endif
// [@WIN32_CRT_NO_DEPRECATE}@]
// [@WIN32_LEAN_AND_MEAN{@]
#if (defined(_WIN32) || defined(_WINDOWS)) && !defined(_WINDOWS_)
# if !defined(WIN32_LEAN_AND_MEAN)
# define WIN32_LEAN_AND_MEAN
# define ASMJIT_UNDEF_WIN32_LEAN_AND_MEAN
# endif
# if !defined(NOMINMAX)
# define NOMINMAX
# define ASMJIT_UNDEF_NOMINMAX
# endif
# include <windows.h>
# if defined(ASMJIT_UNDEF_NOMINMAX)
# undef NOMINMAX
# undef ASMJIT_UNDEF_NOMINMAX
# endif
# if defined(ASMJIT_UNDEF_WIN32_LEAN_AND_MEAN)
# undef WIN32_LEAN_AND_MEAN
# undef ASMJIT_UNDEF_WIN32_LEAN_AND_MEAN
# endif
#endif
// [@WIN32_LEAN_AND_MEAN}@]
// ============================================================================
// [asmjit::Build - OS]
// ============================================================================
// [@OS{@]
#if defined(_WIN32) || defined(_WINDOWS)
#define ASMJIT_OS_WINDOWS (1)
#else
#define ASMJIT_OS_WINDOWS (0)
#endif
#if defined(__APPLE__)
# include <TargetConditionals.h>
# define ASMJIT_OS_MAC (TARGET_OS_MAC)
# define ASMJIT_OS_IOS (TARGET_OS_IPHONE)
#else
# define ASMJIT_OS_MAC (0)
# define ASMJIT_OS_IOS (0)
#endif
#if defined(__ANDROID__)
# define ASMJIT_OS_ANDROID (1)
#else
# define ASMJIT_OS_ANDROID (0)
#endif
#if defined(__linux__) || defined(__ANDROID__)
# define ASMJIT_OS_LINUX (1)
#else
# define ASMJIT_OS_LINUX (0)
#endif
#if defined(__DragonFly__)
# define ASMJIT_OS_DRAGONFLYBSD (1)
#else
# define ASMJIT_OS_DRAGONFLYBSD (0)
#endif
#if defined(__FreeBSD__)
# define ASMJIT_OS_FREEBSD (1)
#else
# define ASMJIT_OS_FREEBSD (0)
#endif
#if defined(__NetBSD__)
# define ASMJIT_OS_NETBSD (1)
#else
# define ASMJIT_OS_NETBSD (0)
#endif
#if defined(__OpenBSD__)
# define ASMJIT_OS_OPENBSD (1)
#else
# define ASMJIT_OS_OPENBSD (0)
#endif
#if defined(__QNXNTO__)
# define ASMJIT_OS_QNX (1)
#else
# define ASMJIT_OS_QNX (0)
#endif
#if defined(__sun)
# define ASMJIT_OS_SOLARIS (1)
#else
# define ASMJIT_OS_SOLARIS (0)
#endif
#if defined(__CYGWIN__)
# define ASMJIT_OS_CYGWIN (1)
#else
# define ASMJIT_OS_CYGWIN (0)
#endif
#define ASMJIT_OS_BSD ( \
ASMJIT_OS_FREEBSD || \
ASMJIT_OS_DRAGONFLYBSD || \
ASMJIT_OS_NETBSD || \
ASMJIT_OS_OPENBSD || \
ASMJIT_OS_MAC)
#define ASMJIT_OS_POSIX (!ASMJIT_OS_WINDOWS)
// [@OS}@]
// ============================================================================
// [asmjit::Build - ARCH]
// ============================================================================
// [@ARCH{@]
// \def ASMJIT_ARCH_ARM32
// True if the target architecture is a 32-bit ARM.
//
// \def ASMJIT_ARCH_ARM64
// True if the target architecture is a 64-bit ARM.
//
// \def ASMJIT_ARCH_X86
// True if the target architecture is a 32-bit X86/IA32
//
// \def ASMJIT_ARCH_X64
// True if the target architecture is a 64-bit X64/AMD64
//
// \def ASMJIT_ARCH_LE
// True if the target architecture is little endian.
//
// \def ASMJIT_ARCH_BE
// True if the target architecture is big endian.
//
// \def ASMJIT_ARCH_64BIT
// True if the target architecture is 64-bit.
#if (defined(_M_X64 ) || defined(__x86_64) || defined(__x86_64__) || \
defined(_M_AMD64) || defined(__amd64 ) || defined(__amd64__ ))
# define ASMJIT_ARCH_X64 1
#else
# define ASMJIT_ARCH_X64 0
#endif
#if (defined(_M_IX86 ) || defined(__X86__ ) || defined(__i386 ) || \
defined(__IA32__) || defined(__I86__ ) || defined(__i386__) || \
defined(__i486__) || defined(__i586__) || defined(__i686__))
# define ASMJIT_ARCH_X86 (!ASMJIT_ARCH_X64)
#else
# define ASMJIT_ARCH_X86 0
#endif
#if defined(__aarch64__)
# define ASMJIT_ARCH_ARM64 1
#else
# define ASMJIT_ARCH_ARM64 0
#endif
#if (defined(_M_ARM ) || defined(__arm ) || defined(__thumb__ ) || \
defined(_M_ARMT ) || defined(__arm__ ) || defined(__thumb2__))
# define ASMJIT_ARCH_ARM32 (!ASMJIT_ARCH_ARM64)
#else
# define ASMJIT_ARCH_ARM32 0
#endif
#define ASMJIT_ARCH_LE ( \
ASMJIT_ARCH_X86 || \
ASMJIT_ARCH_X64 || \
ASMJIT_ARCH_ARM32 || \
ASMJIT_ARCH_ARM64 )
#define ASMJIT_ARCH_BE (!(ASMJIT_ARCH_LE))
#define ASMJIT_ARCH_64BIT (ASMJIT_ARCH_X64 || ASMJIT_ARCH_ARM64)
// [@ARCH}@]
// [@ARCH_UNALIGNED_RW{@]
// \def ASMJIT_ARCH_UNALIGNED_16
// True if the target architecture allows unaligned 16-bit reads and writes.
//
// \def ASMJIT_ARCH_UNALIGNED_32
// True if the target architecture allows unaligned 32-bit reads and writes.
//
// \def ASMJIT_ARCH_UNALIGNED_64
// True if the target architecture allows unaligned 64-bit reads and writes.
#define ASMJIT_ARCH_UNALIGNED_16 (ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64)
#define ASMJIT_ARCH_UNALIGNED_32 (ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64)
#define ASMJIT_ARCH_UNALIGNED_64 (ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64)
// [@ARCH_UNALIGNED_RW}@]
// ============================================================================
// [asmjit::Build - CC]
// ============================================================================
// [@CC{@]
// \def ASMJIT_CC_CLANG
// Non-zero if the detected C++ compiler is CLANG (contains normalized CLANG version).
//
// \def ASMJIT_CC_CODEGEAR
// Non-zero if the detected C++ compiler is CODEGEAR or BORLAND (version not normalized).
//
// \def ASMJIT_CC_INTEL
// Non-zero if the detected C++ compiler is INTEL (version not normalized).
//
// \def ASMJIT_CC_GCC
// Non-zero if the detected C++ compiler is GCC (contains normalized GCC version).
//
// \def ASMJIT_CC_MSC
// Non-zero if the detected C++ compiler is MSC (contains normalized MSC version).
//
// \def ASMJIT_CC_MINGW
// Non-zero if the detected C++ compiler is MINGW32 (set to 32) or MINGW64 (set to 64).
#define ASMJIT_CC_CLANG 0
#define ASMJIT_CC_CODEGEAR 0
#define ASMJIT_CC_GCC 0
#define ASMJIT_CC_INTEL 0
#define ASMJIT_CC_MSC 0
// Intel masquerades as GCC, so check for it first.
#if defined(__INTEL_COMPILER)
# undef ASMJIT_CC_INTEL
# define ASMJIT_CC_INTEL __INTEL_COMPILER
#elif defined(__CODEGEARC__)
# undef ASMJIT_CC_CODEGEAR
# define ASMJIT_CC_CODEGEAR (__CODEGEARC__)
#elif defined(__BORLANDC__)
# undef ASMJIT_CC_CODEGEAR
# define ASMJIT_CC_CODEGEAR (__BORLANDC__)
#elif defined(__clang__) && defined(__clang_minor__)
# undef ASMJIT_CC_CLANG
# define ASMJIT_CC_CLANG (__clang_major__ * 10000000 + __clang_minor__ * 100000 + __clang_patchlevel__)
#elif defined(__GNUC__) && defined(__GNUC_MINOR__) && defined(__GNUC_PATCHLEVEL__)
# undef ASMJIT_CC_GCC
# define ASMJIT_CC_GCC (__GNUC__ * 10000000 + __GNUC_MINOR__ * 100000 + __GNUC_PATCHLEVEL__)
#elif defined(_MSC_VER) && defined(_MSC_FULL_VER)
# undef ASMJIT_CC_MSC
# if _MSC_VER == _MSC_FULL_VER / 10000
# define ASMJIT_CC_MSC (_MSC_VER * 100000 + (_MSC_FULL_VER % 10000))
# else
# define ASMJIT_CC_MSC (_MSC_VER * 100000 + (_MSC_FULL_VER % 100000))
# endif
#else
# error "[asmjit] Unable to detect the C/C++ compiler."
#endif
#if ASMJIT_CC_INTEL && (defined(__GNUC__) || defined(__clang__))
# define ASMJIT_CC_INTEL_COMPAT_MODE 1
# else
# define ASMJIT_CC_INTEL_COMPAT_MODE 0
#endif
#define ASMJIT_CC_CODEGEAR_EQ(x, y) (ASMJIT_CC_CODEGEAR == (((x) << 8) + (y)))
#define ASMJIT_CC_CODEGEAR_GE(x, y) (ASMJIT_CC_CODEGEAR >= (((x) << 8) + (y)))
#define ASMJIT_CC_CLANG_EQ(x, y, z) (ASMJIT_CC_CLANG == ((x) * 10000000 + (y) * 100000 + (z)))
#define ASMJIT_CC_CLANG_GE(x, y, z) (ASMJIT_CC_CLANG >= ((x) * 10000000 + (y) * 100000 + (z)))
#define ASMJIT_CC_GCC_EQ(x, y, z) (ASMJIT_CC_GCC == ((x) * 10000000 + (y) * 100000 + (z)))
#define ASMJIT_CC_GCC_GE(x, y, z) (ASMJIT_CC_GCC >= ((x) * 10000000 + (y) * 100000 + (z)))
#define ASMJIT_CC_INTEL_EQ(x, y) (ASMJIT_CC_INTEL == (((x) * 100) + (y)))
#define ASMJIT_CC_INTEL_GE(x, y) (ASMJIT_CC_INTEL >= (((x) * 100) + (y)))
#define ASMJIT_CC_MSC_EQ(x, y, z) (ASMJIT_CC_MSC == ((x) * 10000000 + (y) * 100000 + (z)))
#define ASMJIT_CC_MSC_GE(x, y, z) (ASMJIT_CC_MSC >= ((x) * 10000000 + (y) * 100000 + (z)))
#if defined(__MINGW64__)
# define ASMJIT_CC_MINGW 64
#elif defined(__MINGW32__)
# define ASMJIT_CC_MINGW 32
#else
# define ASMJIT_CC_MINGW 0
#endif
#if defined(__cplusplus)
# if __cplusplus >= 201103L
# define ASMJIT_CC_CXX_VERSION __cplusplus
# elif defined(__GXX_EXPERIMENTAL_CXX0X__) || ASMJIT_CC_MSC_GE(18, 0, 0) || ASMJIT_CC_INTEL_GE(14, 0)
# define ASMJIT_CC_CXX_VERSION 201103L
# else
# define ASMJIT_CC_CXX_VERSION 199711L
# endif
#endif
#if !defined(ASMJIT_CC_CXX_VERSION)
# define ASMJIT_CC_CXX_VERSION 0
#endif
// [@CC}@]
// [@CC_FEATURES{@]
#if ASMJIT_CC_CLANG
# define ASMJIT_CC_HAS_ATTRIBUTE (1)
# define ASMJIT_CC_HAS_ATTRIBUTE_ALIGNED (__has_attribute(__aligned__))
# define ASMJIT_CC_HAS_ATTRIBUTE_ALWAYS_INLINE (__has_attribute(__always_inline__))
# define ASMJIT_CC_HAS_ATTRIBUTE_NOINLINE (__has_attribute(__noinline__))
# define ASMJIT_CC_HAS_ATTRIBUTE_NORETURN (__has_attribute(__noreturn__))
# define ASMJIT_CC_HAS_ATTRIBUTE_OPTIMIZE (__has_attribute(__optimize__))
# define ASMJIT_CC_HAS_BUILTIN_ASSUME (__has_builtin(__builtin_assume))
# define ASMJIT_CC_HAS_BUILTIN_ASSUME_ALIGNED (__has_builtin(__builtin_assume_aligned))
# define ASMJIT_CC_HAS_BUILTIN_EXPECT (__has_builtin(__builtin_expect))
# define ASMJIT_CC_HAS_BUILTIN_UNREACHABLE (__has_builtin(__builtin_unreachable))
# define ASMJIT_CC_HAS_ALIGNAS (__has_extension(__cxx_alignas__))
# define ASMJIT_CC_HAS_ALIGNOF (__has_extension(__cxx_alignof__))
# define ASMJIT_CC_HAS_CONSTEXPR (__has_extension(__cxx_constexpr__))
# define ASMJIT_CC_HAS_DECLTYPE (__has_extension(__cxx_decltype__))
# define ASMJIT_CC_HAS_DEFAULT_FUNCTION (__has_extension(__cxx_defaulted_functions__))
# define ASMJIT_CC_HAS_DELETE_FUNCTION (__has_extension(__cxx_deleted_functions__))
# define ASMJIT_CC_HAS_FINAL (__has_extension(__cxx_override_control__))
# define ASMJIT_CC_HAS_INITIALIZER_LIST (__has_extension(__cxx_generalized_initializers__))
# define ASMJIT_CC_HAS_LAMBDA (__has_extension(__cxx_lambdas__))
# define ASMJIT_CC_HAS_NATIVE_CHAR (1)
# define ASMJIT_CC_HAS_NATIVE_WCHAR_T (1)
# define ASMJIT_CC_HAS_NATIVE_CHAR16_T (__has_extension(__cxx_unicode_literals__))
# define ASMJIT_CC_HAS_NATIVE_CHAR32_T (__has_extension(__cxx_unicode_literals__))
# define ASMJIT_CC_HAS_NOEXCEPT (__has_extension(__cxx_noexcept__))
# define ASMJIT_CC_HAS_NULLPTR (__has_extension(__cxx_nullptr__))
# define ASMJIT_CC_HAS_OVERRIDE (__has_extension(__cxx_override_control__))
# define ASMJIT_CC_HAS_RVALUE (__has_extension(__cxx_rvalue_references__))
# define ASMJIT_CC_HAS_STATIC_ASSERT (__has_extension(__cxx_static_assert__))
# define ASMJIT_CC_HAS_VARIADIC_TEMPLATES (__has_extension(__cxx_variadic_templates__))
#endif
#if ASMJIT_CC_CODEGEAR
# define ASMJIT_CC_HAS_DECLSPEC_ALIGN (ASMJIT_CC_CODEGEAR >= 0x0610)
# define ASMJIT_CC_HAS_DECLSPEC_FORCEINLINE (0)
# define ASMJIT_CC_HAS_DECLSPEC_NOINLINE (0)
# define ASMJIT_CC_HAS_DECLSPEC_NORETURN (ASMJIT_CC_CODEGEAR >= 0x0610)
# define ASMJIT_CC_HAS_ALIGNAS (0)
# define ASMJIT_CC_HAS_ALIGNOF (0)
# define ASMJIT_CC_HAS_CONSTEXPR (0)
# define ASMJIT_CC_HAS_DECLTYPE (ASMJIT_CC_CODEGEAR >= 0x0610)
# define ASMJIT_CC_HAS_DEFAULT_FUNCTION (0)
# define ASMJIT_CC_HAS_DELETE_FUNCTION (0)
# define ASMJIT_CC_HAS_FINAL (0)
# define ASMJIT_CC_HAS_INITIALIZER_LIST (0)
# define ASMJIT_CC_HAS_LAMBDA (0)
# define ASMJIT_CC_HAS_NATIVE_CHAR (1)
# define ASMJIT_CC_HAS_NATIVE_WCHAR_T (1)
# define ASMJIT_CC_HAS_NATIVE_CHAR16_T (0)
# define ASMJIT_CC_HAS_NATIVE_CHAR32_T (0)
# define ASMJIT_CC_HAS_NOEXCEPT (0)
# define ASMJIT_CC_HAS_NULLPTR (0)
# define ASMJIT_CC_HAS_OVERRIDE (0)
# define ASMJIT_CC_HAS_RVALUE (ASMJIT_CC_CODEGEAR >= 0x0610)
# define ASMJIT_CC_HAS_STATIC_ASSERT (ASMJIT_CC_CODEGEAR >= 0x0610)
# define ASMJIT_CC_HAS_VARIADIC_TEMPLATES (0)
#endif
#if ASMJIT_CC_GCC
# define ASMJIT_CC_HAS_ATTRIBUTE (1)
# define ASMJIT_CC_HAS_ATTRIBUTE_ALIGNED (ASMJIT_CC_GCC_GE(2, 7, 0))
# define ASMJIT_CC_HAS_ATTRIBUTE_ALWAYS_INLINE (ASMJIT_CC_GCC_GE(4, 4, 0) && !ASMJIT_CC_MINGW)
# define ASMJIT_CC_HAS_ATTRIBUTE_NOINLINE (ASMJIT_CC_GCC_GE(3, 4, 0) && !ASMJIT_CC_MINGW)
# define ASMJIT_CC_HAS_ATTRIBUTE_NORETURN (ASMJIT_CC_GCC_GE(2, 5, 0))
# define ASMJIT_CC_HAS_ATTRIBUTE_OPTIMIZE (ASMJIT_CC_GCC_GE(4, 4, 0))
# define ASMJIT_CC_HAS_BUILTIN_ASSUME (0)
# define ASMJIT_CC_HAS_BUILTIN_ASSUME_ALIGNED (ASMJIT_CC_GCC_GE(4, 7, 0))
# define ASMJIT_CC_HAS_BUILTIN_EXPECT (1)
# define ASMJIT_CC_HAS_BUILTIN_UNREACHABLE (ASMJIT_CC_GCC_GE(4, 5, 0) && ASMJIT_CC_CXX_VERSION >= 201103L)
# define ASMJIT_CC_HAS_ALIGNAS (ASMJIT_CC_GCC_GE(4, 8, 0) && ASMJIT_CC_CXX_VERSION >= 201103L)
# define ASMJIT_CC_HAS_ALIGNOF (ASMJIT_CC_GCC_GE(4, 8, 0) && ASMJIT_CC_CXX_VERSION >= 201103L)
# define ASMJIT_CC_HAS_CONSTEXPR (ASMJIT_CC_GCC_GE(4, 6, 0) && ASMJIT_CC_CXX_VERSION >= 201103L)
# define ASMJIT_CC_HAS_DECLTYPE (ASMJIT_CC_GCC_GE(4, 3, 0) && ASMJIT_CC_CXX_VERSION >= 201103L)
# define ASMJIT_CC_HAS_DEFAULT_FUNCTION (ASMJIT_CC_GCC_GE(4, 4, 0) && ASMJIT_CC_CXX_VERSION >= 201103L)
# define ASMJIT_CC_HAS_DELETE_FUNCTION (ASMJIT_CC_GCC_GE(4, 4, 0) && ASMJIT_CC_CXX_VERSION >= 201103L)
# define ASMJIT_CC_HAS_FINAL (ASMJIT_CC_GCC_GE(4, 7, 0) && ASMJIT_CC_CXX_VERSION >= 201103L)
# define ASMJIT_CC_HAS_INITIALIZER_LIST (ASMJIT_CC_GCC_GE(4, 4, 0) && ASMJIT_CC_CXX_VERSION >= 201103L)
# define ASMJIT_CC_HAS_LAMBDA (ASMJIT_CC_GCC_GE(4, 5, 0) && ASMJIT_CC_CXX_VERSION >= 201103L)
# define ASMJIT_CC_HAS_NATIVE_CHAR (1)
# define ASMJIT_CC_HAS_NATIVE_WCHAR_T (1)
# define ASMJIT_CC_HAS_NATIVE_CHAR16_T (ASMJIT_CC_GCC_GE(4, 5, 0) && ASMJIT_CC_CXX_VERSION >= 201103L)
# define ASMJIT_CC_HAS_NATIVE_CHAR32_T (ASMJIT_CC_GCC_GE(4, 5, 0) && ASMJIT_CC_CXX_VERSION >= 201103L)
# define ASMJIT_CC_HAS_NOEXCEPT (ASMJIT_CC_GCC_GE(4, 6, 0) && ASMJIT_CC_CXX_VERSION >= 201103L)
# define ASMJIT_CC_HAS_NULLPTR (ASMJIT_CC_GCC_GE(4, 6, 0) && ASMJIT_CC_CXX_VERSION >= 201103L)
# define ASMJIT_CC_HAS_OVERRIDE (ASMJIT_CC_GCC_GE(4, 7, 0) && ASMJIT_CC_CXX_VERSION >= 201103L)
# define ASMJIT_CC_HAS_RVALUE (ASMJIT_CC_GCC_GE(4, 3, 0) && ASMJIT_CC_CXX_VERSION >= 201103L)
# define ASMJIT_CC_HAS_STATIC_ASSERT (ASMJIT_CC_GCC_GE(4, 3, 0) && ASMJIT_CC_CXX_VERSION >= 201103L)
# define ASMJIT_CC_HAS_VARIADIC_TEMPLATES (ASMJIT_CC_GCC_GE(4, 3, 0) && ASMJIT_CC_CXX_VERSION >= 201103L)
#endif
#if ASMJIT_CC_INTEL
# define ASMJIT_CC_HAS_ATTRIBUTE (ASMJIT_CC_INTEL_COMPAT_MODE)
# define ASMJIT_CC_HAS_ATTRIBUTE_ALIGNED (ASMJIT_CC_INTEL_COMPAT_MODE)
# define ASMJIT_CC_HAS_ATTRIBUTE_ALWAYS_INLINE (ASMJIT_CC_INTEL_COMPAT_MODE)
# define ASMJIT_CC_HAS_ATTRIBUTE_NOINLINE (ASMJIT_CC_INTEL_COMPAT_MODE)
# define ASMJIT_CC_HAS_ATTRIBUTE_NORETURN (ASMJIT_CC_INTEL_COMPAT_MODE)
# define ASMJIT_CC_HAS_ATTRIBUTE_OPTIMIZE (ASMJIT_CC_INTEL_COMPAT_MODE)
# define ASMJIT_CC_HAS_BUILTIN_EXPECT (ASMJIT_CC_INTEL_COMPAT_MODE)
# define ASMJIT_CC_HAS_DECLSPEC_ALIGN (ASMJIT_CC_INTEL_COMPAT_MODE == 0)
# define ASMJIT_CC_HAS_DECLSPEC_FORCEINLINE (ASMJIT_CC_INTEL_COMPAT_MODE == 0)
# define ASMJIT_CC_HAS_DECLSPEC_NOINLINE (ASMJIT_CC_INTEL_COMPAT_MODE == 0)
# define ASMJIT_CC_HAS_DECLSPEC_NORETURN (ASMJIT_CC_INTEL_COMPAT_MODE == 0)
# define ASMJIT_CC_HAS_ASSUME (1)
# define ASMJIT_CC_HAS_ASSUME_ALIGNED (1)
# define ASMJIT_CC_HAS_ALIGNAS (ASMJIT_CC_INTEL >= 1500)
# define ASMJIT_CC_HAS_ALIGNOF (ASMJIT_CC_INTEL >= 1500)
# define ASMJIT_CC_HAS_CONSTEXPR (ASMJIT_CC_INTEL >= 1400)
# define ASMJIT_CC_HAS_DECLTYPE (ASMJIT_CC_INTEL >= 1200)
# define ASMJIT_CC_HAS_DEFAULT_FUNCTION (ASMJIT_CC_INTEL >= 1200)
# define ASMJIT_CC_HAS_DELETE_FUNCTION (ASMJIT_CC_INTEL >= 1200)
# define ASMJIT_CC_HAS_FINAL (ASMJIT_CC_INTEL >= 1400)
# define ASMJIT_CC_HAS_INITIALIZER_LIST (ASMJIT_CC_INTEL >= 1400)
# define ASMJIT_CC_HAS_LAMBDA (ASMJIT_CC_INTEL >= 1200)
# define ASMJIT_CC_HAS_NATIVE_CHAR (1)
# define ASMJIT_CC_HAS_NATIVE_WCHAR_T (1)
# define ASMJIT_CC_HAS_NATIVE_CHAR16_T (ASMJIT_CC_INTEL >= 1400 || (ASMJIT_CC_INTEL_COMPAT_MODE > 0 && ASMJIT_CC_INTEL >= 1206))
# define ASMJIT_CC_HAS_NATIVE_CHAR32_T (ASMJIT_CC_INTEL >= 1400 || (ASMJIT_CC_INTEL_COMPAT_MODE > 0 && ASMJIT_CC_INTEL >= 1206))
# define ASMJIT_CC_HAS_NOEXCEPT (ASMJIT_CC_INTEL >= 1400)
# define ASMJIT_CC_HAS_NULLPTR (ASMJIT_CC_INTEL >= 1206)
# define ASMJIT_CC_HAS_OVERRIDE (ASMJIT_CC_INTEL >= 1400)
# define ASMJIT_CC_HAS_RVALUE (ASMJIT_CC_INTEL >= 1110)
# define ASMJIT_CC_HAS_STATIC_ASSERT (ASMJIT_CC_INTEL >= 1110)
# define ASMJIT_CC_HAS_VARIADIC_TEMPLATES (ASMJIT_CC_INTEL >= 1206)
#endif
#if ASMJIT_CC_MSC
# define ASMJIT_CC_HAS_DECLSPEC_ALIGN (1)
# define ASMJIT_CC_HAS_DECLSPEC_FORCEINLINE (1)
# define ASMJIT_CC_HAS_DECLSPEC_NOINLINE (1)
# define ASMJIT_CC_HAS_DECLSPEC_NORETURN (1)
# define ASMJIT_CC_HAS_ASSUME (1)
# define ASMJIT_CC_HAS_ASSUME_ALIGNED (0)
# define ASMJIT_CC_HAS_ALIGNAS (ASMJIT_CC_MSC_GE(19, 0, 0))
# define ASMJIT_CC_HAS_ALIGNOF (ASMJIT_CC_MSC_GE(19, 0, 0))
# define ASMJIT_CC_HAS_CONSTEXPR (ASMJIT_CC_MSC_GE(19, 0, 0))
# define ASMJIT_CC_HAS_DECLTYPE (ASMJIT_CC_MSC_GE(16, 0, 0))
# define ASMJIT_CC_HAS_DEFAULT_FUNCTION (ASMJIT_CC_MSC_GE(18, 0, 0))
# define ASMJIT_CC_HAS_DELETE_FUNCTION (ASMJIT_CC_MSC_GE(18, 0, 0))
# define ASMJIT_CC_HAS_FINAL (ASMJIT_CC_MSC_GE(14, 0, 0))
# define ASMJIT_CC_HAS_INITIALIZER_LIST (ASMJIT_CC_MSC_GE(18, 0, 0))
# define ASMJIT_CC_HAS_LAMBDA (ASMJIT_CC_MSC_GE(16, 0, 0))
# define ASMJIT_CC_HAS_NATIVE_CHAR (1)
# if defined(_NATIVE_WCHAR_T_DEFINED)
# define ASMJIT_CC_HAS_NATIVE_WCHAR_T (1)
# else
# define ASMJIT_CC_HAS_NATIVE_WCHAR_T (0)
# endif
# define ASMJIT_CC_HAS_NATIVE_CHAR16_T (ASMJIT_CC_MSC_GE(19, 0, 0))
# define ASMJIT_CC_HAS_NATIVE_CHAR32_T (ASMJIT_CC_MSC_GE(19, 0, 0))
# define ASMJIT_CC_HAS_NOEXCEPT (ASMJIT_CC_MSC_GE(19, 0, 0))
# define ASMJIT_CC_HAS_NULLPTR (ASMJIT_CC_MSC_GE(16, 0, 0))
# define ASMJIT_CC_HAS_OVERRIDE (ASMJIT_CC_MSC_GE(14, 0, 0))
# define ASMJIT_CC_HAS_RVALUE (ASMJIT_CC_MSC_GE(16, 0, 0))
# define ASMJIT_CC_HAS_STATIC_ASSERT (ASMJIT_CC_MSC_GE(16, 0, 0))
# define ASMJIT_CC_HAS_VARIADIC_TEMPLATES (ASMJIT_CC_MSC_GE(18, 0, 0))
#endif
// Fixup some vendor specific keywords.
// Every ASMJIT_CC_HAS_* capability macro that the compiler-specific sections
// above did not define defaults to (0) here, so later `#if` tests always see
// a defined identifier instead of relying on undefined-macro-is-zero behavior.
#if !defined(ASMJIT_CC_HAS_ASSUME)
# define ASMJIT_CC_HAS_ASSUME (0)
#endif
#if !defined(ASMJIT_CC_HAS_ASSUME_ALIGNED)
# define ASMJIT_CC_HAS_ASSUME_ALIGNED (0)
#endif
// Fixup compilers that don't support '__attribute__'.
#if !defined(ASMJIT_CC_HAS_ATTRIBUTE)
# define ASMJIT_CC_HAS_ATTRIBUTE (0)
#endif
#if !defined(ASMJIT_CC_HAS_ATTRIBUTE_ALIGNED)
# define ASMJIT_CC_HAS_ATTRIBUTE_ALIGNED (0)
#endif
#if !defined(ASMJIT_CC_HAS_ATTRIBUTE_ALWAYS_INLINE)
# define ASMJIT_CC_HAS_ATTRIBUTE_ALWAYS_INLINE (0)
#endif
#if !defined(ASMJIT_CC_HAS_ATTRIBUTE_NOINLINE)
# define ASMJIT_CC_HAS_ATTRIBUTE_NOINLINE (0)
#endif
#if !defined(ASMJIT_CC_HAS_ATTRIBUTE_NORETURN)
# define ASMJIT_CC_HAS_ATTRIBUTE_NORETURN (0)
#endif
#if !defined(ASMJIT_CC_HAS_ATTRIBUTE_OPTIMIZE)
# define ASMJIT_CC_HAS_ATTRIBUTE_OPTIMIZE (0)
#endif
// Fixup compilers that don't support '__builtin?'.
#if !defined(ASMJIT_CC_HAS_BUILTIN_ASSUME)
# define ASMJIT_CC_HAS_BUILTIN_ASSUME (0)
#endif
#if !defined(ASMJIT_CC_HAS_BUILTIN_ASSUME_ALIGNED)
# define ASMJIT_CC_HAS_BUILTIN_ASSUME_ALIGNED (0)
#endif
#if !defined(ASMJIT_CC_HAS_BUILTIN_EXPECT)
# define ASMJIT_CC_HAS_BUILTIN_EXPECT (0)
#endif
#if !defined(ASMJIT_CC_HAS_BUILTIN_UNREACHABLE)
# define ASMJIT_CC_HAS_BUILTIN_UNREACHABLE (0)
#endif
// Fixup compilers that don't support 'declspec'.
#if !defined(ASMJIT_CC_HAS_DECLSPEC_ALIGN)
# define ASMJIT_CC_HAS_DECLSPEC_ALIGN (0)
#endif
#if !defined(ASMJIT_CC_HAS_DECLSPEC_FORCEINLINE)
# define ASMJIT_CC_HAS_DECLSPEC_FORCEINLINE (0)
#endif
#if !defined(ASMJIT_CC_HAS_DECLSPEC_NOINLINE)
# define ASMJIT_CC_HAS_DECLSPEC_NOINLINE (0)
#endif
#if !defined(ASMJIT_CC_HAS_DECLSPEC_NORETURN)
# define ASMJIT_CC_HAS_DECLSPEC_NORETURN (0)
#endif
// [@CC_FEATURES}@]
// [@CC_API{@]
// \def ASMJIT_API
// The decorated function is asmjit API and should be exported.
//
// Resolution order: static builds export nothing; Windows DLL builds use
// dllexport/dllimport (via `__attribute__` for non-MinGW GCC/Clang, otherwise
// `__declspec`); ELF/Mach-O builds use default symbol visibility.
#if !defined(ASMJIT_API)
# if defined(ASMJIT_STATIC)
#  define ASMJIT_API
# elif ASMJIT_OS_WINDOWS
#  if (ASMJIT_CC_GCC || ASMJIT_CC_CLANG) && !ASMJIT_CC_MINGW
#   if defined(ASMJIT_EXPORTS)
#    define ASMJIT_API __attribute__((__dllexport__))
#   else
#    define ASMJIT_API __attribute__((__dllimport__))
#   endif
#  else
#   if defined(ASMJIT_EXPORTS)
#    define ASMJIT_API __declspec(dllexport)
#   else
#    define ASMJIT_API __declspec(dllimport)
#   endif
#  endif
# else
// NOTE(review): on a non-Windows compiler that is not Clang/GCC>=4/Intel,
// ASMJIT_API remains undefined after this block — presumably such toolchains
// are unsupported; confirm before relying on it.
#  if ASMJIT_CC_CLANG || ASMJIT_CC_GCC_GE(4, 0, 0) || ASMJIT_CC_INTEL
#   define ASMJIT_API __attribute__((__visibility__("default")))
#  endif
# endif
#endif
// [@CC_API}@]
// [@CC_VARAPI{@]
// \def ASMJIT_VARAPI
// The decorated variable is part of asmjit API and is exported.
#if !defined(ASMJIT_VARAPI)
# define ASMJIT_VARAPI extern ASMJIT_API
#endif
// [@CC_VARAPI}@]
// [@CC_VIRTAPI{@]
// \def ASMJIT_VIRTAPI
// The decorated class has a virtual table and is part of asmjit API.
//
// This is basically a workaround. When using MSVC and marking class as DLL
// export everything gets exported, which is unwanted in most projects. MSVC
// automatically exports typeinfo and vtable if at least one symbol of the
// class is exported. However, GCC has some strange behavior that even if
// one or more symbol is exported it doesn't export typeinfo unless the
// class itself is decorated with "visibility(default)" (i.e. asmjit_API).
#if (ASMJIT_CC_GCC || ASMJIT_CC_CLANG) && !ASMJIT_OS_WINDOWS
# define ASMJIT_VIRTAPI ASMJIT_API
#else
# define ASMJIT_VIRTAPI
#endif
// [@CC_VIRTAPI}@]
// [@CC_INLINE{@]
// \def ASMJIT_INLINE
// Always inline the decorated function.
#if ASMJIT_CC_HAS_ATTRIBUTE_ALWAYS_INLINE
# define ASMJIT_INLINE inline __attribute__((__always_inline__))
#elif ASMJIT_CC_HAS_DECLSPEC_FORCEINLINE
# define ASMJIT_INLINE __forceinline
#else
# define ASMJIT_INLINE inline
#endif
// [@CC_INLINE}@]
// [@CC_NOINLINE{@]
// \def ASMJIT_NOINLINE
// Never inline the decorated function.
#if ASMJIT_CC_HAS_ATTRIBUTE_NOINLINE
# define ASMJIT_NOINLINE __attribute__((__noinline__))
#elif ASMJIT_CC_HAS_DECLSPEC_NOINLINE
# define ASMJIT_NOINLINE __declspec(noinline)
#else
# define ASMJIT_NOINLINE
#endif
// [@CC_NOINLINE}@]
// [@CC_NORETURN{@]
// \def ASMJIT_NORETURN
// The decorated function never returns (exit, assertion failure, etc...).
#if ASMJIT_CC_HAS_ATTRIBUTE_NORETURN
# define ASMJIT_NORETURN __attribute__((__noreturn__))
#elif ASMJIT_CC_HAS_DECLSPEC_NORETURN
# define ASMJIT_NORETURN __declspec(noreturn)
#else
# define ASMJIT_NORETURN
#endif
// [@CC_NORETURN}@]
// The calling-convention decorators below expand to something only on 32-bit
// X86 targets; 64-bit and ARM ABIs have a single fixed calling convention, so
// the macros expand to nothing there.
// [@CC_CDECL{@]
// \def ASMJIT_CDECL
// Standard C function calling convention decorator (__cdecl).
#if ASMJIT_ARCH_X86
# if ASMJIT_CC_HAS_ATTRIBUTE
#  define ASMJIT_CDECL __attribute__((__cdecl__))
# else
#  define ASMJIT_CDECL __cdecl
# endif
#else
# define ASMJIT_CDECL
#endif
// [@CC_CDECL}@]
// [@CC_STDCALL{@]
// \def ASMJIT_STDCALL
// StdCall function calling convention decorator (__stdcall).
#if ASMJIT_ARCH_X86
# if ASMJIT_CC_HAS_ATTRIBUTE
#  define ASMJIT_STDCALL __attribute__((__stdcall__))
# else
#  define ASMJIT_STDCALL __stdcall
# endif
#else
# define ASMJIT_STDCALL
#endif
// [@CC_STDCALL}@]
// [@CC_FASTCALL{@]
// \def ASMJIT_FASTCALL
// FastCall function calling convention decorator (__fastcall).
#if ASMJIT_ARCH_X86
# if ASMJIT_CC_HAS_ATTRIBUTE
#  define ASMJIT_FASTCALL __attribute__((__fastcall__))
# else
#  define ASMJIT_FASTCALL __fastcall
# endif
#else
# define ASMJIT_FASTCALL
#endif
// [@CC_FASTCALL}@]
// [@CC_REGPARM{@]
// \def ASMJIT_REGPARM(n)
// A custom calling convention which passes n arguments in registers.
#if ASMJIT_ARCH_X86 && ASMJIT_CC_HAS_ATTRIBUTE
# define ASMJIT_REGPARM(n) __attribute__((__regparm__(n)))
#else
# define ASMJIT_REGPARM(n)
#endif
// [@CC_REGPARM}@]
// [@CC_NOEXCEPT{@]
// \def ASMJIT_NOEXCEPT
// The decorated function never throws an exception (noexcept).
#if ASMJIT_CC_HAS_NOEXCEPT
# define ASMJIT_NOEXCEPT noexcept
#else
# define ASMJIT_NOEXCEPT
#endif
// [@CC_NOEXCEPT}@]
// [@CC_NOP{@]
// \def ASMJIT_NOP
// No operation.
#if !defined(ASMJIT_NOP)
# define ASMJIT_NOP ((void)0)
#endif
// [@CC_NOP}@]
// [@CC_ASSUME{@]
// \def ASMJIT_ASSUME(exp)
// Assume that the expression exp is always true.
#if ASMJIT_CC_HAS_ASSUME
# define ASMJIT_ASSUME(exp) __assume(exp)
#elif ASMJIT_CC_HAS_BUILTIN_ASSUME
# define ASMJIT_ASSUME(exp) __builtin_assume(exp)
#elif ASMJIT_CC_HAS_BUILTIN_UNREACHABLE
# define ASMJIT_ASSUME(exp) do { if (!(exp)) __builtin_unreachable(); } while (0)
#else
# define ASMJIT_ASSUME(exp) ((void)0)
#endif
// [@CC_ASSUME}@]
// [@CC_ASSUME_ALIGNED{@]
// \def ASMJIT_ASSUME_ALIGNED(p, alignment)
// Assume that the pointer 'p' is aligned to at least 'alignment' bytes.
#if ASMJIT_CC_HAS_ASSUME_ALIGNED
# define ASMJIT_ASSUME_ALIGNED(p, alignment) __assume_aligned(p, alignment)
#elif ASMJIT_CC_HAS_BUILTIN_ASSUME_ALIGNED
// NOTE(review): `__builtin_assume_aligned` returns `void*`, so this assignment
// form only compiles when `p` is itself `void*` (or in C) — confirm all call
// sites if this macro is ever used with a typed pointer in C++.
# define ASMJIT_ASSUME_ALIGNED(p, alignment) p = __builtin_assume_aligned(p, alignment)
#else
# define ASMJIT_ASSUME_ALIGNED(p, alignment) ((void)0)
#endif
// [@CC_ASSUME_ALIGNED}@]
// [@CC_EXPECT{@]
// \def ASMJIT_LIKELY(exp)
// Expression exp is likely to be true.
//
// \def ASMJIT_UNLIKELY(exp)
// Expression exp is likely to be false.
#if ASMJIT_CC_HAS_BUILTIN_EXPECT
# define ASMJIT_LIKELY(exp) __builtin_expect(!!(exp), 1)
# define ASMJIT_UNLIKELY(exp) __builtin_expect(!!(exp), 0)
#else
# define ASMJIT_LIKELY(exp) (exp)
# define ASMJIT_UNLIKELY(exp) (exp)
#endif
// [@CC_EXPECT}@]
// [@CC_FALLTHROUGH{@]
// \def ASMJIT_FALLTHROUGH
// The code falls through annotation (switch / case).
// Only Clang in C++11 mode gets a real annotation here; other compilers see a
// plain no-op expression.
#if ASMJIT_CC_CLANG && __cplusplus >= 201103L
# define ASMJIT_FALLTHROUGH [[clang::fallthrough]]
#else
# define ASMJIT_FALLTHROUGH (void)0
#endif
// [@CC_FALLTHROUGH}@]
// [@CC_UNUSED{@]
// \def ASMJIT_UNUSED(x)
// Mark a variable x as unused.
#define ASMJIT_UNUSED(x) (void)(x)
// [@CC_UNUSED}@]
// [@CC_OFFSET_OF{@]
// \def ASMJIT_OFFSET_OF(x, y).
// Get the offset of a member y of a struct x at compile-time.
// Uses address 0x1 (then subtracts 1) instead of the classic null pointer so
// compilers don't diagnose a null-pointer dereference.
#define ASMJIT_OFFSET_OF(x, y) ((int)(intptr_t)((const char*)&((const x*)0x1)->y) - 1)
// [@CC_OFFSET_OF}@]
// [@CC_ARRAY_SIZE{@]
// \def ASMJIT_ARRAY_SIZE(x)
// Get the array size of x at compile-time.
#define ASMJIT_ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0]))
// [@CC_ARRAY_SIZE}@]
// ============================================================================
// [asmjit::Build - STDTYPES]
// ============================================================================
// [@STDTYPES{@]
// MinGW needs <sys/types.h> for some POSIX typedefs used elsewhere.
#if defined(__MINGW32__) || defined(__MINGW64__)
# include <sys/types.h>
#endif
// MSVC before VS2010 (_MSC_VER < 1600) ships no <stdint.h>, so the fixed-width
// integer types are typedef'd manually from the compiler's __intN types
// (pre-VS2003 compilers don't even have __int8 aliases usable this way).
#if defined(_MSC_VER) && (_MSC_VER < 1600)
# include <limits.h>
# if !defined(ASMJIT_SUPPRESS_STD_TYPES)
#  if (_MSC_VER < 1300)
typedef signed char int8_t;
typedef signed short int16_t;
typedef signed int int32_t;
typedef signed __int64 int64_t;
typedef unsigned char uint8_t;
typedef unsigned short uint16_t;
typedef unsigned int uint32_t;
typedef unsigned __int64 uint64_t;
#  else
typedef __int8 int8_t;
typedef __int16 int16_t;
typedef __int32 int32_t;
typedef __int64 int64_t;
typedef unsigned __int8 uint8_t;
typedef unsigned __int16 uint16_t;
typedef unsigned __int32 uint32_t;
typedef unsigned __int64 uint64_t;
#  endif
# endif
#else
# include <stdint.h>
# include <limits.h>
#endif
// [@STDTYPES}@]
// ============================================================================
// [asmjit::Build - Dependencies]
// ============================================================================
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <algorithm>
#include <new>
#if ASMJIT_OS_POSIX
# include <pthread.h>
#endif // ASMJIT_OS_POSIX
// ============================================================================
// [asmjit::Build - Additional]
// ============================================================================
// Build host architecture if no architecture is selected.
#if !defined(ASMJIT_BUILD_HOST) && \
    !defined(ASMJIT_BUILD_X86) && \
    !defined(ASMJIT_BUILD_ARM)
# define ASMJIT_BUILD_HOST
#endif
// Detect host architecture if building only for host.
#if defined(ASMJIT_BUILD_HOST)
# if (ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64) && !defined(ASMJIT_BUILD_X86)
#  define ASMJIT_BUILD_X86
# endif // ASMJIT_ARCH_X86
#endif // ASMJIT_BUILD_HOST
// \def ASMJIT_UINT64_C(x)
// Portable 64-bit unsigned integer literal suffix (old MSVC lacks `ull`).
#if ASMJIT_CC_MSC
# define ASMJIT_UINT64_C(x) x##ui64
#else
# define ASMJIT_UINT64_C(x) x##ull
#endif
// \def ASMJIT_PACK32_4x8(A, B, C, D)
// Packs four bytes into a 32-bit integer so that byte A occupies the lowest
// memory address regardless of the host's endianness.
#if ASMJIT_ARCH_LE
# define ASMJIT_PACK32_4x8(A, B, C, D) ((A) + ((B) << 8) + ((C) << 16) + ((D) << 24))
#else
# define ASMJIT_PACK32_4x8(A, B, C, D) ((D) + ((C) << 8) + ((B) << 16) + ((A) << 24))
#endif
// Internal macros that are only used when building AsmJit itself.
// ASMJIT_FAVOR_SIZE asks GCC/Clang to optimize the function for size; it is
// intentionally defined only for ASMJIT_EXPORTS (library-internal) builds.
#if defined(ASMJIT_EXPORTS)
# if !defined(ASMJIT_DEBUG) && ASMJIT_CC_HAS_ATTRIBUTE_OPTIMIZE
#  define ASMJIT_FAVOR_SIZE __attribute__((__optimize__("Os")))
# else
#  define ASMJIT_FAVOR_SIZE
# endif
#endif // ASMJIT_EXPORTS
// ============================================================================
// [asmjit::Build - Test]
// ============================================================================
// Include a unit testing package if this is a `asmjit_test` build.
#if defined(ASMJIT_TEST)
# include "../../test/broken.h"
#endif // ASMJIT_TEST
// [Guard]
#endif // _ASMJIT_BUILD_H

View file

@ -0,0 +1,34 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_BASE_H
#define _ASMJIT_BASE_H
// [Dependencies]
#include "./base/arch.h"
#include "./base/assembler.h"
#include "./base/codebuilder.h"
#include "./base/codecompiler.h"
#include "./base/codeemitter.h"
#include "./base/codeholder.h"
#include "./base/constpool.h"
#include "./base/cpuinfo.h"
#include "./base/func.h"
#include "./base/globals.h"
#include "./base/inst.h"
#include "./base/logging.h"
#include "./base/operand.h"
#include "./base/osutils.h"
#include "./base/runtime.h"
#include "./base/simdtypes.h"
#include "./base/string.h"
#include "./base/utils.h"
#include "./base/vmem.h"
#include "./base/zone.h"
// [Guard]
#endif // _ASMJIT_BASE_H

View file

@ -0,0 +1,161 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Export]
#define ASMJIT_EXPORTS
// [Dependencies]
#include "../base/arch.h"
#if defined(ASMJIT_BUILD_X86)
#include "../x86/x86operand.h"
#endif // ASMJIT_BUILD_X86
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
// ============================================================================
// [asmjit::ArchInfo]
// ============================================================================
// Packed `ArchInfo` signatures indexed by `ArchInfo::kType*`. Each entry packs
// (type, subType, gpSize, gpCount) into one 32-bit value via ASMJIT_PACK32_4x8;
// `ArchInfo::init()` asserts that an entry's type byte matches its index, so
// keep the rows in enum order.
static const uint32_t archInfoTable[] = {
  // <-------------+---------------------+-----------------------+-------+
  //                | Type                | SubType               | GPInfo|
  // <-------------+---------------------+-----------------------+-------+
  ASMJIT_PACK32_4x8(ArchInfo::kTypeNone , ArchInfo::kSubTypeNone, 0, 0),
  ASMJIT_PACK32_4x8(ArchInfo::kTypeX86 , ArchInfo::kSubTypeNone, 4, 8),
  ASMJIT_PACK32_4x8(ArchInfo::kTypeX64 , ArchInfo::kSubTypeNone, 8, 16),
  ASMJIT_PACK32_4x8(ArchInfo::kTypeX32 , ArchInfo::kSubTypeNone, 8, 16),
  ASMJIT_PACK32_4x8(ArchInfo::kTypeA32 , ArchInfo::kSubTypeNone, 4, 16),
  ASMJIT_PACK32_4x8(ArchInfo::kTypeA64 , ArchInfo::kSubTypeNone, 8, 32)
};
// Initializes the architecture info to `type` / `subType`.
//
// The whole packed signature (including `_gpSize` / `_gpCount`) is loaded from
// `archInfoTable`; a `type` outside of the table falls back to index 0
// (kTypeNone), whose GP size/count are zero. The assert must stay BEFORE the
// `_type = type` assignment — it validates the table row against the index
// while `_type` still holds the table's value.
ASMJIT_FAVOR_SIZE void ArchInfo::init(uint32_t type, uint32_t subType) noexcept {
  uint32_t index = type < ASMJIT_ARRAY_SIZE(archInfoTable) ? type : uint32_t(0);

  // Make sure the `archInfoTable` array is correctly indexed.
  _signature = archInfoTable[index];
  ASMJIT_ASSERT(_type == index);

  // Even if the architecture is not known we setup its type and sub-type,
  // however, such architecture is not really useful.
  _type = type;
  _subType = subType;
}
// ============================================================================
// [asmjit::ArchUtils]
// ============================================================================
// Converts a TypeId to register information (`regInfo`) for `archType`.
//
// `typeIdInOut` may also hold a register type (values <= Reg::kRegMax), which
// is first translated to a TypeId. Abstract types (IntPtr/UIntPtr) are
// normalized to the target's concrete 32-bit or 64-bit integer type, and
// scalar F32/F64 are promoted to their single-element vector forms because
// they are allocated in vector registers. On success the normalized TypeId is
// written back to `typeIdInOut` and `regInfo` receives the register signature.
// Returns kErrorInvalidArch for non-X86 targets (ARM not implemented here).
ASMJIT_FAVOR_SIZE Error ArchUtils::typeIdToRegInfo(uint32_t archType, uint32_t& typeIdInOut, RegInfo& regInfo) noexcept {
  uint32_t typeId = typeIdInOut;

  // Zero the signature so it's clear in case that typeId is not invalid.
  regInfo._signature = 0;

#if defined(ASMJIT_BUILD_X86)
  if (ArchInfo::isX86Family(archType)) {
    // Passed RegType instead of TypeId?
    if (typeId <= Reg::kRegMax)
      typeId = x86OpData.archRegs.regTypeToTypeId[typeId];

    if (ASMJIT_UNLIKELY(!TypeId::isValid(typeId)))
      return DebugUtils::errored(kErrorInvalidTypeId);

    // First normalize architecture dependent types.
    if (TypeId::isAbstract(typeId)) {
      if (typeId == TypeId::kIntPtr)
        typeId = (archType == ArchInfo::kTypeX86) ? TypeId::kI32 : TypeId::kI64;
      else
        typeId = (archType == ArchInfo::kTypeX86) ? TypeId::kU32 : TypeId::kU64;
    }

    // Type size helps to construct all kinds of registers. If the size is zero
    // then the TypeId is invalid.
    uint32_t size = TypeId::sizeOf(typeId);
    if (ASMJIT_UNLIKELY(!size))
      return DebugUtils::errored(kErrorInvalidTypeId);

    // 80-bit floats have no register-allocatable representation here.
    if (ASMJIT_UNLIKELY(typeId == TypeId::kF80))
      return DebugUtils::errored(kErrorInvalidUseOfF80);

    uint32_t regType = 0;

    switch (typeId) {
      case TypeId::kI8:
      case TypeId::kU8:
        regType = X86Reg::kRegGpbLo;
        break;

      case TypeId::kI16:
      case TypeId::kU16:
        regType = X86Reg::kRegGpw;
        break;

      case TypeId::kI32:
      case TypeId::kU32:
        regType = X86Reg::kRegGpd;
        break;

      case TypeId::kI64:
      case TypeId::kU64:
        // 64-bit GP registers require a 64-bit target.
        if (archType == ArchInfo::kTypeX86)
          return DebugUtils::errored(kErrorInvalidUseOfGpq);
        regType = X86Reg::kRegGpq;
        break;

      // F32 and F64 are always promoted to use vector registers.
      case TypeId::kF32:
        typeId = TypeId::kF32x1;
        regType = X86Reg::kRegXmm;
        break;

      case TypeId::kF64:
        typeId = TypeId::kF64x1;
        regType = X86Reg::kRegXmm;
        break;

      // Mask registers {k}.
      case TypeId::kMask8:
      case TypeId::kMask16:
      case TypeId::kMask32:
      case TypeId::kMask64:
        regType = X86Reg::kRegK;
        break;

      // MMX registers.
      case TypeId::kMmx32:
      case TypeId::kMmx64:
        regType = X86Reg::kRegMm;
        break;

      // XMM|YMM|ZMM registers, selected by the type's byte size.
      default:
        if (size <= 16)
          regType = X86Reg::kRegXmm;
        else if (size == 32)
          regType = X86Reg::kRegYmm;
        else
          regType = X86Reg::kRegZmm;
        break;
    }

    typeIdInOut = typeId;
    regInfo._signature = x86OpData.archRegs.regInfo[regType].getSignature();
    return kErrorOk;
  }
#endif // ASMJIT_BUILD_X86

  return DebugUtils::errored(kErrorInvalidArch);
}
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"

View file

@ -0,0 +1,199 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_BASE_ARCH_H
#define _ASMJIT_BASE_ARCH_H
// [Dependencies]
#include "../base/globals.h"
#include "../base/operand.h"
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
//! \addtogroup asmjit_base
//! \{
// ============================================================================
// [asmjit::ArchInfo]
// ============================================================================
//! Information about a target architecture — its type, sub-type (instruction
//! set level / execution mode) and general-purpose register geometry, all
//! packed into a single 32-bit signature for cheap copy and comparison.
class ArchInfo {
public:
  //! Architecture type.
  ASMJIT_ENUM(Type) {
    kTypeNone  = 0,                      //!< No/Unknown architecture.

    // X86 architectures.
    kTypeX86   = 1,                      //!< X86 architecture (32-bit).
    kTypeX64   = 2,                      //!< X64 architecture (64-bit) (AMD64).
    kTypeX32   = 3,                      //!< X32 architecture (DEAD-END).

    // ARM architectures.
    kTypeA32   = 4,                      //!< ARM 32-bit architecture (AArch32/ARM/THUMB).
    kTypeA64   = 5,                      //!< ARM 64-bit architecture (AArch64).

    //! Architecture detected at compile-time (architecture of the host).
    kTypeHost  = ASMJIT_ARCH_X86   ? kTypeX86 :
                 ASMJIT_ARCH_X64   ? kTypeX64 :
                 ASMJIT_ARCH_ARM32 ? kTypeA32 :
                 ASMJIT_ARCH_ARM64 ? kTypeA64 : kTypeNone
  };

  //! Architecture sub-type or execution mode.
  ASMJIT_ENUM(SubType) {
    kSubTypeNone         = 0,            //!< Default mode (or no specific mode).

    // X86 sub-types.
    kSubTypeX86_AVX      = 1,            //!< Code generation uses AVX by default (VEC instructions).
    kSubTypeX86_AVX2     = 2,            //!< Code generation uses AVX2 by default (VEC instructions).
    kSubTypeX86_AVX512   = 3,            //!< Code generation uses AVX-512F by default (+32 vector regs).
    kSubTypeX86_AVX512VL = 4,            //!< Code generation uses AVX-512F-VL by default (+VL extensions).

    // ARM sub-types.
    kSubTypeA32_Thumb    = 8,            //!< THUMB|THUMB2 sub-type (only ARM in 32-bit mode).

    //! Sub-type detected at compile-time from the compiler's target options.
#if (ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64) && defined(__AVX512VL__)
    kSubTypeHost = kSubTypeX86_AVX512VL
#elif (ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64) && defined(__AVX512F__)
    kSubTypeHost = kSubTypeX86_AVX512
#elif (ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64) && defined(__AVX2__)
    kSubTypeHost = kSubTypeX86_AVX2
#elif (ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64) && defined(__AVX__)
    kSubTypeHost = kSubTypeX86_AVX
#elif (ASMJIT_ARCH_ARM32) && (defined(_M_ARMT) || defined(__thumb__) || defined(__thumb2__))
    kSubTypeHost = kSubTypeA32_Thumb
#else
    kSubTypeHost = 0
#endif
  };

  // --------------------------------------------------------------------------
  // [Utilities]
  // --------------------------------------------------------------------------

  //! Get if `archType` is X86, X64, or X32.
  static ASMJIT_INLINE bool isX86Family(uint32_t archType) noexcept { return archType >= kTypeX86 && archType <= kTypeX32; }
  //! Get if `archType` is ARM 32-bit or 64-bit.
  static ASMJIT_INLINE bool isArmFamily(uint32_t archType) noexcept { return archType >= kTypeA32 && archType <= kTypeA64; }

  // --------------------------------------------------------------------------
  // [Construction / Destruction]
  // --------------------------------------------------------------------------

  //! Default constructor creates an uninitialized (kTypeNone) instance.
  ASMJIT_INLINE ArchInfo() noexcept : _signature(0) {}
  ASMJIT_INLINE ArchInfo(const ArchInfo& other) noexcept : _signature(other._signature) {}
  explicit ASMJIT_INLINE ArchInfo(uint32_t type, uint32_t subType = kSubTypeNone) noexcept { init(type, subType); }

  //! Get the `ArchInfo` of the host (compile-time detected) architecture.
  ASMJIT_INLINE static ArchInfo host() noexcept { return ArchInfo(kTypeHost, kSubTypeHost); }

  // --------------------------------------------------------------------------
  // [Init / Reset]
  // --------------------------------------------------------------------------

  ASMJIT_INLINE bool isInitialized() const noexcept { return _type != kTypeNone; }

  ASMJIT_API void init(uint32_t type, uint32_t subType = kSubTypeNone) noexcept;
  ASMJIT_INLINE void reset() noexcept { _signature = 0; }

  // --------------------------------------------------------------------------
  // [Accessors]
  // --------------------------------------------------------------------------

  //! Get if the architecture is 32-bit.
  ASMJIT_INLINE bool is32Bit() const noexcept { return _gpSize == 4; }
  //! Get if the architecture is 64-bit.
  ASMJIT_INLINE bool is64Bit() const noexcept { return _gpSize == 8; }

  //! Get architecture type, see \ref Type.
  ASMJIT_INLINE uint32_t getType() const noexcept { return _type; }

  //! Get architecture sub-type, see \ref SubType.
  //!
  //! X86 & X64
  //! ---------
  //!
  //! Architecture subtype describe the highest instruction-set level that can
  //! be used.
  //!
  //! ARM32
  //! -----
  //!
  //! Architecture mode means the instruction encoding to be used when generating
  //! machine code, thus mode can be used to force generation of THUMB and THUMB2
  //! encoding or regular ARM encoding.
  //!
  //! ARM64
  //! -----
  //!
  //! No meaning yet.
  ASMJIT_INLINE uint32_t getSubType() const noexcept { return _subType; }

  //! Get if the architecture is X86, X64, or X32.
  ASMJIT_INLINE bool isX86Family() const noexcept { return isX86Family(_type); }
  //! Get if the architecture is ARM32 or ARM64.
  ASMJIT_INLINE bool isArmFamily() const noexcept { return isArmFamily(_type); }

  //! Get a size of a general-purpose register.
  ASMJIT_INLINE uint32_t getGpSize() const noexcept { return _gpSize; }
  //! Get number of general-purpose registers.
  ASMJIT_INLINE uint32_t getGpCount() const noexcept { return _gpCount; }

  // --------------------------------------------------------------------------
  // [Operator Overload]
  // --------------------------------------------------------------------------

  // Equality compares the whole packed signature (type, sub-type, gp info).
  ASMJIT_INLINE const ArchInfo& operator=(const ArchInfo& other) noexcept { _signature = other._signature; return *this; }
  ASMJIT_INLINE bool operator==(const ArchInfo& other) const noexcept { return _signature == other._signature; }
  ASMJIT_INLINE bool operator!=(const ArchInfo& other) const noexcept { return _signature != other._signature; }

  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------

  union {
    struct {
      uint8_t _type;                     //!< Architecture type.
      uint8_t _subType;                  //!< Architecture sub-type.
      uint8_t _gpSize;                   //!< Default size of a general purpose register.
      uint8_t _gpCount;                  //!< Count of all general purpose registers.
    };
    uint32_t _signature;                 //!< Architecture signature (32-bit int).
  };
};
// ============================================================================
// [asmjit::ArchRegs]
// ============================================================================
//! Information about all architecture registers.
//!
//! All three tables are indexed by register type (\ref Reg::Type), hence the
//! `Reg::kRegMax + 1` size.
struct ArchRegs {
  //! Register information and signatures indexed by \ref Reg::Type.
  RegInfo regInfo[Reg::kRegMax + 1];
  //! Count (maximum) of registers per \ref Reg::Type.
  uint8_t regCount[Reg::kRegMax + 1];
  //! Converts RegType to TypeId, see \ref TypeId::Id.
  uint8_t regTypeToTypeId[Reg::kRegMax + 1];
};
// ============================================================================
// [asmjit::ArchUtils]
// ============================================================================
//! Static helpers for architecture-dependent conversions.
struct ArchUtils {
  //! Converts `typeIdInOut` (normalized in place) to register information for
  //! the given `archType`; returns kErrorOk on success or an error code.
  ASMJIT_API static Error typeIdToRegInfo(uint32_t archType, uint32_t& typeIdInOut, RegInfo& regInfo) noexcept;
};
//! \}
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"
// [Guard]
#endif // _ASMJIT_BASE_ARCH_H

View file

@ -0,0 +1,447 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Export]
#define ASMJIT_EXPORTS
// [Dependencies]
#include "../base/assembler.h"
#include "../base/constpool.h"
#include "../base/utils.h"
#include "../base/vmem.h"
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
// ============================================================================
// [asmjit::Assembler - Construction / Destruction]
// ============================================================================
// Creates a new (detached) `Assembler`.
//
// All section/buffer pointers start as null; they are populated by
// `onAttach()` once the assembler is attached to a `CodeHolder`.
Assembler::Assembler() noexcept
  : CodeEmitter(kTypeAssembler),
    _section(nullptr),
    _bufferData(nullptr),
    _bufferEnd(nullptr),
    _bufferPtr(nullptr),
    _op4(),
    _op5() {}
// Destroys the `Assembler`, flushing the final buffer length back into the
// attached `CodeHolder` (via `sync()`) if still attached.
Assembler::~Assembler() noexcept {
  if (_code) sync();
}
// ============================================================================
// [asmjit::Assembler - Events]
// ============================================================================
// Called when the emitter is attached to `code`.
//
// Caches raw pointers into the first section's buffer so instruction emission
// doesn't have to go through `CodeHolder` for every byte, and clears the
// 5th/6th-operand spill slots.
Error Assembler::onAttach(CodeHolder* code) noexcept {
  // Attach to the end of the .text section.
  _section = code->_sections[0];
  uint8_t* p = _section->_buffer._data;

  _bufferData = p;
  _bufferEnd = p + _section->_buffer._capacity;
  _bufferPtr = p + _section->_buffer._length;

  _op4.reset();
  _op5.reset();

  return Base::onAttach(code);
}
// Called when the emitter is detached from its `CodeHolder` — clears every
// cached buffer pointer and both operand spill slots before delegating to the
// base class, as none of them may be used after detaching.
Error Assembler::onDetach(CodeHolder* code) noexcept {
  _bufferPtr  = nullptr;
  _bufferEnd  = nullptr;
  _bufferData = nullptr;
  _section    = nullptr;

  _op5.reset();
  _op4.reset();

  return Base::onDetach(code);
}
// ============================================================================
// [asmjit::Assembler - Code-Generation]
// ============================================================================
// Emits an instruction with up to 6 operands.
//
// The 4-operand `_emit()` overload is the real workhorse; operands 5 and 6
// are stashed in the `_op4` / `_op5` spill slots and advertised through the
// `kOptionOp4Op5Used` option flag.
Error Assembler::_emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4, const Operand_& o5) {
  _op4 = o4;
  _op5 = o5;
  _options |= kOptionOp4Op5Used;
  return _emit(instId, o0, o1, o2, o3);
}
// Emits an instruction whose operands are given as an array.
//
// Up to 6 operands are supported: the first four go straight to the 4-operand
// `_emit()`, the 5th/6th travel through the `_op4` / `_op5` spill slots with
// `kOptionOp4Op5Used` set. More than 6 operands is an error.
Error Assembler::_emitOpArray(uint32_t instId, const Operand_* opArray, size_t opCount) {
  if (ASMJIT_UNLIKELY(opCount > 6))
    return DebugUtils::errored(kErrorInvalidArgument);

  if (opCount >= 5) {
    _op4 = opArray[4];
    if (opCount == 6)
      _op5 = opArray[5];
    else
      _op5.reset();
    _options |= kOptionOp4Op5Used;
    return _emit(instId, opArray[0], opArray[1], opArray[2], opArray[3]);
  }

  // 0..4 operands - pad the missing ones with `_none`.
  const Operand_& o0 = opCount > 0 ? opArray[0] : _none;
  const Operand_& o1 = opCount > 1 ? opArray[1] : _none;
  const Operand_& o2 = opCount > 2 ? opArray[2] : _none;
  const Operand_& o3 = opCount > 3 ? opArray[3] : _none;
  return _emit(instId, o0, o1, o2, o3);
}
// ============================================================================
// [asmjit::Assembler - Sync]
// ============================================================================
// Propagates the cached write cursor back to the attached `CodeHolder`.
//
// Emission advances `_bufferPtr` without updating the section's stored length
// each time; this writes the final length so `CodeHolder` sees the real size.
void Assembler::sync() noexcept {
  ASMJIT_ASSERT(_code != nullptr); // Only called by CodeHolder, so we must be attached.
  ASMJIT_ASSERT(_section != nullptr); // One section must always be active, no matter what.
  ASMJIT_ASSERT(_bufferData == _section->_buffer._data); // `_bufferStart` is a shortcut to `_section->buffer.data`.

  // Update only if the current offset is greater than the section length.
  size_t offset = (size_t)(_bufferPtr - _bufferData);
  if (_section->getBuffer().getLength() < offset)
    _section->_buffer._length = offset;
}
// ============================================================================
// [asmjit::Assembler - Code-Buffer]
// ============================================================================
// Moves the emit cursor to `offset` within the code buffer.
//
// `offset` may be anywhere between zero and the current logical length, which
// is the maximum of the section's stored length and the current write cursor.
// Fails (and records the error) when the offset points past the end.
Error Assembler::setOffset(size_t offset) {
  if (_lastError) return _lastError;

  size_t length = std::max(_section->getBuffer().getLength(), getOffset());
  if (ASMJIT_UNLIKELY(offset > length))
    return setLastError(DebugUtils::errored(kErrorInvalidArgument));

  // If the `Assembler` generated any code the `_bufferPtr` may be higher than
  // the section length stored in `CodeHolder` as it doesn't update it each
  // time it generates machine code. This is the same as calling `sync()`.
  if (_section->_buffer._length < length)
    _section->_buffer._length = length;

  _bufferPtr = _bufferData + offset;
  return kErrorOk;
}
// ============================================================================
// [asmjit::Assembler - Comment]
// ============================================================================
// Writes a comment `s` of `len` bytes to the attached logger followed by a
// newline. A no-op when logging is compiled out or not enabled at runtime.
Error Assembler::comment(const char* s, size_t len) {
  if (_lastError) return _lastError;

#if !defined(ASMJIT_DISABLE_LOGGING)
  if (_globalOptions & kOptionLoggingEnabled) {
    // NOTE(review): assumes `kOptionLoggingEnabled` implies a non-null logger
    // on `_code` — confirm, as `getLogger()` is used without a null check.
    Logger* logger = _code->getLogger();
    logger->log(s, len);
    logger->log("\n", 1);
    return kErrorOk;
  }
#else
  ASMJIT_UNUSED(s);
  ASMJIT_UNUSED(len);
#endif

  return kErrorOk;
}
// ============================================================================
// [asmjit::Assembler - Building Blocks]
// ============================================================================
// Creates a new anonymous label.
//
// When the assembler is already in an error state, or label creation fails
// (the failure is recorded via `setLastError()`), the returned label keeps
// the default id and is therefore invalid.
Label Assembler::newLabel() {
  uint32_t labelId = 0;
  if (_lastError == kErrorOk) {
    ASMJIT_ASSERT(_code != nullptr);
    Error err = _code->newLabelId(labelId);
    if (ASMJIT_UNLIKELY(err != kErrorOk))
      setLastError(err);
  }
  return Label(labelId);
}
// Creates a new named label of the given `type`, optionally local to
// `parentId`.
//
// Mirrors `newLabel()`: in an error state, or when the code holder rejects
// the name (error recorded via `setLastError()`), an invalid label with the
// default id is returned.
Label Assembler::newNamedLabel(const char* name, size_t nameLength, uint32_t type, uint32_t parentId) {
  uint32_t labelId = 0;
  if (_lastError == kErrorOk) {
    ASMJIT_ASSERT(_code != nullptr);
    Error err = _code->newNamedLabelId(labelId, name, nameLength, type, parentId);
    if (ASMJIT_UNLIKELY(err != kErrorOk))
      setLastError(err);
  }
  return Label(labelId);
}
// Binds `label` to the current position in the active section.
//
// Also resolves every pending `LabelLink` that referenced the label before it
// was bound: relocation-based links get their data adjusted, direct links get
// the displacement patched in the code buffer. Each consumed link is released
// back to the code holder's heap. Errors (invalid label, double bind, bad
// displacement) are recorded via `setLastError()`.
Error Assembler::bind(const Label& label) {
  if (_lastError) return _lastError;

  ASMJIT_ASSERT(_code != nullptr);
  LabelEntry* le = _code->getLabelEntry(label);

  if (ASMJIT_UNLIKELY(!le))
    return setLastError(DebugUtils::errored(kErrorInvalidLabel));

  // Label can be bound only once.
  if (ASMJIT_UNLIKELY(le->isBound()))
    return setLastError(DebugUtils::errored(kErrorLabelAlreadyBound));

#if !defined(ASMJIT_DISABLE_LOGGING)
  if (_globalOptions & kOptionLoggingEnabled) {
    StringBuilderTmp<256> sb;
    if (le->hasName())
      sb.setFormat("%s:", le->getName());
    else
      sb.setFormat("L%u:", Operand::unpackId(label.getId()));

    size_t binSize = 0;
    if (!_code->_logger->hasOption(Logger::kOptionBinaryForm))
      binSize = Globals::kInvalidIndex;

    Logging::formatLine(sb, nullptr, binSize, 0, 0, getInlineComment());
    _code->_logger->log(sb.getData(), sb.getLength());
  }
#endif // !ASMJIT_DISABLE_LOGGING

  Error err = kErrorOk;
  size_t pos = getOffset();

  // Walk the chain of unresolved links and patch each site now that the
  // label's final position is known.
  LabelLink* link = le->_links;
  LabelLink* prev = nullptr;

  while (link) {
    intptr_t offset = link->offset;
    uint32_t relocId = link->relocId;

    if (relocId != RelocEntry::kInvalidId) {
      // Adjust relocation data.
      RelocEntry* re = _code->_relocations[relocId];
      re->_data += static_cast<uint64_t>(pos);
    }
    else {
      // Not using relocId, this means that we are overwriting a real
      // displacement in the CodeBuffer.
      int32_t patchedValue = static_cast<int32_t>(
        static_cast<intptr_t>(pos) - offset + link->rel);

      // Size of the value we are going to patch. Only BYTE/DWORD is allowed.
      uint32_t size = _bufferData[offset];
      if (size == 4)
        Utils::writeI32u(_bufferData + offset, static_cast<int32_t>(patchedValue));
      else if (size == 1 && Utils::isInt8(patchedValue))
        _bufferData[offset] = static_cast<uint8_t>(patchedValue & 0xFF);
      else
        err = DebugUtils::errored(kErrorInvalidDisplacement);
    }

    prev = link->prev;
    _code->_unresolvedLabelsCount--;
    _code->_baseHeap.release(link, sizeof(LabelLink));

    link = prev;
  }

  // Set as bound.
  le->_sectionId = _section->getId();
  le->_offset = pos;
  le->_links = nullptr;
  resetInlineComment();

  if (err != kErrorOk)
    return setLastError(err);

  return kErrorOk;
}
// Embeds `size` raw bytes from `data` at the current position, growing the
// code buffer when necessary and logging the bytes if logging is enabled.
Error Assembler::embed(const void* data, uint32_t size) {
  if (_lastError) return _lastError;

  if (getRemainingSpace() < size) {
    Error err = _code->growBuffer(&_section->_buffer, size);
    if (ASMJIT_UNLIKELY(err != kErrorOk)) return setLastError(err);
  }

  ::memcpy(_bufferPtr, data, size);
  _bufferPtr += size;

#if !defined(ASMJIT_DISABLE_LOGGING)
  if (_globalOptions & kOptionLoggingEnabled)
    _code->_logger->logBinary(data, size);
#endif // !ASMJIT_DISABLE_LOGGING

  return kErrorOk;
}
// Embeds the absolute address of `label` (pointer-sized, zero-filled for now)
// at the current position and records a rel-to-abs relocation for it.
//
// For an unbound label an additional `LabelLink` tied to the relocation is
// created so the address can be fixed up once the label is bound.
Error Assembler::embedLabel(const Label& label) {
  if (_lastError) return _lastError;
  ASMJIT_ASSERT(_code != nullptr);

  RelocEntry* re;
  LabelEntry* le = _code->getLabelEntry(label);

  if (ASMJIT_UNLIKELY(!le))
    return setLastError(DebugUtils::errored(kErrorInvalidLabel));

  Error err;
  uint32_t gpSize = getGpSize();

  if (getRemainingSpace() < gpSize) {
    err = _code->growBuffer(&_section->_buffer, gpSize);
    if (ASMJIT_UNLIKELY(err)) return setLastError(err);
  }

#if !defined(ASMJIT_DISABLE_LOGGING)
  if (_globalOptions & kOptionLoggingEnabled)
    _code->_logger->logf(gpSize == 4 ? ".dd L%u\n" : ".dq L%u\n", Operand::unpackId(label.getId()));
#endif // !ASMJIT_DISABLE_LOGGING

  err = _code->newRelocEntry(&re, RelocEntry::kTypeRelToAbs, gpSize);
  if (ASMJIT_UNLIKELY(err)) return setLastError(err);

  re->_sourceSectionId = _section->getId();
  re->_sourceOffset = static_cast<uint64_t>(getOffset());

  if (le->isBound()) {
    // Bound label - the relocation data is its final offset already.
    re->_targetSectionId = le->getSectionId();
    re->_data = static_cast<uint64_t>(static_cast<int64_t>(le->getOffset()));
  }
  else {
    // Unbound label - link the relocation so `bind()` can complete it later.
    LabelLink* link = _code->newLabelLink(le, _section->getId(), getOffset(), 0);
    if (ASMJIT_UNLIKELY(!link))
      return setLastError(DebugUtils::errored(kErrorNoHeapMemory));
    link->relocId = re->getId();
  }

  // Emit dummy DWORD/QWORD depending on the address size.
  ::memset(_bufferPtr, 0, gpSize);
  _bufferPtr += gpSize;

  return kErrorOk;
}
// Embed `pool` at the current position: align to the pool's alignment, bind
// `label` to the pool's start, then copy the pool content into the buffer.
//
// Returns kErrorOk on success; on failure records the error via
// setLastError() and returns it.
Error Assembler::embedConstPool(const Label& label, const ConstPool& pool) {
  if (_lastError) return _lastError;

  // FIX: record the error state like every other emitter function (and like
  // CodeBuilder::embedConstPool) instead of returning the error code without
  // updating `_lastError`.
  if (!isLabelValid(label))
    return setLastError(DebugUtils::errored(kErrorInvalidLabel));

  ASMJIT_PROPAGATE(align(kAlignData, static_cast<uint32_t>(pool.getAlignment())));
  ASMJIT_PROPAGATE(bind(label));

  size_t size = pool.getSize();
  if (getRemainingSpace() < size) {
    Error err = _code->growBuffer(&_section->_buffer, size);
    if (ASMJIT_UNLIKELY(err)) return setLastError(err);
  }

  uint8_t* p = _bufferPtr;
  pool.fill(p);

#if !defined(ASMJIT_DISABLE_LOGGING)
  if (_globalOptions & kOptionLoggingEnabled)
    _code->_logger->logBinary(p, size);
#endif // !ASMJIT_DISABLE_LOGGING

  _bufferPtr += size;
  return kErrorOk;
}
// ============================================================================
// [asmjit::Assembler - Emit-Helpers]
// ============================================================================
#if !defined(ASMJIT_DISABLE_LOGGING)
// Format and log one successfully emitted instruction (logging builds only).
//
// Called after the instruction bytes were written; `afterCursor` points past
// them while `_bufferPtr` is used as the start of the emitted bytes, so the
// emitted size is the difference of the two.
void Assembler::_emitLog(
  uint32_t instId, uint32_t options, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3,
  uint32_t relSize, uint32_t imLen, uint8_t* afterCursor) {

  Logger* logger = _code->getLogger();
  ASMJIT_ASSERT(logger != nullptr);
  ASMJIT_ASSERT(options & CodeEmitter::kOptionLoggingEnabled);

  StringBuilderTmp<256> sb;
  uint32_t logOptions = logger->getOptions();

  uint8_t* beforeCursor = _bufferPtr;
  intptr_t emittedSize = (intptr_t)(afterCursor - beforeCursor);

  sb.appendString(logger->getIndentation());

  // Collect all six possible operands. Operands 4/5 are stored out-of-band
  // in `_op4`/`_op5` and are only valid when kOptionOp4Op5Used is set.
  Operand_ opArray[6];
  opArray[0].copyFrom(o0);
  opArray[1].copyFrom(o1);
  opArray[2].copyFrom(o2);
  opArray[3].copyFrom(o3);

  if (options & kOptionOp4Op5Used) {
    opArray[4].copyFrom(_op4);
    opArray[5].copyFrom(_op5);
  }
  else {
    opArray[4].reset();
    opArray[5].reset();
  }

  Logging::formatInstruction(
    sb, logOptions,
    this, getArchType(),
    Inst::Detail(instId, options, _extraReg), opArray, 6);

  // Append the machine-code bytes only when the logger asks for binary form;
  // otherwise pass kInvalidIndex so no byte dump is produced.
  if ((logOptions & Logger::kOptionBinaryForm) != 0)
    Logging::formatLine(sb, _bufferPtr, emittedSize, relSize, imLen, getInlineComment());
  else
    Logging::formatLine(sb, nullptr, Globals::kInvalidIndex, 0, 0, getInlineComment());

  logger->log(sb.getData(), sb.getLength());
}
// Called after an instruction failed to emit - formats the failing
// instruction into a human-readable message and records it, together with
// `err`, as the last error. Per-instruction state (options, extra register,
// inline comment) is reset so it cannot leak into the next instruction.
Error Assembler::_emitFailed(
  Error err,
  uint32_t instId, uint32_t options, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3) {

  StringBuilderTmp<256> msg;
  msg.appendString(DebugUtils::errorAsString(err));
  msg.appendString(": ");

  // Gather all six operands; the 5th/6th come from `_op4`/`_op5` and are
  // only meaningful when flagged via kOptionOp4Op5Used.
  Operand_ ops[6];
  ops[0].copyFrom(o0);
  ops[1].copyFrom(o1);
  ops[2].copyFrom(o2);
  ops[3].copyFrom(o3);

  if (options & kOptionOp4Op5Used) {
    ops[4].copyFrom(_op4);
    ops[5].copyFrom(_op5);
  }
  else {
    ops[4].reset();
    ops[5].reset();
  }

  Logging::formatInstruction(
    msg, 0,
    this, getArchType(),
    Inst::Detail(instId, options, _extraReg), ops, 6);

  resetOptions();
  resetExtraReg();
  resetInlineComment();

  return setLastError(err, msg.getData());
}
#endif
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"

View file

@ -0,0 +1,154 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_BASE_ASSEMBLER_H
#define _ASMJIT_BASE_ASSEMBLER_H
// [Dependencies]
#include "../base/codeemitter.h"
#include "../base/codeholder.h"
#include "../base/operand.h"
#include "../base/simdtypes.h"
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
//! \addtogroup asmjit_base
//! \{
// ============================================================================
// [asmjit::Assembler]
// ============================================================================
//! Base assembler.
//!
//! This class implements a base interface that is used by architecture
//! specific assemblers.
//!
//! \sa CodeCompiler.
class ASMJIT_VIRTAPI Assembler : public CodeEmitter {
public:
  ASMJIT_NONCOPYABLE(Assembler)
  typedef CodeEmitter Base;

  // --------------------------------------------------------------------------
  // [Construction / Destruction]
  // --------------------------------------------------------------------------

  //! Create a new `Assembler` instance.
  ASMJIT_API Assembler() noexcept;
  //! Destroy the `Assembler` instance.
  ASMJIT_API virtual ~Assembler() noexcept;

  // --------------------------------------------------------------------------
  // [Events]
  // --------------------------------------------------------------------------

  //! Called when the emitter is attached to `code`.
  ASMJIT_API Error onAttach(CodeHolder* code) noexcept override;
  //! Called when the emitter is detached from its CodeHolder.
  ASMJIT_API Error onDetach(CodeHolder* code) noexcept override;

  // --------------------------------------------------------------------------
  // [Code-Generation]
  // --------------------------------------------------------------------------

  using CodeEmitter::_emit;

  ASMJIT_API Error _emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4, const Operand_& o5) override;
  ASMJIT_API Error _emitOpArray(uint32_t instId, const Operand_* opArray, size_t opCount) override;

  // --------------------------------------------------------------------------
  // [Code-Buffer]
  // --------------------------------------------------------------------------

  //! Called by \ref CodeHolder::sync().
  ASMJIT_API virtual void sync() noexcept;

  //! Get the capacity of the current CodeBuffer.
  ASMJIT_INLINE size_t getBufferCapacity() const noexcept { return (size_t)(_bufferEnd - _bufferData); }
  //! Get the number of remaining bytes in the current CodeBuffer.
  ASMJIT_INLINE size_t getRemainingSpace() const noexcept { return (size_t)(_bufferEnd - _bufferPtr); }
  //! Get the current position in the CodeBuffer.
  ASMJIT_INLINE size_t getOffset() const noexcept { return (size_t)(_bufferPtr - _bufferData); }

  //! Set the current position in the CodeBuffer to `offset`.
  //!
  //! NOTE: The `offset` cannot be outside of the buffer length (even if it's
  //! within buffer's capacity).
  ASMJIT_API Error setOffset(size_t offset);

  //! Get start of the CodeBuffer of the current section.
  ASMJIT_INLINE uint8_t* getBufferData() const noexcept { return _bufferData; }
  //! Get end (first invalid byte) of the current section.
  ASMJIT_INLINE uint8_t* getBufferEnd() const noexcept { return _bufferEnd; }
  //! Get pointer in the CodeBuffer of the current section.
  ASMJIT_INLINE uint8_t* getBufferPtr() const noexcept { return _bufferPtr; }

  // --------------------------------------------------------------------------
  // [Code-Generation]
  // --------------------------------------------------------------------------

  ASMJIT_API Label newLabel() override;
  ASMJIT_API Label newNamedLabel(
    const char* name,
    size_t nameLength = Globals::kInvalidIndex,
    uint32_t type = Label::kTypeGlobal,
    uint32_t parentId = 0) override;
  ASMJIT_API Error bind(const Label& label) override;
  ASMJIT_API Error embed(const void* data, uint32_t size) override;
  ASMJIT_API Error embedLabel(const Label& label) override;
  ASMJIT_API Error embedConstPool(const Label& label, const ConstPool& pool) override;
  ASMJIT_API Error comment(const char* s, size_t len = Globals::kInvalidIndex) override;

  // --------------------------------------------------------------------------
  // [Emit-Helpers]
  // --------------------------------------------------------------------------

protected:
#if !defined(ASMJIT_DISABLE_LOGGING)
  //! Format and log a successfully emitted instruction.
  void _emitLog(
    uint32_t instId, uint32_t options, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3,
    uint32_t relSize, uint32_t imLen, uint8_t* afterCursor);

  //! Format the failing instruction and record `err` as the last error.
  Error _emitFailed(
    Error err,
    uint32_t instId, uint32_t options, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3);
#else
  ASMJIT_INLINE Error _emitFailed(
    uint32_t err,
    uint32_t instId, uint32_t options, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3) {

    resetOptions();
    // FIX: also reset the extra register, matching the logging-enabled
    // implementation in assembler.cpp; previously `_extraReg` leaked into
    // the next instruction in ASMJIT_DISABLE_LOGGING builds.
    resetExtraReg();
    resetInlineComment();
    return setLastError(err);
  }
#endif

  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------

public:
  SectionEntry* _section;                //!< Current section where the assembling happens.
  uint8_t* _bufferData;                  //!< Start of the CodeBuffer of the current section.
  uint8_t* _bufferEnd;                   //!< End (first invalid byte) of the current section.
  uint8_t* _bufferPtr;                   //!< Pointer in the CodeBuffer of the current section.
  Operand_ _op4;                         //!< 5th operand data, used only temporarily.
  Operand_ _op5;                         //!< 6th operand data, used only temporarily.
};
//! \}
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"
// [Guard]
#endif // _ASMJIT_BASE_ASSEMBLER_H

View file

@ -0,0 +1,584 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Export]
#define ASMJIT_EXPORTS
// [Guard]
#include "../asmjit_build.h"
#if !defined(ASMJIT_DISABLE_BUILDER)
// [Dependencies]
#include "../base/codebuilder.h"
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
// ============================================================================
// [asmjit::CodeBuilder - Construction / Destruction]
// ============================================================================
// Construct a detached CodeBuilder with an empty node list.
//
// The zone block sizes are chosen just below a power of two - presumably so
// a block plus its header (`Zone::kZoneOverhead`) fits an allocation granule;
// confirm against Zone's documentation.
CodeBuilder::CodeBuilder() noexcept
  : CodeEmitter(kTypeBuilder),
    _cbBaseZone(32768 - Zone::kZoneOverhead),
    _cbDataZone(16384 - Zone::kZoneOverhead),
    _cbPassZone(32768 - Zone::kZoneOverhead),
    _cbHeap(&_cbBaseZone),
    _cbPasses(),
    _cbLabels(),
    _firstNode(nullptr),
    _lastNode(nullptr),
    _cursor(nullptr),
    _position(0),
    _nodeFlags(0) {}
CodeBuilder::~CodeBuilder() noexcept {}
// ============================================================================
// [asmjit::CodeBuilder - Events]
// ============================================================================
// Attach to `code`; the builder has no per-attach state of its own.
Error CodeBuilder::onAttach(CodeHolder* code) noexcept {
  return Base::onAttach(code);
}
// Detach from `code`: release every container and zone and clear the node
// list so the builder can later be attached to another CodeHolder.
Error CodeBuilder::onDetach(CodeHolder* code) noexcept {
  _cbPasses.reset();
  _cbLabels.reset();
  _cbHeap.reset(&_cbBaseZone);

  _cbBaseZone.reset(false);
  _cbDataZone.reset(false);
  _cbPassZone.reset(false);

  _firstNode = nullptr;
  _lastNode = nullptr;
  _cursor = nullptr;

  _position = 0;
  _nodeFlags = 0;

  return Base::onDetach(code);
}
// ============================================================================
// [asmjit::CodeBuilder - Node-Factory]
// ============================================================================
// Look up (or lazily create) the CBLabel node for label `id` and store it
// into `pOut`. Fails with kErrorInvalidLabel when the id is unknown to the
// attached CodeHolder.
Error CodeBuilder::getCBLabel(CBLabel** pOut, uint32_t id) noexcept {
  if (_lastError) return _lastError;
  ASMJIT_ASSERT(_code != nullptr);

  size_t index = Operand::unpackId(id);
  if (ASMJIT_UNLIKELY(index >= _code->getLabelsCount()))
    return DebugUtils::errored(kErrorInvalidLabel);

  // Grow the id -> node map on demand.
  if (index >= _cbLabels.getLength())
    ASMJIT_PROPAGATE(_cbLabels.resize(&_cbHeap, index + 1));

  CBLabel* node = _cbLabels[index];
  if (node == nullptr) {
    // No node yet for this id - create and remember one.
    node = newNodeT<CBLabel>(id);
    if (ASMJIT_UNLIKELY(node == nullptr))
      return DebugUtils::errored(kErrorNoHeapMemory);
    _cbLabels[index] = node;
  }

  *pOut = node;
  return kErrorOk;
}
// Allocate a fresh label id on the CodeHolder, assign it to `node` and
// record the node in the `_cbLabels` map.
Error CodeBuilder::registerLabelNode(CBLabel* node) noexcept {
  if (_lastError) return _lastError;
  ASMJIT_ASSERT(_code != nullptr);

  // Don't call setLastError() from here, we are noexcept and we are called
  // by `newLabelNode()` and `newFuncNode()`, which are noexcept as well.
  uint32_t id;
  ASMJIT_PROPAGATE(_code->newLabelId(id));
  size_t index = Operand::unpackId(id);

  // We just added one label so it must be true.
  // (i.e. the map is currently shorter than index + 1 and must be grown.)
  ASMJIT_ASSERT(_cbLabels.getLength() < index + 1);
  ASMJIT_PROPAGATE(_cbLabels.resize(&_cbHeap, index + 1));

  _cbLabels[index] = node;
  node->_id = id;
  return kErrorOk;
}
// Allocate a CBLabel node and register it under a freshly allocated label
// id; returns null on allocation or registration failure.
CBLabel* CodeBuilder::newLabelNode() noexcept {
  CBLabel* node = newNodeT<CBLabel>();
  if (node == nullptr)
    return nullptr;
  if (registerLabelNode(node) != kErrorOk)
    return nullptr;
  return node;
}
// Create a new CBAlign node; `mode` selects the alignment kind (e.g.
// kAlignData) and `alignment` is the requested byte alignment.
CBAlign* CodeBuilder::newAlignNode(uint32_t mode, uint32_t alignment) noexcept {
  return newNodeT<CBAlign>(mode, alignment);
}
// Create a new CBData node holding `size` bytes. Payloads larger than the
// node's inline buffer are copied into the data zone first; passing null
// `data` reserves uninitialized storage.
CBData* CodeBuilder::newDataNode(const void* data, uint32_t size) noexcept {
  if (size > CBData::kInlineBufferSize) {
    void* external = _cbDataZone.alloc(size);
    if (external == nullptr)
      return nullptr;

    if (data != nullptr)
      ::memcpy(external, data, size);
    data = external;
  }
  return newNodeT<CBData>(const_cast<void*>(data), size);
}
// Create a new CBConstPool node and register it as a label so the pool can
// be referenced and bound like any other label (the node is passed where a
// CBLabel* is expected, so CBConstPool is presumably a CBLabel subclass).
CBConstPool* CodeBuilder::newConstPool() noexcept {
  CBConstPool* node = newNodeT<CBConstPool>();
  if (!node || registerLabelNode(node) != kErrorOk)
    return nullptr;
  return node;
}
// Create a new CBComment node. A non-empty comment is duplicated
// (null-terminated) into the data zone; `len == kInvalidIndex` means
// "compute the length with strlen".
CBComment* CodeBuilder::newCommentNode(const char* s, size_t len) noexcept {
  if (s != nullptr) {
    if (len == Globals::kInvalidIndex)
      len = ::strlen(s);

    if (len != 0) {
      s = static_cast<char*>(_cbDataZone.dup(s, len, true));
      if (s == nullptr)
        return nullptr;
    }
  }
  return newNodeT<CBComment>(s);
}
// ============================================================================
// [asmjit::CodeBuilder - Code-Emitter]
// ============================================================================
// Create a new anonymous label.
//
// On failure the error is recorded via setLastError() and a label with a
// kInvalidValue id is returned.
Label CodeBuilder::newLabel() {
  uint32_t id = kInvalidValue;
  if (!_lastError) {
    // The node starts with an invalid id; registerLabelNode() allocates the
    // real id on the CodeHolder and assigns it to the node.
    CBLabel* node = newNodeT<CBLabel>(id);
    if (ASMJIT_UNLIKELY(!node)) {
      setLastError(DebugUtils::errored(kErrorNoHeapMemory));
    }
    else {
      Error err = registerLabelNode(node);
      if (ASMJIT_UNLIKELY(err))
        setLastError(err);
      else
        id = node->getId();
    }
  }
  return Label(id);
}
// Create a new named label of the given `type`, optionally scoped under
// `parentId`.
//
// On failure the error is recorded via setLastError() and a label with a
// kInvalidValue id is returned.
Label CodeBuilder::newNamedLabel(const char* name, size_t nameLength, uint32_t type, uint32_t parentId) {
  uint32_t id = kInvalidValue;
  if (!_lastError) {
    CBLabel* node = newNodeT<CBLabel>(id);
    if (ASMJIT_UNLIKELY(!node)) {
      setLastError(DebugUtils::errored(kErrorNoHeapMemory));
    }
    else {
      Error err = _code->newNamedLabelId(id, name, nameLength, type, parentId);
      if (ASMJIT_UNLIKELY(err)) {
        setLastError(err);
      }
      else {
        // FIX: assign the freshly allocated id to the node and register the
        // node in `_cbLabels`, mirroring registerLabelNode(). The original
        // code did `id = node->getId()`, which overwrote the valid id with
        // the node's initial kInvalidValue and left the node unregistered,
        // so getCBLabel()/bind() could never find it.
        node->_id = id;
        size_t index = Operand::unpackId(id);

        if (index >= _cbLabels.getLength()) {
          Error resizeErr = _cbLabels.resize(&_cbHeap, index + 1);
          if (ASMJIT_UNLIKELY(resizeErr)) {
            setLastError(resizeErr);
            return Label(id);
          }
        }
        _cbLabels[index] = node;
      }
    }
  }
  return Label(id);
}
// Bind `label` by appending its CBLabel node at the cursor position.
Error CodeBuilder::bind(const Label& label) {
  if (_lastError) return _lastError;

  CBLabel* node = nullptr;
  Error err = getCBLabel(&node, label);
  if (err != kErrorOk)
    return setLastError(err);

  addNode(node);
  return kErrorOk;
}
// Record an alignment directive as a CBAlign node.
Error CodeBuilder::align(uint32_t mode, uint32_t alignment) {
  if (_lastError) return _lastError;

  CBAlign* node = newAlignNode(mode, alignment);
  if (node == nullptr)
    return setLastError(DebugUtils::errored(kErrorNoHeapMemory));

  addNode(node);
  return kErrorOk;
}
// Record `size` bytes of raw data as a CBData node.
Error CodeBuilder::embed(const void* data, uint32_t size) {
  if (_lastError) return _lastError;

  CBData* node = newDataNode(data, size);
  if (node == nullptr)
    return setLastError(DebugUtils::errored(kErrorNoHeapMemory));

  addNode(node);
  return kErrorOk;
}
// Record "embed the address of `label`" as a CBLabelData node; the actual
// bytes are produced later during serialization.
Error CodeBuilder::embedLabel(const Label& label) {
  if (_lastError) return _lastError;

  CBLabelData* node = newNodeT<CBLabelData>(label.getId());
  if (node == nullptr)
    return setLastError(DebugUtils::errored(kErrorNoHeapMemory));

  addNode(node);
  return kErrorOk;
}
// Embed `pool`: align to the pool's alignment, bind `label` to its start,
// then record the serialized pool content as a CBData node.
Error CodeBuilder::embedConstPool(const Label& label, const ConstPool& pool) {
  if (_lastError) return _lastError;

  if (!isLabelValid(label))
    return setLastError(DebugUtils::errored(kErrorInvalidLabel));

  ASMJIT_PROPAGATE(align(kAlignData, static_cast<uint32_t>(pool.getAlignment())));
  ASMJIT_PROPAGATE(bind(label));

  // Reserve uninitialized storage and let the pool serialize into it.
  CBData* node = newDataNode(nullptr, static_cast<uint32_t>(pool.getSize()));
  if (node == nullptr)
    return setLastError(DebugUtils::errored(kErrorNoHeapMemory));

  pool.fill(node->getData());
  addNode(node);
  return kErrorOk;
}
// Record a standalone comment as a CBComment node.
Error CodeBuilder::comment(const char* s, size_t len) {
  if (_lastError) return _lastError;

  CBComment* node = newCommentNode(s, len);
  if (node == nullptr)
    return setLastError(DebugUtils::errored(kErrorNoHeapMemory));

  addNode(node);
  return kErrorOk;
}
// ============================================================================
// [asmjit::CodeBuilder - Node-Management]
// ============================================================================
// Insert `node` after the cursor and make it the new cursor.
//
// NOTE(review): when the cursor is null but the list is non-empty, the node
// is prepended at the head rather than appended at the tail - confirm this
// is the intended semantics for a cleared cursor.
CBNode* CodeBuilder::addNode(CBNode* node) noexcept {
  ASMJIT_ASSERT(node);
  ASMJIT_ASSERT(node->_prev == nullptr);
  ASMJIT_ASSERT(node->_next == nullptr);

  if (!_cursor) {
    if (!_firstNode) {
      // Empty list - `node` becomes both head and tail.
      _firstNode = node;
      _lastNode = node;
    }
    else {
      // No cursor - link `node` in front of the current head.
      node->_next = _firstNode;
      _firstNode->_prev = node;
      _firstNode = node;
    }
  }
  else {
    // Link `node` between the cursor and its successor.
    CBNode* prev = _cursor;
    CBNode* next = _cursor->_next;

    node->_prev = prev;
    node->_next = next;

    prev->_next = node;
    if (next)
      next->_prev = node;
    else
      _lastNode = node;     // Cursor was the tail; `node` is the new tail.
  }

  _cursor = node;
  return node;
}
// Link the detached `node` immediately after `ref`; updates `_lastNode`
// when `ref` was the tail. The cursor is left untouched.
CBNode* CodeBuilder::addAfter(CBNode* node, CBNode* ref) noexcept {
  ASMJIT_ASSERT(node);
  ASMJIT_ASSERT(ref);
  ASMJIT_ASSERT(node->_prev == nullptr);
  ASMJIT_ASSERT(node->_next == nullptr);

  CBNode* following = ref->_next;

  node->_prev = ref;
  node->_next = following;
  ref->_next = node;

  if (following == nullptr)
    _lastNode = node;
  else
    following->_prev = node;

  return node;
}
// Link the detached `node` immediately before `ref`; updates `_firstNode`
// when `ref` was the head. The cursor is left untouched.
CBNode* CodeBuilder::addBefore(CBNode* node, CBNode* ref) noexcept {
  ASMJIT_ASSERT(node != nullptr);
  ASMJIT_ASSERT(node->_prev == nullptr);
  ASMJIT_ASSERT(node->_next == nullptr);
  ASMJIT_ASSERT(ref != nullptr);

  CBNode* preceding = ref->_prev;

  node->_prev = preceding;
  node->_next = ref;
  ref->_prev = node;

  if (preceding == nullptr)
    _firstNode = node;
  else
    preceding->_next = node;

  return node;
}
// Bookkeeping after a node was unlinked: if it was a jump (or conditional
// jump) with a target label, remove it from the label's incoming-jump chain
// and drop one reference from the label. `self` is currently unused.
static ASMJIT_INLINE void CodeBuilder_nodeRemoved(CodeBuilder* self, CBNode* node_) noexcept {
  if (node_->isJmpOrJcc()) {
    CBJump* node = static_cast<CBJump*>(node_);
    CBLabel* label = node->getTarget();

    if (label) {
      // Disconnect `node` from the label's singly-linked `_from` chain.
      CBJump** pPrev = &label->_from;
      for (;;) {
        // NOTE(review): this assertion and the `!current` break below are
        // contradictory - if the node can legitimately be absent from the
        // chain the assert is wrong; if it cannot, the break is dead code.
        ASMJIT_ASSERT(*pPrev != nullptr);
        CBJump* current = *pPrev;

        if (!current) break;
        if (current == node) {
          *pPrev = node->_jumpNext;
          break;
        }
        pPrev = &current->_jumpNext;
      }

      label->subNumRefs();
    }
  }
}
// Unlink `node` from the list and return it. The node is not destroyed -
// its memory lives in the builder's zone. If the cursor pointed at `node`
// it is moved to the previous node.
//
// NOTE(review): `node` must currently be linked in this builder's list;
// the _firstNode/_lastNode checks are what keeps `prev`/`next` dereferences
// safe at the list ends.
CBNode* CodeBuilder::removeNode(CBNode* node) noexcept {
  CBNode* prev = node->_prev;
  CBNode* next = node->_next;

  if (_firstNode == node)
    _firstNode = next;
  else
    prev->_next = next;

  if (_lastNode == node)
    _lastNode = prev;
  else
    next->_prev = prev;

  node->_prev = nullptr;
  node->_next = nullptr;

  if (_cursor == node)
    _cursor = prev;
  CodeBuilder_nodeRemoved(this, node);

  return node;
}
// Unlink the inclusive range [first, last] from the list. The nodes are not
// destroyed (zone-allocated); the cursor is moved before the range if it
// pointed into it.
void CodeBuilder::removeNodes(CBNode* first, CBNode* last) noexcept {
  if (first == last) {
    removeNode(first);
    return;
  }

  // Unlink the whole range from the surrounding list in one step.
  CBNode* prev = first->_prev;
  CBNode* next = last->_next;

  if (_firstNode == first)
    _firstNode = next;
  else
    prev->_next = next;

  if (_lastNode == last)
    _lastNode = prev;
  else
    next->_prev = prev;

  // Detach the removed nodes one by one.
  CBNode* node = first;
  for (;;) {
    // Read the successor before clearing the node's links.
    CBNode* following = node->getNext();

    node->_prev = nullptr;
    node->_next = nullptr;

    if (_cursor == node)
      _cursor = prev;
    CodeBuilder_nodeRemoved(this, node);

    if (node == last)
      break;

    // FIX: the assertion originally ran before the `node == last` check, so
    // it fired (debug builds only) whenever the removed range ended at the
    // tail of the list, where `last->getNext()` is legitimately null. It is
    // only an invariant for nodes strictly inside the range. Also renamed
    // the inner variable, which shadowed the outer `next`.
    ASMJIT_ASSERT(following != nullptr);
    node = following;
  }
}
// Replace the cursor with `node` and hand back the node it previously
// pointed at.
CBNode* CodeBuilder::setCursor(CBNode* node) noexcept {
  CBNode* previous = _cursor;
  _cursor = node;
  return previous;
}
// ============================================================================
// [asmjit::CodeBuilder - Passes]
// ============================================================================
// Linear search for a registered pass whose name matches `name` exactly;
// returns null when no such pass exists.
ASMJIT_FAVOR_SIZE CBPass* CodeBuilder::getPassByName(const char* name) const noexcept {
  size_t count = _cbPasses.getLength();
  for (size_t i = 0; i < count; i++) {
    CBPass* pass = _cbPasses[i];
    if (::strcmp(pass->getName(), name) == 0)
      return pass;
  }
  return nullptr;
}
// Register `pass` with this builder and take ownership of it.
ASMJIT_FAVOR_SIZE Error CodeBuilder::addPass(CBPass* pass) noexcept {
  if (ASMJIT_UNLIKELY(pass == nullptr)) {
    // Since this is directly called by `addPassT()` we treat `null` argument
    // as out-of-memory condition. Otherwise it would be API misuse.
    return DebugUtils::errored(kErrorNoHeapMemory);
  }
  else if (ASMJIT_UNLIKELY(pass->_cb)) {
    // Kind of weird, but okay...
    // Re-adding a pass already owned by this builder is a no-op; a pass
    // owned by another builder is rejected.
    if (pass->_cb == this)
      return kErrorOk;
    return DebugUtils::errored(kErrorInvalidState);
  }

  ASMJIT_PROPAGATE(_cbPasses.append(&_cbHeap, pass));
  pass->_cb = this;
  return kErrorOk;
}
// Unregister `pass` (if it belongs to this builder) and destroy it.
//
// The destructor is invoked explicitly because passes are zone-allocated
// (see newPassT) - there is no matching `delete`; the memory is reclaimed
// only when the zone is reset.
ASMJIT_FAVOR_SIZE Error CodeBuilder::deletePass(CBPass* pass) noexcept {
  if (ASMJIT_UNLIKELY(pass == nullptr))
    return DebugUtils::errored(kErrorInvalidArgument);

  if (pass->_cb != nullptr) {
    // A pass owned by a different builder cannot be deleted through this one.
    if (pass->_cb != this)
      return DebugUtils::errored(kErrorInvalidState);

    size_t index = _cbPasses.indexOf(pass);
    ASMJIT_ASSERT(index != Globals::kInvalidIndex);

    pass->_cb = nullptr;
    _cbPasses.removeAt(index);
  }

  pass->~CBPass();
  return kErrorOk;
}
// ============================================================================
// [asmjit::CodeBuilder - Serialization]
// ============================================================================
// Re-emit the whole node list into `dst` (typically an Assembler), node by
// node, forwarding inline comments, per-instruction options and the extra
// register. Stops at and returns the first error reported by `dst`.
//
// NOTE(review): the do/while dereferences getFirstNode() without a null
// check, so serializing an empty builder would crash - confirm callers
// guarantee at least one node.
Error CodeBuilder::serialize(CodeEmitter* dst) {
  Error err = kErrorOk;
  CBNode* node_ = getFirstNode();

  do {
    // Forward the node's inline comment (if any) to the destination.
    dst->setInlineComment(node_->getInlineComment());

    switch (node_->getType()) {
      case CBNode::kNodeAlign: {
        CBAlign* node = static_cast<CBAlign*>(node_);
        err = dst->align(node->getMode(), node->getAlignment());
        break;
      }

      case CBNode::kNodeData: {
        CBData* node = static_cast<CBData*>(node_);
        err = dst->embed(node->getData(), node->getSize());
        break;
      }

      // A function node serializes as a plain label bind.
      case CBNode::kNodeFunc:
      case CBNode::kNodeLabel: {
        CBLabel* node = static_cast<CBLabel*>(node_);
        err = dst->bind(node->getLabel());
        break;
      }

      case CBNode::kNodeLabelData: {
        CBLabelData* node = static_cast<CBLabelData*>(node_);
        err = dst->embedLabel(node->getLabel());
        break;
      }

      case CBNode::kNodeConstPool: {
        CBConstPool* node = static_cast<CBConstPool*>(node_);
        err = dst->embedConstPool(node->getLabel(), node->getConstPool());
        break;
      }

      // Instructions and function calls re-emit with their recorded
      // options and extra register.
      case CBNode::kNodeInst:
      case CBNode::kNodeFuncCall: {
        CBInst* node = node_->as<CBInst>();
        dst->setOptions(node->getOptions());
        dst->setExtraReg(node->getExtraReg());
        err = dst->emitOpArray(node->getInstId(), node->getOpArray(), node->getOpCount());
        break;
      }

      case CBNode::kNodeComment: {
        CBComment* node = static_cast<CBComment*>(node_);
        err = dst->comment(node->getInlineComment());
        break;
      }

      // Other node types produce no output.
      default:
        break;
    }

    if (err) break;
    node_ = node_->getNext();
  } while (node_);

  return err;
}
// ============================================================================
// [asmjit::CBPass]
// ============================================================================
// Construct a detached pass. `name` is stored, not copied, so it must
// outlive the pass (string literals in practice).
CBPass::CBPass(const char* name) noexcept
  : _cb(nullptr),
    _name(name) {}
CBPass::~CBPass() noexcept {}
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"
// [Guard]
#endif // !ASMJIT_DISABLE_BUILDER

View file

@ -0,0 +1,915 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_BASE_CODEBUILDER_H
#define _ASMJIT_BASE_CODEBUILDER_H
#include "../asmjit_build.h"
#if !defined(ASMJIT_DISABLE_BUILDER)
// [Dependencies]
#include "../base/assembler.h"
#include "../base/codeholder.h"
#include "../base/constpool.h"
#include "../base/inst.h"
#include "../base/operand.h"
#include "../base/utils.h"
#include "../base/zone.h"
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
// ============================================================================
// [Forward Declarations]
// ============================================================================
class CBNode;
class CBPass;
class CBAlign;
class CBComment;
class CBConstPool;
class CBData;
class CBInst;
class CBJump;
class CBLabel;
class CBLabelData;
class CBSentinel;
//! \addtogroup asmjit_base
//! \{
// ============================================================================
// [asmjit::CodeBuilder]
// ============================================================================
//! Code emitter that records emitted code as a double-linked list of nodes
//! (\ref CBNode) instead of encoding it immediately, so passes can inspect
//! and transform the code before it is serialized to a real emitter.
class ASMJIT_VIRTAPI CodeBuilder : public CodeEmitter {
public:
  ASMJIT_NONCOPYABLE(CodeBuilder)
  typedef CodeEmitter Base;

  // --------------------------------------------------------------------------
  // [Construction / Destruction]
  // --------------------------------------------------------------------------

  //! Create a new `CodeBuilder` instance.
  ASMJIT_API CodeBuilder() noexcept;
  //! Destroy the `CodeBuilder` instance.
  ASMJIT_API virtual ~CodeBuilder() noexcept;

  // --------------------------------------------------------------------------
  // [Events]
  // --------------------------------------------------------------------------

  ASMJIT_API virtual Error onAttach(CodeHolder* code) noexcept override;
  ASMJIT_API virtual Error onDetach(CodeHolder* code) noexcept override;

  // --------------------------------------------------------------------------
  // [Accessors]
  // --------------------------------------------------------------------------

  //! Get a vector of CBPass objects that will be executed by `process()`.
  ASMJIT_INLINE const ZoneVector<CBPass*>& getPasses() const noexcept { return _cbPasses; }

  //! Get a vector of CBLabel nodes.
  //!
  //! NOTE: If a label of some index is not associated with `CodeBuilder` it
  //! would be null, so always check for nulls if you iterate over the vector.
  ASMJIT_INLINE const ZoneVector<CBLabel*>& getLabels() const noexcept { return _cbLabels; }

  //! Get the first node.
  ASMJIT_INLINE CBNode* getFirstNode() const noexcept { return _firstNode; }
  //! Get the last node.
  ASMJIT_INLINE CBNode* getLastNode() const noexcept { return _lastNode; }

  // --------------------------------------------------------------------------
  // [Node-Management]
  // --------------------------------------------------------------------------

  //! \internal
  //!
  //! Allocate a node of type `T` from the builder's zone heap; every node
  //! constructor receives the builder as its first argument.
  template<typename T>
  ASMJIT_INLINE T* newNodeT() noexcept { return new(_cbHeap.alloc(sizeof(T))) T(this); }

  //! \internal
  template<typename T, typename P0>
  ASMJIT_INLINE T* newNodeT(P0 p0) noexcept { return new(_cbHeap.alloc(sizeof(T))) T(this, p0); }

  //! \internal
  template<typename T, typename P0, typename P1>
  ASMJIT_INLINE T* newNodeT(P0 p0, P1 p1) noexcept { return new(_cbHeap.alloc(sizeof(T))) T(this, p0, p1); }

  //! \internal
  template<typename T, typename P0, typename P1, typename P2>
  ASMJIT_INLINE T* newNodeT(P0 p0, P1 p1, P2 p2) noexcept { return new(_cbHeap.alloc(sizeof(T))) T(this, p0, p1, p2); }

  //! Assign a fresh label id to `node` and record it in the label map.
  ASMJIT_API Error registerLabelNode(CBLabel* node) noexcept;

  //! Get `CBLabel` by `id`.
  ASMJIT_API Error getCBLabel(CBLabel** pOut, uint32_t id) noexcept;
  //! Get `CBLabel` by `label`.
  ASMJIT_INLINE Error getCBLabel(CBLabel** pOut, const Label& label) noexcept { return getCBLabel(pOut, label.getId()); }

  //! Create a new \ref CBLabel node.
  ASMJIT_API CBLabel* newLabelNode() noexcept;
  //! Create a new \ref CBAlign node.
  ASMJIT_API CBAlign* newAlignNode(uint32_t mode, uint32_t alignment) noexcept;
  //! Create a new \ref CBData node.
  ASMJIT_API CBData* newDataNode(const void* data, uint32_t size) noexcept;
  //! Create a new \ref CBConstPool node.
  ASMJIT_API CBConstPool* newConstPool() noexcept;
  //! Create a new \ref CBComment node.
  ASMJIT_API CBComment* newCommentNode(const char* s, size_t len) noexcept;

  // --------------------------------------------------------------------------
  // [Code-Emitter]
  // --------------------------------------------------------------------------

  ASMJIT_API virtual Label newLabel() override;
  //! NOTE(review): the default `parentId = kInvalidValue` differs from
  //! `Assembler::newNamedLabel()`, which defaults to `0` - confirm which
  //! default matches the CodeEmitter interface.
  ASMJIT_API virtual Label newNamedLabel(const char* name, size_t nameLength = Globals::kInvalidIndex, uint32_t type = Label::kTypeGlobal, uint32_t parentId = kInvalidValue) override;
  ASMJIT_API virtual Error bind(const Label& label) override;
  ASMJIT_API virtual Error align(uint32_t mode, uint32_t alignment) override;
  ASMJIT_API virtual Error embed(const void* data, uint32_t size) override;
  ASMJIT_API virtual Error embedLabel(const Label& label) override;
  ASMJIT_API virtual Error embedConstPool(const Label& label, const ConstPool& pool) override;
  ASMJIT_API virtual Error comment(const char* s, size_t len = Globals::kInvalidIndex) override;

  // --------------------------------------------------------------------------
  // [Node-Management]
  // --------------------------------------------------------------------------

  //! Add `node` after the current and set current to `node`.
  ASMJIT_API CBNode* addNode(CBNode* node) noexcept;
  //! Insert `node` after `ref`.
  ASMJIT_API CBNode* addAfter(CBNode* node, CBNode* ref) noexcept;
  //! Insert `node` before `ref`.
  ASMJIT_API CBNode* addBefore(CBNode* node, CBNode* ref) noexcept;
  //! Remove `node`.
  ASMJIT_API CBNode* removeNode(CBNode* node) noexcept;
  //! Remove multiple nodes.
  ASMJIT_API void removeNodes(CBNode* first, CBNode* last) noexcept;

  //! Get current node.
  //!
  //! \note If this method returns null it means that nothing has been
  //! emitted yet.
  ASMJIT_INLINE CBNode* getCursor() const noexcept { return _cursor; }
  //! Set the current node without returning the previous node.
  ASMJIT_INLINE void _setCursor(CBNode* node) noexcept { _cursor = node; }
  //! Set the current node to `node` and return the previous one.
  ASMJIT_API CBNode* setCursor(CBNode* node) noexcept;

  // --------------------------------------------------------------------------
  // [Passes]
  // --------------------------------------------------------------------------

  //! \internal
  //!
  //! Allocate a pass of type `T` from the base zone (reclaimed on detach).
  template<typename T>
  ASMJIT_INLINE T* newPassT() noexcept { return new(_cbBaseZone.alloc(sizeof(T))) T(); }

  //! \internal
  template<typename T, typename P0>
  ASMJIT_INLINE T* newPassT(P0 p0) noexcept { return new(_cbBaseZone.alloc(sizeof(T))) T(p0); }

  //! \internal
  template<typename T, typename P0, typename P1>
  ASMJIT_INLINE T* newPassT(P0 p0, P1 p1) noexcept { return new(_cbBaseZone.alloc(sizeof(T))) T(p0, p1); }

  //! Create a pass of type `T` and register it.
  template<typename T>
  ASMJIT_INLINE Error addPassT() noexcept { return addPass(newPassT<T>()); }

  //! FIX: these two overloads previously called `newPassT<P0>(p0)` and
  //! `newPassT<P0, P1>(p0, p1)`, i.e. they instantiated a "pass" of the
  //! constructor-argument type instead of the requested pass type `T`.
  template<typename T, typename P0>
  ASMJIT_INLINE Error addPassT(P0 p0) noexcept { return addPass(newPassT<T>(p0)); }

  template<typename T, typename P0, typename P1>
  ASMJIT_INLINE Error addPassT(P0 p0, P1 p1) noexcept { return addPass(newPassT<T>(p0, p1)); }

  //! Get a `CBPass` by name.
  ASMJIT_API CBPass* getPassByName(const char* name) const noexcept;
  //! Add `pass` to the list of passes.
  ASMJIT_API Error addPass(CBPass* pass) noexcept;
  //! Remove `pass` from the list of passes and delete it.
  ASMJIT_API Error deletePass(CBPass* pass) noexcept;

  // --------------------------------------------------------------------------
  // [Serialization]
  // --------------------------------------------------------------------------

  //! Re-emit every recorded node into `dst`.
  ASMJIT_API virtual Error serialize(CodeEmitter* dst);

  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------

  Zone _cbBaseZone;                      //!< Base zone used to allocate nodes and `CBPass`.
  Zone _cbDataZone;                      //!< Data zone used to allocate data and names.
  Zone _cbPassZone;                      //!< Zone passed to `CBPass::process()`.
  ZoneHeap _cbHeap;                      //!< ZoneHeap that uses `_cbBaseZone`.

  ZoneVector<CBPass*> _cbPasses;         //!< Array of `CBPass` objects.
  ZoneVector<CBLabel*> _cbLabels;        //!< Maps label indexes to `CBLabel` nodes.

  CBNode* _firstNode;                    //!< First node of the current section.
  CBNode* _lastNode;                     //!< Last node of the current section.
  CBNode* _cursor;                       //!< Current node (cursor).

  uint32_t _position;                    //!< Flow-id assigned to each new node.
  uint32_t _nodeFlags;                   //!< Flags assigned to each new node.
};
// ============================================================================
// [asmjit::CBPass]
// ============================================================================
//! `CodeBuilder` pass used to code transformations, analysis, and lowering.
//! `CodeBuilder` pass used to code transformations, analysis, and lowering.
class ASMJIT_VIRTAPI CBPass {
public:
  ASMJIT_NONCOPYABLE(CBPass);

  // --------------------------------------------------------------------------
  // [Construction / Destruction]
  // --------------------------------------------------------------------------

  //! Create a pass; `name` is stored, not copied, so it must outlive the
  //! pass (string literals in practice).
  ASMJIT_API CBPass(const char* name) noexcept;
  ASMJIT_API virtual ~CBPass() noexcept;

  // --------------------------------------------------------------------------
  // [Interface]
  // --------------------------------------------------------------------------

  //! Process the code stored in CodeBuffer `cb`.
  //!
  //! This is the only function that is called by the `CodeBuilder` to process
  //! the code. It passes the CodeBuilder itself (`cb`) and also a zone memory
  //! allocator `zone`, which will be reset after the `process()` returns. The
  //! allocator should be used for all allocations as it's fast and everything
  //! it allocates will be released at once when `process()` returns.
  virtual Error process(Zone* zone) noexcept = 0;

  // --------------------------------------------------------------------------
  // [Accessors]
  // --------------------------------------------------------------------------

  //! Get the CodeBuilder this pass is attached to (null when detached).
  ASMJIT_INLINE const CodeBuilder* cb() const noexcept { return _cb; }
  //! Get the name passed to the constructor.
  ASMJIT_INLINE const char* getName() const noexcept { return _name; }

  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------

  CodeBuilder* _cb;                      //!< CodeBuilder this pass is assigned to.
  const char* _name;                     //!< Name of the pass.
};
// ============================================================================
// [asmjit::CBNode]
// ============================================================================
//! Node (CodeBuilder).
//!
//! Every node represents a building-block used by \ref CodeBuilder. It can be
//! instruction, data, label, comment, directive, or any other high-level
//! representation that can be transformed to the building blocks mentioned.
//! Every class that inherits \ref CodeBuilder can define its own nodes that it
//! can lower to basic nodes.
//!
//! Nodes form an intrusive doubly-linked list (`_prev` / `_next`). They are
//! always allocated by a `CodeBuilder` and their destructors are never called.
class CBNode {
public:
  ASMJIT_NONCOPYABLE(CBNode)

  // --------------------------------------------------------------------------
  // [Type]
  // --------------------------------------------------------------------------

  //! Type of \ref CBNode.
  ASMJIT_ENUM(NodeType) {
    kNodeNone = 0,                       //!< Invalid node (internal, don't use).

    // [CodeBuilder]
    kNodeInst = 1,                       //!< Node is \ref CBInst or \ref CBJump.
    kNodeData = 2,                       //!< Node is \ref CBData.
    kNodeAlign = 3,                      //!< Node is \ref CBAlign.
    kNodeLabel = 4,                      //!< Node is \ref CBLabel.
    kNodeLabelData = 5,                  //!< Node is \ref CBLabelData.
    kNodeConstPool = 6,                  //!< Node is \ref CBConstPool.
    kNodeComment = 7,                    //!< Node is \ref CBComment.
    kNodeSentinel = 8,                   //!< Node is \ref CBSentinel.

    // [CodeCompiler]
    kNodeFunc = 16,                      //!< Node is \ref CCFunc (considered as \ref CBLabel by \ref CodeBuilder).
    kNodeFuncExit = 17,                  //!< Node is \ref CCFuncRet.
    kNodeFuncCall = 18,                  //!< Node is \ref CCFuncCall.
    kNodePushArg = 19,                   //!< Node is \ref CCPushArg.
    kNodeHint = 20,                      //!< Node is \ref CCHint.

    // [UserDefined]
    kNodeUser = 32                       //!< First id of a user-defined node.
  };

  // --------------------------------------------------------------------------
  // [Flags]
  // --------------------------------------------------------------------------

  //! Bit-flags stored in `_flags` (16 bits available).
  ASMJIT_ENUM(Flags) {
    //! The node has been translated by the CodeCompiler.
    kFlagIsTranslated = 0x0001,
    //! If the node can be safely removed (has no effect).
    kFlagIsRemovable = 0x0004,
    //! If the node is informative only and can be safely removed.
    kFlagIsInformative = 0x0008,

    //! If the `CBInst` is a jump.
    kFlagIsJmp = 0x0010,
    //! If the `CBInst` is a conditional jump.
    kFlagIsJcc = 0x0020,
    //! If the `CBInst` is an unconditional jump or conditional jump that is
    //! likely to be taken.
    kFlagIsTaken = 0x0040,

    //! If the `CBNode` will return from a function.
    //!
    //! This flag is used by both `CBSentinel` and `CCFuncRet`.
    kFlagIsRet = 0x0080,

    //! Whether the instruction is special.
    kFlagIsSpecial = 0x0100,
    //! Whether the instruction is an FPU instruction.
    kFlagIsFp = 0x0200
  };

  // --------------------------------------------------------------------------
  // [Construction / Destruction]
  // --------------------------------------------------------------------------

  //! Create a new \ref CBNode - always use \ref CodeBuilder to allocate nodes.
  //!
  //! The node inherits the builder's current node-flags and position (flow-id).
  ASMJIT_INLINE CBNode(CodeBuilder* cb, uint32_t type) noexcept {
    _prev = nullptr;
    _next = nullptr;
    _type = static_cast<uint8_t>(type);
    _opCount = 0;
    _flags = static_cast<uint16_t>(cb->_nodeFlags);
    _position = cb->_position;
    _inlineComment = nullptr;
    _passData = nullptr;
  }
  //! Destroy the `CBNode` instance (NEVER CALLED - nodes are zone-allocated).
  ASMJIT_INLINE ~CBNode() noexcept {}

  // --------------------------------------------------------------------------
  // [Accessors]
  // --------------------------------------------------------------------------

  //! Statically cast this node to `T*` (unchecked - caller must know the type).
  template<typename T>
  ASMJIT_INLINE T* as() noexcept { return static_cast<T*>(this); }
  //! \overload
  template<typename T>
  ASMJIT_INLINE const T* as() const noexcept { return static_cast<const T*>(this); }

  //! Get previous node in the compiler stream.
  ASMJIT_INLINE CBNode* getPrev() const noexcept { return _prev; }
  //! Get next node in the compiler stream.
  ASMJIT_INLINE CBNode* getNext() const noexcept { return _next; }

  //! Get the node type, see \ref NodeType.
  ASMJIT_INLINE uint32_t getType() const noexcept { return _type; }
  //! Get the node flags.
  ASMJIT_INLINE uint32_t getFlags() const noexcept { return _flags; }

  //! Get whether the instruction has flag `flag`.
  ASMJIT_INLINE bool hasFlag(uint32_t flag) const noexcept { return (static_cast<uint32_t>(_flags) & flag) != 0; }
  //! Set node flags to `flags` (replaces all existing flags).
  ASMJIT_INLINE void setFlags(uint32_t flags) noexcept { _flags = static_cast<uint16_t>(flags); }
  //! Add instruction `flags`.
  ASMJIT_INLINE void orFlags(uint32_t flags) noexcept { _flags |= static_cast<uint16_t>(flags); }
  //! And instruction `flags`.
  ASMJIT_INLINE void andFlags(uint32_t flags) noexcept { _flags &= static_cast<uint16_t>(flags); }
  //! Clear instruction `flags`.
  ASMJIT_INLINE void andNotFlags(uint32_t flags) noexcept { _flags &= ~static_cast<uint16_t>(flags); }

  //! Get whether the node has been translated.
  ASMJIT_INLINE bool isTranslated() const noexcept { return hasFlag(kFlagIsTranslated); }

  //! Get whether the node is removable if it's in unreachable code block.
  ASMJIT_INLINE bool isRemovable() const noexcept { return hasFlag(kFlagIsRemovable); }
  //! Get whether the node is informative only (comment, hint).
  ASMJIT_INLINE bool isInformative() const noexcept { return hasFlag(kFlagIsInformative); }

  //! Whether the node is `CBLabel`.
  ASMJIT_INLINE bool isLabel() const noexcept { return _type == kNodeLabel; }
  //! Whether the `CBInst` node is an unconditional jump.
  ASMJIT_INLINE bool isJmp() const noexcept { return hasFlag(kFlagIsJmp); }
  //! Whether the `CBInst` node is a conditional jump.
  ASMJIT_INLINE bool isJcc() const noexcept { return hasFlag(kFlagIsJcc); }
  //! Whether the `CBInst` node is a conditional/unconditional jump.
  ASMJIT_INLINE bool isJmpOrJcc() const noexcept { return hasFlag(kFlagIsJmp | kFlagIsJcc); }
  //! Whether the `CBInst` node is a return.
  ASMJIT_INLINE bool isRet() const noexcept { return hasFlag(kFlagIsRet); }

  //! Get whether the node is `CBInst` and the instruction is special.
  ASMJIT_INLINE bool isSpecial() const noexcept { return hasFlag(kFlagIsSpecial); }
  //! Get whether the node is `CBInst` and the instruction uses x87-FPU.
  ASMJIT_INLINE bool isFp() const noexcept { return hasFlag(kFlagIsFp); }

  //! Get whether the node has a non-zero position (flow index).
  ASMJIT_INLINE bool hasPosition() const noexcept { return _position != 0; }
  //! Get flow index.
  ASMJIT_INLINE uint32_t getPosition() const noexcept { return _position; }
  //! Set flow index.
  ASMJIT_INLINE void setPosition(uint32_t position) noexcept { _position = position; }

  //! Get if the node has an inline comment.
  ASMJIT_INLINE bool hasInlineComment() const noexcept { return _inlineComment != nullptr; }
  //! Get an inline comment string.
  ASMJIT_INLINE const char* getInlineComment() const noexcept { return _inlineComment; }
  //! Set an inline comment string to `s` (pointer stored, string not copied).
  ASMJIT_INLINE void setInlineComment(const char* s) noexcept { _inlineComment = s; }
  //! Set an inline comment string to null.
  ASMJIT_INLINE void resetInlineComment() noexcept { _inlineComment = nullptr; }

  //! Get if the node has associated work-data.
  ASMJIT_INLINE bool hasPassData() const noexcept { return _passData != nullptr; }
  //! Get work-data - data used during processing & transformations.
  template<typename T>
  ASMJIT_INLINE T* getPassData() const noexcept { return (T*)_passData; }
  //! Set work-data to `data`.
  template<typename T>
  ASMJIT_INLINE void setPassData(T* data) noexcept { _passData = (void*)data; }
  //! Reset work-data to null.
  ASMJIT_INLINE void resetPassData() noexcept { _passData = nullptr; }

  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------

  CBNode* _prev;                         //!< Previous node.
  CBNode* _next;                         //!< Next node.

  uint8_t _type;                         //!< Node type, see \ref NodeType.
  uint8_t _opCount;                      //!< Count of operands or zero.
  uint16_t _flags;                       //!< Flags, different meaning for every type of the node.
  uint32_t _position;                    //!< Flow index.

  const char* _inlineComment;            //!< Inline comment or null if not used.
  void* _passData;                       //!< Data used exclusively by the current `CBPass`.
};
// ============================================================================
// [asmjit::CBInst]
// ============================================================================
//! Instruction (CodeBuilder).
//!
//! Wraps an instruction with its options and operands.
class CBInst : public CBNode {
public:
  ASMJIT_NONCOPYABLE(CBInst)

  // --------------------------------------------------------------------------
  // [Construction / Destruction]
  // --------------------------------------------------------------------------

  //! Create a new `CBInst` instance.
  //!
  //! NOTE: `opArray` is not copied - the node stores the pointer, so the
  //! caller (normally `CodeBuilder`) must guarantee its lifetime.
  ASMJIT_INLINE CBInst(CodeBuilder* cb, uint32_t instId, uint32_t options, Operand* opArray, uint32_t opCount) noexcept
    : CBNode(cb, kNodeInst) {

    orFlags(kFlagIsRemovable);
    // NOTE(review): the instruction id is stored as 16 bits; ids above 65535
    // would be silently truncated by this cast.
    _instDetail.instId = static_cast<uint16_t>(instId);
    _instDetail.options = options;

    _opCount = static_cast<uint8_t>(opCount);
    _opArray = opArray;

    // Cache the index of the first memory operand (if any).
    _updateMemOp();
  }
  //! Destroy the `CBInst` instance (NEVER CALLED - nodes are zone-allocated).
  ASMJIT_INLINE ~CBInst() noexcept {}

  // --------------------------------------------------------------------------
  // [Accessors]
  // --------------------------------------------------------------------------

  ASMJIT_INLINE Inst::Detail& getInstDetail() noexcept { return _instDetail; }
  ASMJIT_INLINE const Inst::Detail& getInstDetail() const noexcept { return _instDetail; }

  //! Get the instruction id, see \ref Inst::Id.
  ASMJIT_INLINE uint32_t getInstId() const noexcept { return _instDetail.instId; }
  //! Set the instruction id to `instId`, see \ref Inst::Id.
  ASMJIT_INLINE void setInstId(uint32_t instId) noexcept { _instDetail.instId = instId; }

  //! Whether the instruction is either a jump or a conditional jump likely to be taken.
  ASMJIT_INLINE bool isTaken() const noexcept { return hasFlag(kFlagIsTaken); }

  //! Get emit options.
  ASMJIT_INLINE uint32_t getOptions() const noexcept { return _instDetail.options; }
  //! Set emit options.
  ASMJIT_INLINE void setOptions(uint32_t options) noexcept { _instDetail.options = options; }
  //! Add emit options.
  ASMJIT_INLINE void addOptions(uint32_t options) noexcept { _instDetail.options |= options; }
  //! Mask emit options.
  ASMJIT_INLINE void andOptions(uint32_t options) noexcept { _instDetail.options &= options; }
  //! Clear emit options.
  ASMJIT_INLINE void delOptions(uint32_t options) noexcept { _instDetail.options &= ~options; }

  //! Get if the node has an extra register operand.
  ASMJIT_INLINE bool hasExtraReg() const noexcept { return _instDetail.hasExtraReg(); }
  //! Get extra register operand.
  ASMJIT_INLINE RegOnly& getExtraReg() noexcept { return _instDetail.extraReg; }
  //! \overload
  ASMJIT_INLINE const RegOnly& getExtraReg() const noexcept { return _instDetail.extraReg; }
  //! Set extra register operand to `reg`.
  ASMJIT_INLINE void setExtraReg(const Reg& reg) noexcept { _instDetail.extraReg.init(reg); }
  //! Set extra register operand to `reg`.
  ASMJIT_INLINE void setExtraReg(const RegOnly& reg) noexcept { _instDetail.extraReg.init(reg); }
  //! Reset extra register operand.
  ASMJIT_INLINE void resetExtraReg() noexcept { _instDetail.extraReg.reset(); }

  //! Get operands count.
  ASMJIT_INLINE uint32_t getOpCount() const noexcept { return _opCount; }
  //! Get operands list.
  ASMJIT_INLINE Operand* getOpArray() noexcept { return _opArray; }
  //! \overload
  ASMJIT_INLINE const Operand* getOpArray() const noexcept { return _opArray; }

  //! Get whether the instruction contains a memory operand.
  ASMJIT_INLINE bool hasMemOp() const noexcept { return _memOpIndex != 0xFF; }
  //! Get memory operand.
  //!
  //! NOTE: Can only be called if the instruction has such operand,
  //! see `hasMemOp()`.
  ASMJIT_INLINE Mem* getMemOp() const noexcept {
    ASMJIT_ASSERT(hasMemOp());
    return static_cast<Mem*>(&_opArray[_memOpIndex]);
  }
  //! \overload
  template<typename T>
  ASMJIT_INLINE T* getMemOp() const noexcept {
    ASMJIT_ASSERT(hasMemOp());
    return static_cast<T*>(&_opArray[_memOpIndex]);
  }

  //! Set memory operand index, `0xFF` means no memory operand.
  ASMJIT_INLINE void setMemOpIndex(uint32_t index) noexcept { _memOpIndex = static_cast<uint8_t>(index); }
  //! Reset memory operand index to `0xFF` (no operand).
  ASMJIT_INLINE void resetMemOpIndex() noexcept { _memOpIndex = 0xFF; }

  // --------------------------------------------------------------------------
  // [Utils]
  // --------------------------------------------------------------------------

  //! Scan the operand array and cache the index of the first memory operand
  //! in `_memOpIndex` (0xFF if the instruction has no memory operand).
  ASMJIT_INLINE void _updateMemOp() noexcept {
    Operand* opArray = getOpArray();
    uint32_t opCount = getOpCount();

    uint32_t i;
    for (i = 0; i < opCount; i++)
      if (opArray[i].isMem())
        goto Update;
    i = 0xFF;  // No memory operand found.

Update:
    setMemOpIndex(i);
  }

  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------

  Inst::Detail _instDetail;              //!< Instruction id, options, and extra register.
  uint8_t _memOpIndex;                   //!< \internal Index of the first memory operand, 0xFF if none.
  uint8_t _reserved[7];                  //!< \internal Padding.

  Operand* _opArray;                     //!< Instruction operands (not owned).
};
// ============================================================================
// [asmjit::CBInstEx]
// ============================================================================
//! Instruction node (CodeBuilder) with inline storage for two extra operands.
struct CBInstEx : public CBInst {
  Operand _op4;                          //!< Extra operand #4.
  Operand _op5;                          //!< Extra operand #5.
};
// ============================================================================
// [asmjit::CBJump]
// ============================================================================
//! Asm jump (conditional or direct).
//!
//! Extension of `CBInst` node, which stores more information about the jump.
class CBJump : public CBInst {
public:
  ASMJIT_NONCOPYABLE(CBJump)

  // --------------------------------------------------------------------------
  // [Construction / Destruction]
  // --------------------------------------------------------------------------

  //! Create a new `CBJump` instance; `_target` and `_jumpNext` start null and
  //! are linked up later.
  ASMJIT_INLINE CBJump(CodeBuilder* cb, uint32_t instId, uint32_t options, Operand* opArray, uint32_t opCount) noexcept
    : CBInst(cb, instId, options, opArray, opCount),
      _target(nullptr),
      _jumpNext(nullptr) {}
  //! Destroy the `CBJump` instance (NEVER CALLED - nodes are zone-allocated).
  ASMJIT_INLINE ~CBJump() noexcept {}

  // --------------------------------------------------------------------------
  // [Accessors]
  // --------------------------------------------------------------------------

  //! Get the target label node.
  ASMJIT_INLINE CBLabel* getTarget() const noexcept { return _target; }
  //! Get the next jump to the same target (singly-linked list).
  ASMJIT_INLINE CBJump* getJumpNext() const noexcept { return _jumpNext; }

  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------

  CBLabel* _target;                      //!< Target node.
  CBJump* _jumpNext;                     //!< Next jump to the same target in a single linked-list.
};
// ============================================================================
// [asmjit::CBData]
// ============================================================================
//! Asm data (CodeBuilder).
//!
//! Wraps `.data` directive. The node contains data that will be placed at the
//! node's position in the assembler stream. The data is considered to be RAW;
//! no analysis nor byte-order conversion is performed on RAW data.
//!
//! Data up to `kInlineBufferSize` bytes is copied into the node itself;
//! larger data is referenced by pointer (the node does NOT copy or own it).
class CBData : public CBNode {
public:
  ASMJIT_NONCOPYABLE(CBData)

  //! Inline capacity, chosen so the whole node fits into 64 bytes
  //! (64 - base node - 4 bytes for `_size`).
  enum { kInlineBufferSize = static_cast<int>(64 - sizeof(CBNode) - 4) };

  // --------------------------------------------------------------------------
  // [Construction / Destruction]
  // --------------------------------------------------------------------------

  //! Create a new `CBData` instance.
  //!
  //! If `data` is null and the size is inline the buffer is left uninitialized
  //! (to be filled in by the caller).
  ASMJIT_INLINE CBData(CodeBuilder* cb, void* data, uint32_t size) noexcept : CBNode(cb, kNodeData) {
    if (size <= kInlineBufferSize) {
      if (data) ::memcpy(_buf, data, size);
    }
    else {
      _externalPtr = static_cast<uint8_t*>(data);
    }
    // NOTE(review): `_size` belongs to the inline-buffer arm of the union;
    // writing it after `_externalPtr` relies on the two layouts not
    // overlapping at that offset - confirm against target ABI.
    _size = size;
  }
  //! Destroy the `CBData` instance (NEVER CALLED - nodes are zone-allocated).
  ASMJIT_INLINE ~CBData() noexcept {}

  // --------------------------------------------------------------------------
  // [Accessors]
  // --------------------------------------------------------------------------

  //! Get size of the data.
  uint32_t getSize() const noexcept { return _size; }
  //! Get pointer to the data (inline buffer or external pointer based on size).
  uint8_t* getData() const noexcept { return _size <= kInlineBufferSize ? const_cast<uint8_t*>(_buf) : _externalPtr; }

  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------

  union {
    struct {
      uint8_t _buf[kInlineBufferSize];   //!< Embedded data buffer.
      uint32_t _size;                    //!< Size of the data.
    };
    struct {
      uint8_t* _externalPtr;             //!< Pointer to external data (not owned).
    };
  };
};
// ============================================================================
// [asmjit::CBAlign]
// ============================================================================
//! Align directive (CodeBuilder).
//!
//! Wraps `.align` directive.
class CBAlign : public CBNode {
public:
  ASMJIT_NONCOPYABLE(CBAlign)

  // --------------------------------------------------------------------------
  // [Construction / Destruction]
  // --------------------------------------------------------------------------

  //! Create a new `CBAlign` instance with the given `mode` (see \ref AlignMode)
  //! and `alignment` in bytes.
  ASMJIT_INLINE CBAlign(CodeBuilder* cb, uint32_t mode, uint32_t alignment) noexcept
    : CBNode(cb, kNodeAlign),
      _mode(mode),
      _alignment(alignment) {}
  //! Destroy the `CBAlign` instance (NEVER CALLED - nodes are zone-allocated).
  ASMJIT_INLINE ~CBAlign() noexcept {}

  // --------------------------------------------------------------------------
  // [Accessors]
  // --------------------------------------------------------------------------

  //! Get align mode.
  ASMJIT_INLINE uint32_t getMode() const noexcept { return _mode; }
  //! Set align mode.
  ASMJIT_INLINE void setMode(uint32_t mode) noexcept { _mode = mode; }

  //! Get align offset in bytes.
  ASMJIT_INLINE uint32_t getAlignment() const noexcept { return _alignment; }
  //! Set align offset in bytes to `offset`.
  ASMJIT_INLINE void setAlignment(uint32_t alignment) noexcept { _alignment = alignment; }

  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------

  uint32_t _mode;                        //!< Align mode, see \ref AlignMode.
  uint32_t _alignment;                   //!< Alignment (in bytes).
};
// ============================================================================
// [asmjit::CBLabel]
// ============================================================================
//! Label (CodeBuilder).
//!
//! Marks a label position in the node stream and tracks all jumps that target
//! it (see `_from` / `_numRefs`).
class CBLabel : public CBNode {
public:
  ASMJIT_NONCOPYABLE(CBLabel)

  // --------------------------------------------------------------------------
  // [Construction / Destruction]
  // --------------------------------------------------------------------------

  //! Create a new `CBLabel` instance with label id `id` (invalid by default).
  ASMJIT_INLINE CBLabel(CodeBuilder* cb, uint32_t id = kInvalidValue) noexcept
    : CBNode(cb, kNodeLabel),
      _id(id),
      _numRefs(0),
      _from(nullptr) {}
  //! Destroy the `CBLabel` instance (NEVER CALLED - nodes are zone-allocated).
  ASMJIT_INLINE ~CBLabel() noexcept {}

  // --------------------------------------------------------------------------
  // [Accessors]
  // --------------------------------------------------------------------------

  //! Get the label id.
  ASMJIT_INLINE uint32_t getId() const noexcept { return _id; }
  //! Get the label as `Label` operand.
  ASMJIT_INLINE Label getLabel() const noexcept { return Label(_id); }

  //! Get first jmp instruction (head of the list linked via `CBJump::_jumpNext`).
  ASMJIT_INLINE CBJump* getFrom() const noexcept { return _from; }

  //! Get number of jumps to this target.
  ASMJIT_INLINE uint32_t getNumRefs() const noexcept { return _numRefs; }
  //! Set number of jumps to this target.
  ASMJIT_INLINE void setNumRefs(uint32_t i) noexcept { _numRefs = i; }

  //! Add number of jumps to this target.
  ASMJIT_INLINE void addNumRefs(uint32_t i = 1) noexcept { _numRefs += i; }
  //! Subtract number of jumps to this target.
  ASMJIT_INLINE void subNumRefs(uint32_t i = 1) noexcept { _numRefs -= i; }

  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------

  uint32_t _id;                          //!< Label id.
  uint32_t _numRefs;                     //!< Count of jumps here.
  CBJump* _from;                         //!< Linked-list of nodes that can jump here.
};
// ============================================================================
// [asmjit::CBLabelData]
// ============================================================================
//! Label data (CodeBuilder) - emits the address of the referenced label as data.
class CBLabelData : public CBNode {
public:
  ASMJIT_NONCOPYABLE(CBLabelData)

  // --------------------------------------------------------------------------
  // [Construction / Destruction]
  // --------------------------------------------------------------------------

  //! Create a new `CBLabelData` instance referencing label `id`.
  ASMJIT_INLINE CBLabelData(CodeBuilder* cb, uint32_t id = kInvalidValue) noexcept
    : CBNode(cb, kNodeLabelData),
      _id(id) {}
  //! Destroy the `CBLabelData` instance (NEVER CALLED - nodes are zone-allocated).
  ASMJIT_INLINE ~CBLabelData() noexcept {}

  // --------------------------------------------------------------------------
  // [Interface]
  // --------------------------------------------------------------------------

  //! Get the label id.
  ASMJIT_INLINE uint32_t getId() const noexcept { return _id; }
  //! Get the label as `Label` operand.
  ASMJIT_INLINE Label getLabel() const noexcept { return Label(_id); }

  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------

  uint32_t _id;                          //!< Referenced label id.
};
// ============================================================================
// [asmjit::CBConstPool]
// ============================================================================
//! Constant pool (CodeBuilder) - a labeled pool of constants emitted as data.
class CBConstPool : public CBLabel {
public:
  ASMJIT_NONCOPYABLE(CBConstPool)

  // --------------------------------------------------------------------------
  // [Construction / Destruction]
  // --------------------------------------------------------------------------

  //! Create a new `CBConstPool` instance. The pool allocates from the
  //! builder's base zone.
  ASMJIT_INLINE CBConstPool(CodeBuilder* cb, uint32_t id = kInvalidValue) noexcept
    : CBLabel(cb, id),
      // Override the node type set by the CBLabel constructor.
      _constPool(&cb->_cbBaseZone) { _type = kNodeConstPool; }
  //! Destroy the `CBConstPool` instance (NEVER CALLED - nodes are zone-allocated).
  ASMJIT_INLINE ~CBConstPool() noexcept {}

  // --------------------------------------------------------------------------
  // [Interface]
  // --------------------------------------------------------------------------

  ASMJIT_INLINE ConstPool& getConstPool() noexcept { return _constPool; }
  ASMJIT_INLINE const ConstPool& getConstPool() const noexcept { return _constPool; }

  //! Get whether the constant-pool is empty.
  ASMJIT_INLINE bool isEmpty() const noexcept { return _constPool.isEmpty(); }
  //! Get the size of the constant-pool in bytes.
  ASMJIT_INLINE size_t getSize() const noexcept { return _constPool.getSize(); }
  //! Get minimum alignment.
  ASMJIT_INLINE size_t getAlignment() const noexcept { return _constPool.getAlignment(); }

  //! See \ref ConstPool::add().
  ASMJIT_INLINE Error add(const void* data, size_t size, size_t& dstOffset) noexcept {
    return _constPool.add(data, size, dstOffset);
  }

  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------

  ConstPool _constPool;                  //!< The underlying constant pool.
};
// ============================================================================
// [asmjit::CBComment]
// ============================================================================
//! Comment (CodeBuilder).
//!
//! Informative-only node; flagged removable so passes may drop it freely.
class CBComment : public CBNode {
public:
  ASMJIT_NONCOPYABLE(CBComment)

  // --------------------------------------------------------------------------
  // [Construction / Destruction]
  // --------------------------------------------------------------------------

  //! Create a new `CBComment` instance. The `comment` string is stored by
  //! pointer (not copied) in the node's inline-comment slot.
  ASMJIT_INLINE CBComment(CodeBuilder* cb, const char* comment) noexcept : CBNode(cb, kNodeComment) {
    orFlags(kFlagIsRemovable | kFlagIsInformative);
    _inlineComment = comment;
  }
  //! Destroy the `CBComment` instance (NEVER CALLED - nodes are zone-allocated).
  ASMJIT_INLINE ~CBComment() noexcept {}
};
// ============================================================================
// [asmjit::CBSentinel]
// ============================================================================
//! Sentinel (CodeBuilder).
//!
//! Sentinel is a marker that is completely ignored by the code builder. It's
//! used to remember a position in a code as it never gets removed by any pass.
class CBSentinel : public CBNode {
public:
  ASMJIT_NONCOPYABLE(CBSentinel)

  // --------------------------------------------------------------------------
  // [Construction / Destruction]
  // --------------------------------------------------------------------------

  //! Create a new `CBSentinel` instance.
  ASMJIT_INLINE CBSentinel(CodeBuilder* cb) noexcept : CBNode(cb, kNodeSentinel) {}
  //! Destroy the `CBSentinel` instance (NEVER CALLED - nodes are zone-allocated).
  ASMJIT_INLINE ~CBSentinel() noexcept {}
};
//! \}
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"
// [Guard]
#endif // !ASMJIT_DISABLE_BUILDER
#endif // _ASMJIT_BASE_CODEBUILDER_H

View file

@ -0,0 +1,573 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Export]
#define ASMJIT_EXPORTS
// [Guard]
#include "../asmjit_build.h"
#if !defined(ASMJIT_DISABLE_COMPILER)
// [Dependencies]
#include "../base/assembler.h"
#include "../base/codecompiler.h"
#include "../base/cpuinfo.h"
#include "../base/logging.h"
#include "../base/regalloc_p.h"
#include "../base/utils.h"
#include <stdarg.h>
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
// ============================================================================
// [Constants]
// ============================================================================
// Empty NUL-terminated string - presumably used as a fallback when no name is
// supplied (its use is not visible in this part of the file; verify callers).
static const char noName[1] = { '\0' };
// ============================================================================
// [asmjit::CCFuncCall - Arg / Ret]
// ============================================================================
// Assign operand `op` to argument slot `i`; returns false if the slot index
// (with the hi-part bit stripped) is out of range for the call's signature.
bool CCFuncCall::_setArg(uint32_t i, const Operand_& op) noexcept {
  const uint32_t argIndex = i & ~kFuncArgHi;
  if (argIndex >= _funcDetail.getArgCount())
    return false;

  _args[i] = op;
  return true;
}
// Assign operand `op` to return-value slot `i`; only two slots exist.
bool CCFuncCall::_setRet(uint32_t i, const Operand_& op) noexcept {
  if (i < 2) {
    _ret[i] = op;
    return true;
  }
  return false;
}
// ============================================================================
// [asmjit::CodeCompiler - Construction / Destruction]
// ============================================================================
// Construct a detached CodeCompiler with no active function and empty
// virtual-register / constant-pool state.
CodeCompiler::CodeCompiler() noexcept
  : CodeBuilder(),
    _func(nullptr),
    // Zone block sized so the allocation including the zone's own overhead
    // comes out at 4096 bytes.
    _vRegZone(4096 - Zone::kZoneOverhead),
    _vRegArray(),
    _localConstPool(nullptr),
    _globalConstPool(nullptr) {

  // Override the emitter type set by the CodeBuilder constructor.
  _type = kTypeCompiler;
}
CodeCompiler::~CodeCompiler() noexcept {}
// ============================================================================
// [asmjit::CodeCompiler - Events]
// ============================================================================
// Attach event - CodeCompiler adds no attach-time state of its own, so this
// simply forwards to the base-class implementation.
Error CodeCompiler::onAttach(CodeHolder* code) noexcept {
  return Base::onAttach(code);
}
// Detach event - drop all per-compilation state (active function, constant
// pools, virtual registers) before forwarding to the base class.
Error CodeCompiler::onDetach(CodeHolder* code) noexcept {
  _func = nullptr;

  _localConstPool = nullptr;
  _globalConstPool = nullptr;

  _vRegArray.reset();
  // NOTE(review): the `false` argument's exact semantics (keep vs. release
  // the zone's blocks) are defined by Zone::reset - confirm there.
  _vRegZone.reset(false);

  return Base::onDetach(code);
}
// ============================================================================
// [asmjit::CodeCompiler - Node-Factory]
// ============================================================================
// Create a register-allocation hint node for virtual register `r`.
// Returns null if `r` is not a virtual register (or on allocation failure,
// as reported by newNodeT<>).
CCHint* CodeCompiler::newHintNode(Reg& r, uint32_t hint, uint32_t value) noexcept {
  if (!r.isVirtReg())
    return nullptr;
  return newNodeT<CCHint>(getVirtReg(r), hint, value);
}
// ============================================================================
// [asmjit::CodeCompiler - Func]
// ============================================================================
// Create (but don't insert) a function node from `sign`, together with its
// helper nodes (exit label and end sentinel) and zero-initialized argument
// array. On any failure the last-error is set and null is returned.
CCFunc* CodeCompiler::newFunc(const FuncSignature& sign) noexcept {
  Error err;

  CCFunc* func = newNodeT<CCFunc>();
  if (!func) goto _NoMemory;

  // Register the function node as a label so jumps/calls can reference it.
  err = registerLabelNode(func);
  if (ASMJIT_UNLIKELY(err)) {
    // TODO: Calls setLastError, maybe rethink noexcept?
    setLastError(err);
    return nullptr;
  }

  // Create helper nodes.
  func->_exitNode = newLabelNode();
  func->_end = newNodeT<CBSentinel>();

  if (!func->_exitNode || !func->_end)
    goto _NoMemory;

  // Function prototype.
  err = func->getDetail().init(sign);
  if (err != kErrorOk) {
    setLastError(err);
    return nullptr;
  }

  // If the CodeInfo guarantees higher alignment honor it.
  if (_codeInfo.getStackAlignment() > func->_funcDetail._callConv.getNaturalStackAlignment())
    func->_funcDetail._callConv.setNaturalStackAlignment(_codeInfo.getStackAlignment());

  // Allocate space for function arguments (zeroed; filled by setArg()).
  func->_args = nullptr;
  if (func->getArgCount() != 0) {
    func->_args = _cbHeap.allocT<VirtReg*>(func->getArgCount() * sizeof(VirtReg*));
    if (!func->_args) goto _NoMemory;

    ::memset(func->_args, 0, func->getArgCount() * sizeof(VirtReg*));
  }

  return func;

_NoMemory:
  setLastError(DebugUtils::errored(kErrorNoHeapMemory));
  return nullptr;
}
// Insert `func` (and its exit label / end marker) into the stream and leave
// the cursor positioned right after the function node, so code emitted next
// lands inside the function body. Only one function may be open at a time.
CCFunc* CodeCompiler::addFunc(CCFunc* func) {
  ASMJIT_ASSERT(_func == nullptr);
  _func = func;

  addNode(func);                 // Function node.
  CBNode* cursor = getCursor();  // {CURSOR} - remember position after the function node.
  addNode(func->getExitNode());  // Function exit label.
  addNode(func->getEnd());       // Function end marker.

  // Rewind so subsequent nodes are emitted between the function node and
  // its exit label.
  _setCursor(cursor);
  return func;
}
// Create a function from `sign` and insert it into the stream; returns null
// on failure (last-error already set).
CCFunc* CodeCompiler::addFunc(const FuncSignature& sign) {
  CCFunc* func = newFunc(sign);

  // `newFunc()` has already called setLastError() with the exact reason of
  // the failure (out of memory, invalid signature, label registration error),
  // so don't overwrite it here with a generic `kErrorNoHeapMemory`.
  if (!func)
    return nullptr;

  return addFunc(func);
}
// Close the currently open function: flush the local constant pool (if any)
// just before the function's end marker, mark the function finished, and move
// the cursor past the end sentinel. Returns the end sentinel, or null if no
// function is open.
CBSentinel* CodeCompiler::endFunc() {
  CCFunc* func = getFunc();
  if (!func) {
    // TODO: No open function - currently fails silently; consider setting an
    // error here.
    return nullptr;
  }

  // Add the local constant pool at the end of the function (if exists).
  if (_localConstPool) {
    setCursor(func->getEnd()->getPrev());
    addNode(_localConstPool);
    _localConstPool = nullptr;
  }

  // Mark as finished.
  func->_isFinished = true;
  _func = nullptr;

  CBSentinel* end = func->getEnd();
  setCursor(end);
  return end;
}
// ============================================================================
// [asmjit::CodeCompiler - Ret]
// ============================================================================
// Create (but don't insert) a function-return node carrying up to two return
// operands. Sets last-error and returns null on allocation failure.
CCFuncRet* CodeCompiler::newRet(const Operand_& o0, const Operand_& o1) noexcept {
  CCFuncRet* node = newNodeT<CCFuncRet>(o0, o1);
  if (!node)
    setLastError(DebugUtils::errored(kErrorNoHeapMemory));
  return node;
}
// Create a function-return node and append it at the cursor; null on failure
// (last-error set by newRet()).
CCFuncRet* CodeCompiler::addRet(const Operand_& o0, const Operand_& o1) noexcept {
  CCFuncRet* node = newRet(o0, o1);
  return node ? static_cast<CCFuncRet*>(addNode(node)) : nullptr;
}
// ============================================================================
// [asmjit::CodeCompiler - Call]
// ============================================================================
// Create (but don't insert) a function-call node. The node is allocated with
// one extra Operand appended to hold the call target `o0`; the argument array
// is allocated separately (and zeroed) only when the signature has arguments.
// Sets last-error and returns null on failure.
CCFuncCall* CodeCompiler::newCall(uint32_t instId, const Operand_& o0, const FuncSignature& sign) noexcept {
  Error err;
  uint32_t nArgs;

  CCFuncCall* node = _cbHeap.allocT<CCFuncCall>(sizeof(CCFuncCall) + sizeof(Operand));
  Operand* opArray;  // Declared without initializer so `goto` below stays legal.

  // Check the allocation BEFORE computing `opArray` - pointer arithmetic on
  // a null pointer is undefined behavior (the original computed it first).
  if (ASMJIT_UNLIKELY(!node))
    goto _NoMemory;

  // The single operand (call target) lives immediately after the node.
  opArray = reinterpret_cast<Operand*>(reinterpret_cast<uint8_t*>(node) + sizeof(CCFuncCall));
  opArray[0].copyFrom(o0);
  new (node) CCFuncCall(this, instId, 0, opArray, 1);

  if ((err = node->getDetail().init(sign)) != kErrorOk) {
    setLastError(err);
    return nullptr;
  }

  // If there are no arguments skip the allocation.
  if ((nArgs = sign.getArgCount()) == 0)
    return node;

  node->_args = static_cast<Operand*>(_cbHeap.alloc(nArgs * sizeof(Operand)));
  if (!node->_args) goto _NoMemory;

  ::memset(node->_args, 0, nArgs * sizeof(Operand));
  return node;

_NoMemory:
  setLastError(DebugUtils::errored(kErrorNoHeapMemory));
  return nullptr;
}
//! Create a new `CCFuncCall` node and append it to the stream.
CCFuncCall* CodeCompiler::addCall(uint32_t instId, const Operand_& o0, const FuncSignature& sign) noexcept {
  CCFuncCall* call = newCall(instId, o0, sign);
  return call ? static_cast<CCFuncCall*>(addNode(call)) : nullptr;
}
// ============================================================================
// [asmjit::CodeCompiler - Vars]
// ============================================================================
//! Bind the virtual register behind `r` to argument `argIndex` of the
//! currently active function.
//!
//! Fails with `kErrorInvalidState` when no function is active, or with
//! `kErrorInvalidVirtId` when `r` is not a known virtual register.
Error CodeCompiler::setArg(uint32_t argIndex, const Reg& r) {
  CCFunc* activeFunc = getFunc();
  if (activeFunc == nullptr)
    return setLastError(DebugUtils::errored(kErrorInvalidState));
  if (!isVirtRegValid(r))
    return setLastError(DebugUtils::errored(kErrorInvalidVirtId));
  activeFunc->setArg(argIndex, getVirtReg(r));
  return kErrorOk;
}
// ============================================================================
// [asmjit::CodeCompiler - Hint]
// ============================================================================
//! Emit an informational register-allocator hint for `r`.
//!
//! Hints only apply to virtual registers; for physical registers this is a
//! successful no-op.
Error CodeCompiler::_hint(Reg& r, uint32_t hint, uint32_t value) {
  if (!r.isVirtReg())
    return kErrorOk;
  CCHint* hintNode = newHintNode(r, hint, value);
  if (hintNode == nullptr)
    return setLastError(DebugUtils::errored(kErrorNoHeapMemory));
  addNode(hintNode);
  return kErrorOk;
}
// ============================================================================
// [asmjit::CodeCompiler - Vars]
// ============================================================================
//! Create a new `VirtReg` with the given `typeId`, register `signature` and
//! optional debug `name` (the name is duplicated into the data zone only
//! when logging support is compiled in).
//!
//! Returns nullptr on allocation failure or when the virtual-id space is
//! exhausted.
VirtReg* CodeCompiler::newVirtReg(uint32_t typeId, uint32_t signature, const char* name) noexcept {
size_t index = _vRegArray.getLength();
// NOTE(review): `>` still accepts index == kPackedIdCount; verify whether
// this should be `>=` so the packed-id space cannot be exceeded by one.
if (ASMJIT_UNLIKELY(index > Operand::kPackedIdCount))
return nullptr;
VirtReg* vreg;
if (_vRegArray.willGrow(&_cbHeap, 1) != kErrorOk || !(vreg = _vRegZone.allocZeroedT<VirtReg>()))
return nullptr;
vreg->_id = Operand::packId(static_cast<uint32_t>(index));
vreg->_regInfo._signature = signature;
vreg->_name = noName;
#if !defined(ASMJIT_DISABLE_LOGGING)
if (name && name[0] != '\0')
vreg->_name = static_cast<char*>(_cbDataZone.dup(name, ::strlen(name), true));
#endif // !ASMJIT_DISABLE_LOGGING
vreg->_size = TypeId::sizeOf(typeId);
vreg->_typeId = typeId;
// Natural (spill) alignment is the value size clamped to 64 bytes.
vreg->_alignment = static_cast<uint8_t>(std::min<uint32_t>(vreg->_size, 64));
// Default allocation priority; callers can change it via `setPriority()`.
vreg->_priority = 10;
// The following are only used by `RAPass`.
vreg->_raId = kInvalidValue;
vreg->_state = VirtReg::kStateNone;
vreg->_physId = Globals::kInvalidRegId;
_vRegArray.appendUnsafe(vreg);
return vreg;
}
//! Create a new virtual register of `typeId` and initialize `out` to it.
//!
//! On failure `out` is reset and the last error is set.
Error CodeCompiler::_newReg(Reg& out, uint32_t typeId, const char* name) {
  // Translate the abstract type-id into an architecture register signature.
  RegInfo info;
  Error err = ArchUtils::typeIdToRegInfo(getArchType(), typeId, info);
  if (ASMJIT_UNLIKELY(err))
    return setLastError(err);
  VirtReg* virtReg = newVirtReg(typeId, info.getSignature(), name);
  if (ASMJIT_UNLIKELY(virtReg == nullptr)) {
    out.reset();
    return setLastError(DebugUtils::errored(kErrorNoHeapMemory));
  }
  out._initReg(info.getSignature(), virtReg->getId());
  return kErrorOk;
}
//! va_list overload: format the register name, then delegate to the
//! plain-name overload.
Error CodeCompiler::_newReg(Reg& out, uint32_t typeId, const char* nameFmt, va_list ap) {
  StringBuilderTmp<256> nameBuilder;
  nameBuilder.appendFormatVA(nameFmt, ap);
  return _newReg(out, typeId, nameBuilder.getData());
}
//! Create a new virtual register compatible with `ref` and initialize `out`.
//!
//! When `ref` maps to an existing virtual register, its TypeId is reused but
//! adjusted so the new register matches `ref`'s actual size (see the NOTE
//! below); otherwise `ref`'s register type is used directly. On failure
//! `out` is reset and the last error is set.
Error CodeCompiler::_newReg(Reg& out, const Reg& ref, const char* name) {
RegInfo regInfo;
uint32_t typeId;
if (isVirtRegValid(ref)) {
VirtReg* vRef = getVirtReg(ref);
typeId = vRef->getTypeId();
// NOTE: It's possible to cast one register type to another if it's the
// same register kind. However, VirtReg always contains the TypeId that
// was used to create the register. This means that in some cases we may
// end up having different size of `ref` and `vRef`. In such case we
// adjust the TypeId to match the `ref` register type instead of the
// original register type, which should be the expected behavior.
uint32_t typeSize = TypeId::sizeOf(typeId);
uint32_t refSize = ref.getSize();
if (typeSize != refSize) {
if (TypeId::isInt(typeId)) {
// GP register - change TypeId to match `ref`, but keep sign of `vRef`
// (the low bit of the TypeId distinguishes signed/unsigned here).
switch (refSize) {
case 1: typeId = TypeId::kI8 | (typeId & 1); break;
case 2: typeId = TypeId::kI16 | (typeId & 1); break;
case 4: typeId = TypeId::kI32 | (typeId & 1); break;
case 8: typeId = TypeId::kI64 | (typeId & 1); break;
default: typeId = TypeId::kVoid; break;
}
}
else if (TypeId::isMmx(typeId)) {
// MMX register - always use 64-bit.
typeId = TypeId::kMmx64;
}
else if (TypeId::isMask(typeId)) {
// Mask register - change TypeId to match `ref` size.
switch (refSize) {
case 1: typeId = TypeId::kMask8; break;
case 2: typeId = TypeId::kMask16; break;
case 4: typeId = TypeId::kMask32; break;
case 8: typeId = TypeId::kMask64; break;
default: typeId = TypeId::kVoid; break;
}
}
else {
// VEC register - change TypeId to match `ref` size, keep vector metadata
// (the element type is preserved, only the vector width changes).
uint32_t elementTypeId = TypeId::elementOf(typeId);
switch (refSize) {
case 16: typeId = TypeId::_kVec128Start + (elementTypeId - TypeId::kI8); break;
case 32: typeId = TypeId::_kVec256Start + (elementTypeId - TypeId::kI8); break;
case 64: typeId = TypeId::_kVec512Start + (elementTypeId - TypeId::kI8); break;
default: typeId = TypeId::kVoid; break;
}
}
// `kVoid` marks an unsupported size combination above.
if (typeId == TypeId::kVoid)
return setLastError(DebugUtils::errored(kErrorInvalidState));
}
}
else {
// `ref` is not a virtual register - use its physical register type as-is.
typeId = ref.getType();
}
Error err = ArchUtils::typeIdToRegInfo(getArchType(), typeId, regInfo);
if (ASMJIT_UNLIKELY(err)) return setLastError(err);
VirtReg* vReg = newVirtReg(typeId, regInfo.getSignature(), name);
if (ASMJIT_UNLIKELY(!vReg)) {
out.reset();
return setLastError(DebugUtils::errored(kErrorNoHeapMemory));
}
out._initReg(regInfo.getSignature(), vReg->getId());
return kErrorOk;
}
//! va_list overload: format the register name, then delegate to the
//! reference-based overload.
Error CodeCompiler::_newReg(Reg& out, const Reg& ref, const char* nameFmt, va_list ap) {
  StringBuilderTmp<256> nameBuilder;
  nameBuilder.appendFormatVA(nameFmt, ap);
  return _newReg(out, ref, nameBuilder.getData());
}
//! Allocate a named stack slot of `size` bytes and initialize `out` as a
//! memory operand addressing it.
//!
//! `alignment` defaults to 1 when zero, must be a power of two, and is
//! capped at 64 bytes. On failure `out` is reset and the last error is set.
Error CodeCompiler::_newStack(Mem& out, uint32_t size, uint32_t alignment, const char* name) {
  // A zero-sized slot is meaningless.
  if (size == 0)
    return setLastError(DebugUtils::errored(kErrorInvalidArgument));
  // Normalize the alignment: default 1, power-of-two only, capped at 64.
  if (alignment == 0)
    alignment = 1;
  if (!Utils::isPowerOf2(alignment))
    return setLastError(DebugUtils::errored(kErrorInvalidArgument));
  if (alignment > 64)
    alignment = 64;
  // Stack slots are modeled as virtual registers with no type/signature.
  VirtReg* slot = newVirtReg(0, 0, name);
  if (ASMJIT_UNLIKELY(slot == nullptr)) {
    out.reset();
    return setLastError(DebugUtils::errored(kErrorNoHeapMemory));
  }
  slot->_size = size;
  slot->_isStack = true;
  slot->_alignment = static_cast<uint8_t>(alignment);
  // Set the memory operand to GPD/GPQ and its id to VirtReg.
  out = Mem(Init, _nativeGpReg.getType(), slot->getId(), Reg::kRegNone, kInvalidValue, 0, 0, Mem::kSignatureMemRegHomeFlag);
  return kErrorOk;
}
//! Add `size` bytes at `data` to the local or global constant pool (per
//! `scope`, see \ref ConstScope) and initialize `out` to address them.
Error CodeCompiler::_newConst(Mem& out, uint32_t scope, const void* data, size_t size) {
  // Pick the pool slot matching the requested scope.
  CBConstPool** poolSlot;
  switch (scope) {
    case kConstScopeLocal : poolSlot = &_localConstPool ; break;
    case kConstScopeGlobal: poolSlot = &_globalConstPool; break;
    default:
      return setLastError(DebugUtils::errored(kErrorInvalidArgument));
  }
  // Lazily create the pool on first use.
  if (*poolSlot == nullptr && (*poolSlot = newConstPool()) == nullptr)
    return setLastError(DebugUtils::errored(kErrorNoHeapMemory));
  CBConstPool* pool = *poolSlot;
  size_t off;
  Error err = pool->add(data, size, off);
  if (ASMJIT_UNLIKELY(err))
    return setLastError(err);
  // Address the constant as [pool-label + offset].
  out = Mem(Init,
    Label::kLabelTag,            // Base type.
    pool->getId(),               // Base id.
    0,                           // Index type.
    kInvalidValue,               // Index id.
    static_cast<int32_t>(off),   // Offset.
    static_cast<uint32_t>(size), // Size.
    0);                          // Flags.
  return kErrorOk;
}
//! Hint the allocator to allocate `reg` (no preferred physical register).
Error CodeCompiler::alloc(Reg& reg) {
  if (!reg.isVirtReg())
    return kErrorOk;
  return _hint(reg, CCHint::kHintAlloc, kInvalidValue);
}
//! Hint the allocator to allocate `reg` into the physical register `physId`.
Error CodeCompiler::alloc(Reg& reg, uint32_t physId) {
  if (!reg.isVirtReg())
    return kErrorOk;
  return _hint(reg, CCHint::kHintAlloc, physId);
}
//! Hint the allocator to allocate `reg` into the same slot as `physReg`.
Error CodeCompiler::alloc(Reg& reg, const Reg& physReg) {
  if (!reg.isVirtReg())
    return kErrorOk;
  return _hint(reg, CCHint::kHintAlloc, physReg.getId());
}
//! Hint the allocator to save `reg` if it has been modified.
Error CodeCompiler::save(Reg& reg) {
  if (!reg.isVirtReg())
    return kErrorOk;
  return _hint(reg, CCHint::kHintSave, kInvalidValue);
}
//! Hint the allocator to spill `reg` to memory.
Error CodeCompiler::spill(Reg& reg) {
  if (!reg.isVirtReg())
    return kErrorOk;
  return _hint(reg, CCHint::kHintSpill, kInvalidValue);
}
//! Hint the allocator to mark `reg` as unused from this point on.
Error CodeCompiler::unuse(Reg& reg) {
  if (!reg.isVirtReg())
    return kErrorOk;
  return _hint(reg, CCHint::kHintUnuse, kInvalidValue);
}
//! Get the allocation priority of `reg`.
//!
//! Returns 0 for physical registers and for ids that don't map to a known
//! virtual register.
uint32_t CodeCompiler::getPriority(Reg& reg) const {
  if (!reg.isVirtReg()) return 0;
  // Guard against an unknown id, consistent with `setPriority()` which
  // null-checks the same lookup.
  VirtReg* vreg = getVirtRegById(reg.getId());
  if (!vreg) return 0;
  return vreg->getPriority();
}
//! Set the allocation priority of `reg`, clamped to the 8-bit range.
//!
//! No-op for physical registers and for ids that don't map to a known
//! virtual register.
void CodeCompiler::setPriority(Reg& reg, uint32_t priority) {
  if (!reg.isVirtReg())
    return;
  const uint32_t clamped = (priority > 255) ? 255 : priority;
  VirtReg* vreg = getVirtRegById(reg.getId());
  if (vreg != nullptr)
    vreg->_priority = static_cast<uint8_t>(clamped);
}
//! Get whether `reg` should be saved when it becomes unused.
//!
//! Returns false for physical registers and for ids that don't map to a
//! known virtual register.
bool CodeCompiler::getSaveOnUnuse(Reg& reg) const {
  if (!reg.isVirtReg()) return false;
  // Guard against an unknown id, consistent with `setSaveOnUnuse()` which
  // null-checks the same lookup.
  VirtReg* vreg = getVirtRegById(reg.getId());
  if (!vreg) return false;
  return static_cast<bool>(vreg->_saveOnUnuse);
}
//! Set whether `reg` should be saved when it becomes unused.
//!
//! No-op for physical registers and unknown virtual-register ids.
void CodeCompiler::setSaveOnUnuse(Reg& reg, bool value) {
  if (!reg.isVirtReg())
    return;
  VirtReg* vreg = getVirtRegById(reg.getId());
  if (vreg != nullptr)
    vreg->_saveOnUnuse = value;
}
//! Rename the virtual register behind `reg` using a printf-style format.
//!
//! A null or empty format resets the name to the default. The formatted
//! name is truncated to 63 characters and copied into the data zone.
void CodeCompiler::rename(Reg& reg, const char* fmt, ...) {
  if (!reg.isVirtReg())
    return;
  VirtReg* vreg = getVirtRegById(reg.getId());
  if (vreg == nullptr)
    return;
  vreg->_name = noName;
  if (fmt == nullptr || fmt[0] == '\0')
    return;
  // Format into a bounded local buffer, then duplicate into the zone.
  char buf[64];
  va_list ap;
  va_start(ap, fmt);
  vsnprintf(buf, ASMJIT_ARRAY_SIZE(buf), fmt, ap);
  va_end(ap);
  buf[ASMJIT_ARRAY_SIZE(buf) - 1] = '\0';
  vreg->_name = static_cast<char*>(_cbDataZone.dup(buf, ::strlen(buf), true));
}
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"
// [Guard]
#endif // !ASMJIT_DISABLE_COMPILER

View file

@ -0,0 +1,738 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_BASE_CODECOMPILER_H
#define _ASMJIT_BASE_CODECOMPILER_H
#include "../asmjit_build.h"
#if !defined(ASMJIT_DISABLE_COMPILER)
// [Dependencies]
#include "../base/assembler.h"
#include "../base/codebuilder.h"
#include "../base/constpool.h"
#include "../base/func.h"
#include "../base/operand.h"
#include "../base/utils.h"
#include "../base/zone.h"
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
// ============================================================================
// [Forward Declarations]
// ============================================================================
struct VirtReg;
struct TiedReg;
struct RAState;
struct RACell;
//! \addtogroup asmjit_base
//! \{
// ============================================================================
// [asmjit::ConstScope]
// ============================================================================
//! Scope of the constant.
//!
//! Selects which pool `CodeCompiler::_newConst()` appends the data to.
ASMJIT_ENUM(ConstScope) {
//! Local constant, always embedded right after the current function.
kConstScopeLocal = 0,
//! Global constant, embedded at the end of the currently compiled code.
kConstScopeGlobal = 1
};
// ============================================================================
// [asmjit::VirtReg]
// ============================================================================
//! Virtual register data (CodeCompiler).
//!
//! Created by `CodeCompiler::newVirtReg()`; most of the trailing members are
//! owned and mutated by `RAPass` during register allocation.
struct VirtReg {
//! A state of a virtual register (used during register allocation).
ASMJIT_ENUM(State) {
kStateNone = 0, //!< Not allocated, not used.
kStateReg = 1, //!< Allocated in register.
kStateMem = 2 //!< Allocated in memory or spilled.
};
// --------------------------------------------------------------------------
// [Accessors]
// --------------------------------------------------------------------------
//! Get the virtual-register id.
ASMJIT_INLINE uint32_t getId() const noexcept { return _id; }
//! Get virtual-register's name.
ASMJIT_INLINE const char* getName() const noexcept { return _name; }
//! Get a physical register type.
ASMJIT_INLINE uint32_t getType() const noexcept { return _regInfo.getType(); }
//! Get a physical register kind.
ASMJIT_INLINE uint32_t getKind() const noexcept { return _regInfo.getKind(); }
//! Get a physical register size.
ASMJIT_INLINE uint32_t getRegSize() const noexcept { return _regInfo.getSize(); }
//! Get a register signature of this virtual register.
ASMJIT_INLINE uint32_t getSignature() const noexcept { return _regInfo.getSignature(); }
//! Get a register's type-id, see \ref TypeId.
ASMJIT_INLINE uint32_t getTypeId() const noexcept { return _typeId; }
//! Get virtual-register's size.
ASMJIT_INLINE uint32_t getSize() const noexcept { return _size; }
//! Get virtual-register's alignment.
ASMJIT_INLINE uint32_t getAlignment() const noexcept { return _alignment; }
//! Get the virtual-register priority, used by compiler to decide which variable to spill.
ASMJIT_INLINE uint32_t getPriority() const noexcept { return _priority; }
//! Set the virtual-register priority (must fit into 8 bits).
ASMJIT_INLINE void setPriority(uint32_t priority) noexcept {
ASMJIT_ASSERT(priority <= 0xFF);
_priority = static_cast<uint8_t>(priority);
}
//! Get variable state, only used by `RAPass`.
ASMJIT_INLINE uint32_t getState() const noexcept { return _state; }
//! Set variable state, only used by `RAPass`.
ASMJIT_INLINE void setState(uint32_t state) {
ASMJIT_ASSERT(state <= 0xFF);
_state = static_cast<uint8_t>(state);
}
//! Get register index.
ASMJIT_INLINE uint32_t getPhysId() const noexcept { return _physId; }
//! Set register index.
ASMJIT_INLINE void setPhysId(uint32_t physId) {
ASMJIT_ASSERT(physId <= Globals::kInvalidRegId);
_physId = static_cast<uint8_t>(physId);
}
//! Reset register index (back to the invalid-register sentinel).
ASMJIT_INLINE void resetPhysId() {
_physId = static_cast<uint8_t>(Globals::kInvalidRegId);
}
//! Get home registers mask.
ASMJIT_INLINE uint32_t getHomeMask() const { return _homeMask; }
//! Add a home register index to the home registers mask.
ASMJIT_INLINE void addHomeId(uint32_t physId) { _homeMask |= Utils::mask(physId); }
//! Get whether this is a fixed register (never reallocated).
ASMJIT_INLINE bool isFixed() const noexcept { return static_cast<bool>(_isFixed); }
//! Get whether the VirtReg is only memory allocated on the stack.
ASMJIT_INLINE bool isStack() const noexcept { return static_cast<bool>(_isStack); }
//! Get whether to save variable when it's unused (spill).
ASMJIT_INLINE bool saveOnUnuse() const noexcept { return static_cast<bool>(_saveOnUnuse); }
//! Get whether the variable was changed.
ASMJIT_INLINE bool isModified() const noexcept { return static_cast<bool>(_modified); }
//! Set whether the variable was changed.
ASMJIT_INLINE void setModified(bool modified) noexcept { _modified = modified; }
//! Get home memory offset.
ASMJIT_INLINE int32_t getMemOffset() const noexcept { return _memOffset; }
//! Set home memory offset.
ASMJIT_INLINE void setMemOffset(int32_t offset) noexcept { _memOffset = offset; }
//! Get home memory cell.
ASMJIT_INLINE RACell* getMemCell() const noexcept { return _memCell; }
//! Set home memory cell.
ASMJIT_INLINE void setMemCell(RACell* cell) noexcept { _memCell = cell; }
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
uint32_t _id; //!< Virtual register id.
RegInfo _regInfo; //!< Physical register info & signature.
const char* _name; //!< Virtual name (user provided).
uint32_t _size; //!< Virtual size (can be smaller than `regInfo._size`).
uint8_t _typeId; //!< Type-id.
uint8_t _alignment; //!< Register's natural alignment (for spilling).
uint8_t _priority; //!< Allocation priority (hint for RAPass that can be ignored).
uint8_t _isFixed : 1; //!< True if this is a fixed register, never reallocated.
uint8_t _isStack : 1; //!< True if the virtual register is only used as a stack.
uint8_t _isMaterialized : 1; //!< Register is constant that is easily created by a single instruction.
uint8_t _saveOnUnuse : 1; //!< Save on unuse (at end of the variable scope).
// -------------------------------------------------------------------------
// The following members are used exclusively by RAPass. They are initialized
// when the VirtReg is created and then changed during RAPass.
// -------------------------------------------------------------------------
uint32_t _raId; //!< Register allocator work-id (used by RAPass).
int32_t _memOffset; //!< Home memory offset.
uint32_t _homeMask; //!< Mask of all registers variable has been allocated to.
uint8_t _state; //!< Variable state (connected with actual `RAState`).
uint8_t _physId; //!< Actual register index (only used by `RAPass`), during translate.
uint8_t _modified; //!< Whether variable was changed (connected with actual `RAState`).
RACell* _memCell; //!< Home memory cell, used by `RAPass` (initially nullptr).
//! Temporary link to TiedReg* used by the `RAPass` used in
//! various phases, but always set back to nullptr when finished.
//!
//! This temporary data is designed to be used by algorithms that need to
//! store some data into variables themselves during compilation. But it's
//! expected that after variable is compiled & translated the data is set
//! back to zero/null. Initial value is nullptr.
TiedReg* _tied;
};
// ============================================================================
// [asmjit::CCHint]
// ============================================================================
//! Hint for register allocator (CodeCompiler).
//!
//! Purely informational node emitted via `CodeCompiler::_hint()`; it is
//! removable and does not produce machine code.
class CCHint : public CBNode {
public:
ASMJIT_NONCOPYABLE(CCHint)
//! Hint type.
ASMJIT_ENUM(Hint) {
//! Alloc to physical reg.
kHintAlloc = 0,
//! Spill to memory.
kHintSpill = 1,
//! Save if modified.
kHintSave = 2,
//! Save if modified and mark it as unused.
kHintSaveAndUnuse = 3,
//! Mark as unused.
kHintUnuse = 4
};
// --------------------------------------------------------------------------
// [Construction / Destruction]
// --------------------------------------------------------------------------
//! Create a new `CCHint` instance.
ASMJIT_INLINE CCHint(CodeBuilder* cb, VirtReg* vreg, uint32_t hint, uint32_t value) noexcept : CBNode(cb, kNodeHint) {
orFlags(kFlagIsRemovable | kFlagIsInformative);
_vreg = vreg;
_hint = hint;
_value = value;
}
//! Destroy the `CCHint` instance (NEVER CALLED).
ASMJIT_INLINE ~CCHint() noexcept {}
// --------------------------------------------------------------------------
// [Accessors]
// --------------------------------------------------------------------------
//! Get variable.
ASMJIT_INLINE VirtReg* getVReg() const noexcept { return _vreg; }
//! Get hint id, see \ref Hint.
ASMJIT_INLINE uint32_t getHint() const noexcept { return _hint; }
//! Set hint id, see \ref Hint.
ASMJIT_INLINE void setHint(uint32_t hint) noexcept { _hint = hint; }
//! Get hint value.
ASMJIT_INLINE uint32_t getValue() const noexcept { return _value; }
//! Set hint value.
ASMJIT_INLINE void setValue(uint32_t value) noexcept { _value = value; }
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
//! Variable.
VirtReg* _vreg;
//! Hint id.
uint32_t _hint;
//! Value.
uint32_t _value;
};
// ============================================================================
// [asmjit::CCFunc]
// ============================================================================
//! Function entry (CodeCompiler).
//!
//! Acts as a label for the function entry point and owns the function's
//! detail, frame info, exit label and end sentinel.
class CCFunc : public CBLabel {
public:
ASMJIT_NONCOPYABLE(CCFunc)
// --------------------------------------------------------------------------
// [Construction / Destruction]
// --------------------------------------------------------------------------
//! Create a new `CCFunc` instance.
//!
//! Always use `CodeCompiler::addFunc()` to create \ref CCFunc.
ASMJIT_INLINE CCFunc(CodeBuilder* cb) noexcept
: CBLabel(cb),
_funcDetail(),
_frameInfo(),
_exitNode(nullptr),
_end(nullptr),
_args(nullptr),
_isFinished(false) {
_type = kNodeFunc;
}
//! Destroy the `CCFunc` instance (NEVER CALLED).
ASMJIT_INLINE ~CCFunc() noexcept {}
// --------------------------------------------------------------------------
// [Accessors]
// --------------------------------------------------------------------------
//! Get function exit `CBLabel`.
ASMJIT_INLINE CBLabel* getExitNode() const noexcept { return _exitNode; }
//! Get function exit label.
ASMJIT_INLINE Label getExitLabel() const noexcept { return _exitNode->getLabel(); }
//! Get "End of Func" sentinel.
ASMJIT_INLINE CBSentinel* getEnd() const noexcept { return _end; }
//! Get function declaration.
ASMJIT_INLINE FuncDetail& getDetail() noexcept { return _funcDetail; }
//! Get function declaration.
ASMJIT_INLINE const FuncDetail& getDetail() const noexcept { return _funcDetail; }
//! Get function frame information.
ASMJIT_INLINE FuncFrameInfo& getFrameInfo() noexcept { return _frameInfo; }
//! Get function frame information.
ASMJIT_INLINE const FuncFrameInfo& getFrameInfo() const noexcept { return _frameInfo; }
//! Get arguments count.
ASMJIT_INLINE uint32_t getArgCount() const noexcept { return _funcDetail.getArgCount(); }
//! Get returns count.
ASMJIT_INLINE uint32_t getRetCount() const noexcept { return _funcDetail.getRetCount(); }
//! Get arguments list.
ASMJIT_INLINE VirtReg** getArgs() const noexcept { return _args; }
//! Get argument at `i`.
ASMJIT_INLINE VirtReg* getArg(uint32_t i) const noexcept {
ASMJIT_ASSERT(i < getArgCount());
return _args[i];
}
//! Set argument at `i`.
ASMJIT_INLINE void setArg(uint32_t i, VirtReg* vreg) noexcept {
ASMJIT_ASSERT(i < getArgCount());
_args[i] = vreg;
}
//! Reset argument at `i`.
ASMJIT_INLINE void resetArg(uint32_t i) noexcept {
ASMJIT_ASSERT(i < getArgCount());
_args[i] = nullptr;
}
//! Get frame attributes.
ASMJIT_INLINE uint32_t getAttributes() const noexcept { return _frameInfo.getAttributes(); }
//! Add frame attributes.
ASMJIT_INLINE void addAttributes(uint32_t attrs) noexcept { _frameInfo.addAttributes(attrs); }
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
FuncDetail _funcDetail; //!< Function detail.
FuncFrameInfo _frameInfo; //!< Function frame information.
CBLabel* _exitNode; //!< Function exit.
CBSentinel* _end; //!< Function end.
VirtReg** _args; //!< Arguments array as `VirtReg`.
//! Function was finished by `Compiler::endFunc()`.
uint8_t _isFinished;
};
// ============================================================================
// [asmjit::CCFuncRet]
// ============================================================================
//! Function return (CodeCompiler).
//!
//! Carries up to two return operands (e.g. a value split across a pair).
class CCFuncRet : public CBNode {
public:
ASMJIT_NONCOPYABLE(CCFuncRet)
// --------------------------------------------------------------------------
// [Construction / Destruction]
// --------------------------------------------------------------------------
//! Create a new `CCFuncRet` instance.
ASMJIT_INLINE CCFuncRet(CodeBuilder* cb, const Operand_& o0, const Operand_& o1) noexcept : CBNode(cb, kNodeFuncExit) {
orFlags(kFlagIsRet);
_ret[0].copyFrom(o0);
_ret[1].copyFrom(o1);
}
//! Destroy the `CCFuncRet` instance (NEVER CALLED).
ASMJIT_INLINE ~CCFuncRet() noexcept {}
// --------------------------------------------------------------------------
// [Accessors]
// --------------------------------------------------------------------------
//! Get the first return operand.
ASMJIT_INLINE Operand& getFirst() noexcept { return static_cast<Operand&>(_ret[0]); }
//! \overload
ASMJIT_INLINE const Operand& getFirst() const noexcept { return static_cast<const Operand&>(_ret[0]); }
//! Get the second return operand.
ASMJIT_INLINE Operand& getSecond() noexcept { return static_cast<Operand&>(_ret[1]); }
//! \overload
ASMJIT_INLINE const Operand& getSecond() const noexcept { return static_cast<const Operand&>(_ret[1]); }
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
//! Return operands.
Operand_ _ret[2];
};
// ============================================================================
// [asmjit::CCFuncCall]
// ============================================================================
//! Function call (CodeCompiler).
//!
//! An instruction node whose first operand is the call target; arguments and
//! up to two return operands are tracked separately for register allocation.
class CCFuncCall : public CBInst {
public:
ASMJIT_NONCOPYABLE(CCFuncCall)
// --------------------------------------------------------------------------
// [Construction / Destruction]
// --------------------------------------------------------------------------
//! Create a new `CCFuncCall` instance.
ASMJIT_INLINE CCFuncCall(CodeBuilder* cb, uint32_t instId, uint32_t options, Operand* opArray, uint32_t opCount) noexcept
: CBInst(cb, instId, options, opArray, opCount),
_funcDetail(),
_args(nullptr) {
_type = kNodeFuncCall;
_ret[0].reset();
_ret[1].reset();
orFlags(kFlagIsRemovable);
}
//! Destroy the `CCFuncCall` instance (NEVER CALLED).
ASMJIT_INLINE ~CCFuncCall() noexcept {}
// --------------------------------------------------------------------------
// [Signature]
// --------------------------------------------------------------------------
//! Set function signature.
ASMJIT_INLINE Error setSignature(const FuncSignature& sign) noexcept {
return _funcDetail.init(sign);
}
// --------------------------------------------------------------------------
// [Accessors]
// --------------------------------------------------------------------------
//! Get function declaration.
ASMJIT_INLINE FuncDetail& getDetail() noexcept { return _funcDetail; }
//! Get function declaration.
ASMJIT_INLINE const FuncDetail& getDetail() const noexcept { return _funcDetail; }
//! Get target operand (the first instruction operand).
ASMJIT_INLINE Operand& getTarget() noexcept { return static_cast<Operand&>(_opArray[0]); }
//! \overload
ASMJIT_INLINE const Operand& getTarget() const noexcept { return static_cast<const Operand&>(_opArray[0]); }
//! Get return at `i`.
ASMJIT_INLINE Operand& getRet(uint32_t i = 0) noexcept {
ASMJIT_ASSERT(i < 2);
return static_cast<Operand&>(_ret[i]);
}
//! \overload
ASMJIT_INLINE const Operand& getRet(uint32_t i = 0) const noexcept {
ASMJIT_ASSERT(i < 2);
return static_cast<const Operand&>(_ret[i]);
}
//! Get argument at `i`.
ASMJIT_INLINE Operand& getArg(uint32_t i) noexcept {
ASMJIT_ASSERT(i < kFuncArgCountLoHi);
return static_cast<Operand&>(_args[i]);
}
//! \overload
ASMJIT_INLINE const Operand& getArg(uint32_t i) const noexcept {
ASMJIT_ASSERT(i < kFuncArgCountLoHi);
return static_cast<const Operand&>(_args[i]);
}
//! Set argument at `i` to `op`.
ASMJIT_API bool _setArg(uint32_t i, const Operand_& op) noexcept;
//! Set return at `i` to `op`.
ASMJIT_API bool _setRet(uint32_t i, const Operand_& op) noexcept;
//! Set argument at `i` to `reg`.
ASMJIT_INLINE bool setArg(uint32_t i, const Reg& reg) noexcept { return _setArg(i, reg); }
//! Set argument at `i` to `imm`.
ASMJIT_INLINE bool setArg(uint32_t i, const Imm& imm) noexcept { return _setArg(i, imm); }
//! Set return at `i` to `reg`.
ASMJIT_INLINE bool setRet(uint32_t i, const Reg& reg) noexcept { return _setRet(i, reg); }
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
FuncDetail _funcDetail; //!< Function detail.
Operand_ _ret[2]; //!< Return.
Operand_* _args; //!< Arguments.
};
// ============================================================================
// [asmjit::CCPushArg]
// ============================================================================
//! Push argument before a function call (CodeCompiler).
class CCPushArg : public CBNode {
public:
ASMJIT_NONCOPYABLE(CCPushArg)
// --------------------------------------------------------------------------
// [Construction / Destruction]
// --------------------------------------------------------------------------
//! Create a new `CCPushArg` instance tied to the call `call`, pushing the
//! source register `src` (optionally converted through `cvt`).
ASMJIT_INLINE CCPushArg(CodeBuilder* cb, CCFuncCall* call, VirtReg* src, VirtReg* cvt) noexcept
: CBNode(cb, kNodePushArg),
_call(call),
_src(src),
_cvt(cvt),
_args(0) {
orFlags(kFlagIsRemovable);
}
//! Destroy the `CCPushArg` instance.
ASMJIT_INLINE ~CCPushArg() noexcept {}
// --------------------------------------------------------------------------
// [Accessors]
// --------------------------------------------------------------------------
//! Get the associated function-call.
ASMJIT_INLINE CCFuncCall* getCall() const noexcept { return _call; }
//! Get source variable.
ASMJIT_INLINE VirtReg* getSrcReg() const noexcept { return _src; }
//! Get conversion variable.
ASMJIT_INLINE VirtReg* getCvtReg() const noexcept { return _cvt; }
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
CCFuncCall* _call; //!< Associated `CCFuncCall`.
VirtReg* _src; //!< Source variable.
VirtReg* _cvt; //!< Temporary variable used for conversion (or null).
uint32_t _args; //!< Affected arguments bit-array.
};
// ============================================================================
// [asmjit::CodeCompiler]
// ============================================================================
//! Code emitter that uses virtual registers and performs register allocation.
//!
//! Compiler is a high-level code-generation tool that provides register
//! allocation and automatic handling of function calling conventions. It was
//! primarily designed for merging multiple parts of code into a function
//! without worrying about registers and function calling conventions.
//!
//! CodeCompiler can be used, with a minimum effort, to handle 32-bit and 64-bit
//! code at the same time.
//!
//! CodeCompiler is based on CodeBuilder and contains all the features it
//! provides. It means that the code it stores can be modified (removed, added,
//! injected) and analyzed. When the code is finalized the compiler can emit
//! the code into an Assembler to translate the abstract representation into a
//! machine code.
class ASMJIT_VIRTAPI CodeCompiler : public CodeBuilder {
public:
ASMJIT_NONCOPYABLE(CodeCompiler)
typedef CodeBuilder Base;
// --------------------------------------------------------------------------
// [Construction / Destruction]
// --------------------------------------------------------------------------
//! Create a new `CodeCompiler` instance.
ASMJIT_API CodeCompiler() noexcept;
//! Destroy the `CodeCompiler` instance.
ASMJIT_API virtual ~CodeCompiler() noexcept;
// --------------------------------------------------------------------------
// [Events]
// --------------------------------------------------------------------------
ASMJIT_API virtual Error onAttach(CodeHolder* code) noexcept override;
ASMJIT_API virtual Error onDetach(CodeHolder* code) noexcept override;
// --------------------------------------------------------------------------
// [Node-Factory]
// --------------------------------------------------------------------------
//! \internal
//!
//! Create a new `CCHint`.
ASMJIT_API CCHint* newHintNode(Reg& reg, uint32_t hint, uint32_t value) noexcept;
// --------------------------------------------------------------------------
// [Func]
// --------------------------------------------------------------------------
//! Get the current function.
ASMJIT_INLINE CCFunc* getFunc() const noexcept { return _func; }
//! Create a new `CCFunc`.
ASMJIT_API CCFunc* newFunc(const FuncSignature& sign) noexcept;
//! Add a function `node` to the stream.
ASMJIT_API CCFunc* addFunc(CCFunc* func);
//! Add a new function.
ASMJIT_API CCFunc* addFunc(const FuncSignature& sign);
//! Emit a sentinel that marks the end of the current function.
ASMJIT_API CBSentinel* endFunc();
// --------------------------------------------------------------------------
// [Ret]
// --------------------------------------------------------------------------
//! Create a new `CCFuncRet`.
ASMJIT_API CCFuncRet* newRet(const Operand_& o0, const Operand_& o1) noexcept;
//! Add a new `CCFuncRet`.
ASMJIT_API CCFuncRet* addRet(const Operand_& o0, const Operand_& o1) noexcept;
// --------------------------------------------------------------------------
// [Call]
// --------------------------------------------------------------------------
//! Create a new `CCFuncCall`.
ASMJIT_API CCFuncCall* newCall(uint32_t instId, const Operand_& o0, const FuncSignature& sign) noexcept;
//! Add a new `CCFuncCall`.
ASMJIT_API CCFuncCall* addCall(uint32_t instId, const Operand_& o0, const FuncSignature& sign) noexcept;
// --------------------------------------------------------------------------
// [Args]
// --------------------------------------------------------------------------
//! Set a function argument at `argIndex` to `reg`.
ASMJIT_API Error setArg(uint32_t argIndex, const Reg& reg);
// --------------------------------------------------------------------------
// [Hint]
// --------------------------------------------------------------------------
//! Emit a new hint (purely informational node).
ASMJIT_API Error _hint(Reg& reg, uint32_t hint, uint32_t value);
// --------------------------------------------------------------------------
// [VirtReg / Stack]
// --------------------------------------------------------------------------
//! Create a new virtual register representing the given `typeId` and `signature`.
//!
//! The `typeId` may be either a machine-specific register type, like the
//! ones used by `X86Reg`, or a machine-independent type-id from which the
//! machine-specific register type is deduced.
ASMJIT_API VirtReg* newVirtReg(uint32_t typeId, uint32_t signature, const char* name) noexcept;
ASMJIT_API Error _newReg(Reg& out, uint32_t typeId, const char* name);
ASMJIT_API Error _newReg(Reg& out, uint32_t typeId, const char* nameFmt, va_list ap);
ASMJIT_API Error _newReg(Reg& out, const Reg& ref, const char* name);
ASMJIT_API Error _newReg(Reg& out, const Reg& ref, const char* nameFmt, va_list ap);
ASMJIT_API Error _newStack(Mem& out, uint32_t size, uint32_t alignment, const char* name);
ASMJIT_API Error _newConst(Mem& out, uint32_t scope, const void* data, size_t size);
// --------------------------------------------------------------------------
// [VirtReg]
// --------------------------------------------------------------------------
//! Get whether the virtual register operand `reg` is valid, i.e. its id
//! refers to a `VirtReg` managed by this `CodeCompiler`.
ASMJIT_INLINE bool isVirtRegValid(const Reg& reg) const noexcept {
  return isVirtRegValid(reg.getId());
}
//! \overload
ASMJIT_INLINE bool isVirtRegValid(uint32_t id) const noexcept {
  // A packed id is valid when its unpacked index falls inside `_vRegArray`.
  return Operand::unpackId(id) < _vRegArray.getLength();
}
//! Get the \ref VirtReg associated with the register operand `reg`.
ASMJIT_INLINE VirtReg* getVirtReg(const Reg& reg) const noexcept {
  return getVirtRegById(reg.getId());
}
//! Get the \ref VirtReg associated with the packed register `id`.
//!
//! The `id` must be valid; this is asserted in debug builds.
ASMJIT_INLINE VirtReg* getVirtRegById(uint32_t id) const noexcept {
  ASMJIT_ASSERT(id != kInvalidValue);
  size_t slot = Operand::unpackId(id);
  ASMJIT_ASSERT(slot < _vRegArray.getLength());
  return _vRegArray[slot];
}
//! Get an array of all virtual registers managed by CodeCompiler.
ASMJIT_INLINE const ZoneVector<VirtReg*>& getVirtRegArray() const noexcept { return _vRegArray; }
//! Alloc a virtual register `reg`.
ASMJIT_API Error alloc(Reg& reg);
//! Alloc a virtual register `reg` using `physId` as a register id.
ASMJIT_API Error alloc(Reg& reg, uint32_t physId);
//! Alloc a virtual register `reg` using `ref` as a register operand.
ASMJIT_API Error alloc(Reg& reg, const Reg& ref);
//! Spill a virtual register `reg`.
ASMJIT_API Error spill(Reg& reg);
//! Save a virtual register `reg` if the status is `modified` at this point.
ASMJIT_API Error save(Reg& reg);
//! Unuse a virtual register `reg`.
ASMJIT_API Error unuse(Reg& reg);
//! Get priority of a virtual register `reg`.
ASMJIT_API uint32_t getPriority(Reg& reg) const;
//! Set priority of variable `reg` to `priority`.
ASMJIT_API void setPriority(Reg& reg, uint32_t priority);
//! Get save-on-unuse `reg` property.
ASMJIT_API bool getSaveOnUnuse(Reg& reg) const;
//! Set save-on-unuse `reg` property to `value`.
ASMJIT_API void setSaveOnUnuse(Reg& reg, bool value);
//! Rename variable `reg` to `name`.
//!
//! NOTE: Only new name will appear in the logger.
ASMJIT_API void rename(Reg& reg, const char* fmt, ...);
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
CCFunc* _func; //!< Current function.
Zone _vRegZone; //!< Allocates \ref VirtReg objects.
ZoneVector<VirtReg*> _vRegArray; //!< Stores array of \ref VirtReg pointers.
CBConstPool* _localConstPool; //!< Local constant pool, flushed at the end of each function.
CBConstPool* _globalConstPool; //!< Global constant pool, flushed at the end of the compilation.
};
//! \}
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"
// [Guard]
#endif // !ASMJIT_DISABLE_COMPILER
#endif // _ASMJIT_BASE_CODECOMPILER_H

View file

@ -0,0 +1,236 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Export]
#define ASMJIT_EXPORTS
// [Dependencies]
#include "../base/assembler.h"
#include "../base/utils.h"
#include "../base/vmem.h"
#if defined(ASMJIT_BUILD_X86)
#include "../x86/x86inst.h"
#endif // ASMJIT_BUILD_X86
#if defined(ASMJIT_BUILD_ARM)
#include "../arm/arminst.h"
#endif // ASMJIT_BUILD_ARM
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
// ============================================================================
// [asmjit::CodeEmitter - Construction / Destruction]
// ============================================================================
// Construct a detached `CodeEmitter` of the given `type` (see `Type`).
//
// The emitter starts with `_lastError` set to `kErrorNotInitialized` and
// `kOptionMaybeFailureCase` raised, so emitting fails until the emitter is
// attached to a `CodeHolder` (see `onAttach()`).
CodeEmitter::CodeEmitter(uint32_t type) noexcept
  : _codeInfo(),
    _code(nullptr),
    _nextEmitter(nullptr),
    _type(static_cast<uint8_t>(type)),
    _destroyed(false),
    _finalized(false),
    _reserved(false),
    _lastError(kErrorNotInitialized),
    _privateData(0),
    _globalHints(0),
    _globalOptions(kOptionMaybeFailureCase),
    _options(0),
    _extraReg(),
    _inlineComment(nullptr),
    _none(),
    _nativeGpReg(),
    _nativeGpArray(nullptr) {}
// Destroy the emitter. If it is still attached, detach it from its
// `CodeHolder` first so the holder doesn't keep a dangling emitter pointer.
CodeEmitter::~CodeEmitter() noexcept {
  if (!_code)
    return;
  // Lets the detach machinery know this detach happens from the destructor.
  _destroyed = true;
  _code->detach(this);
}
// ============================================================================
// [asmjit::CodeEmitter - Events]
// ============================================================================
// Called after this emitter has been attached to `code`; adopts the holder's
// code information and global settings, and clears the error state so the
// emitter becomes usable.
Error CodeEmitter::onAttach(CodeHolder* code) noexcept {
  _globalHints = code->getGlobalHints();
  _globalOptions = code->getGlobalOptions();
  _codeInfo = code->getCodeInfo();
  _lastError = kErrorOk;
  return kErrorOk;
}
// Called after this emitter has been detached from `code`; returns the
// emitter to the state it had right after construction.
Error CodeEmitter::onDetach(CodeHolder* code) noexcept {
  // Code information and the sticky error become "not initialized" again.
  _codeInfo.reset();
  _lastError = kErrorNotInitialized;
  _finalized = false;
  _privateData = 0;

  // Reset global settings and per-instruction state.
  _globalHints = 0;
  _globalOptions = kOptionMaybeFailureCase;
  _options = 0;
  _extraReg.reset();
  _inlineComment = nullptr;

  // Drop cached architecture-specific register data.
  _nativeGpReg.reset();
  _nativeGpArray = nullptr;

  return kErrorOk;
}
// ============================================================================
// [asmjit::CodeEmitter - Code-Generation]
// ============================================================================
// Emit an instruction whose operands are stored in `opArray`, dispatching to
// the fixed-arity `_emit()` overloads and padding unused slots with `_none`.
Error CodeEmitter::_emitOpArray(uint32_t instId, const Operand_* opArray, size_t opCount) {
  const Operand_* o = opArray;
  switch (opCount) {
    case 0: return _emit(instId, _none, _none, _none, _none);
    case 1: return _emit(instId, o[0], _none, _none, _none);
    case 2: return _emit(instId, o[0], o[1], _none, _none);
    case 3: return _emit(instId, o[0], o[1], o[2], _none);
    case 4: return _emit(instId, o[0], o[1], o[2], o[3]);
    case 5: return _emit(instId, o[0], o[1], o[2], o[3], o[4], _none);
    case 6: return _emit(instId, o[0], o[1], o[2], o[3], o[4], o[5]);
    default:
      // More than 6 operands is not a valid instruction signature.
      return DebugUtils::errored(kErrorInvalidArgument);
  }
}
// ============================================================================
// [asmjit::CodeEmitter - Label Management]
// ============================================================================
// Look up a label by `name` (and optional `parentId`). When the emitter is
// not attached, or the holder doesn't know the name, the id falls back to 0,
// which produces the same Label a failed lookup would.
Label CodeEmitter::getLabelByName(const char* name, size_t nameLength, uint32_t parentId) noexcept {
  uint32_t id = 0;
  if (_code)
    id = _code->getLabelIdByName(name, nameLength, parentId);
  return Label(id);
}
// ============================================================================
// [asmjit::CodeEmitter - Finalize]
// ============================================================================
// Finalize the code generated by this emitter.
Error CodeEmitter::finalize() {
  // Finalization does nothing by default, overridden by `CodeBuilder`.
  return kErrorOk;
}
// ============================================================================
// [asmjit::CodeEmitter - Error Handling]
// ============================================================================
// Set the last error to `error` (with an optional human-readable `message`)
// and give the attached `ErrorHandler`, if any, a chance to intercept it.
// Passing `kErrorOk` resets the error state instead of setting one.
Error CodeEmitter::setLastError(Error error, const char* message) {
  // This is fatal, CodeEmitter can't set error without being attached to `CodeHolder`.
  ASMJIT_ASSERT(_code != nullptr);
  // Special case used to reset the last error.
  if (error == kErrorOk) {
    _lastError = kErrorOk;
    _globalOptions &= ~kOptionMaybeFailureCase;
    return kErrorOk;
  }
  if (!message)
    message = DebugUtils::errorAsString(error);
  // Logging is skipped if the error is handled by `ErrorHandler`.
  ErrorHandler* handler = _code->_errorHandler;
  if (handler && handler->handleError(error, message, this))
    return error;
  // The handler->handleError() function may throw an exception or longjmp()
  // to terminate the execution of `setLastError()`. This is the reason why
  // we have delayed changing the `_error` member until now.
  _lastError = error;
  _globalOptions |= kOptionMaybeFailureCase;
  return error;
}
// ============================================================================
// [asmjit::CodeEmitter - Helpers]
// ============================================================================
// Get whether the label `id` refers to a label registered in the attached
// CodeHolder. A detached emitter has no labels, so every id is invalid.
bool CodeEmitter::isLabelValid(uint32_t id) const noexcept {
  if (!_code)
    return false;
  return Operand::unpackId(id) < _code->_labels.getLength();
}
// Emit a printf-style formatted comment.
//
// Delegates to `commentv()` so the error-state check and the logging logic
// exist in exactly one place; the two functions previously duplicated them
// (including the ASMJIT_DISABLE_LOGGING fallback), which invited drift.
Error CodeEmitter::commentf(const char* fmt, ...) {
  va_list ap;
  va_start(ap, fmt);
  Error err = commentv(fmt, ap);
  va_end(ap);
  return err;
}
// Emit a comment formatted from `fmt` and a caller-provided `va_list`.
//
// Propagates the sticky error immediately if the emitter is already in an
// error state. The comment is only written when logging was compiled in and
// the `kOptionLoggingEnabled` global option is set.
Error CodeEmitter::commentv(const char* fmt, va_list ap) {
  Error err = _lastError;
  if (err) return err;
#if !defined(ASMJIT_DISABLE_LOGGING)
  if (_globalOptions & kOptionLoggingEnabled)
    err = _code->_logger->logv(fmt, ap);
#else
  ASMJIT_UNUSED(fmt);
  ASMJIT_UNUSED(ap);
#endif
  return err;
}
// ============================================================================
// [asmjit::CodeEmitter - Emit]
// ============================================================================
// Fixed-arity `emit()` wrappers around the virtual `_emit()`. The integer
// overloads wrap the trailing immediate in `Imm` so callers can pass plain
// ints. `OP` only keeps these one-liners readable and is #undef-ed below.
#define OP const Operand_&
Error CodeEmitter::emit(uint32_t instId) { return _emit(instId, _none, _none, _none, _none); }
Error CodeEmitter::emit(uint32_t instId, OP o0) { return _emit(instId, o0, _none, _none, _none); }
Error CodeEmitter::emit(uint32_t instId, OP o0, OP o1) { return _emit(instId, o0, o1, _none, _none); }
Error CodeEmitter::emit(uint32_t instId, OP o0, OP o1, OP o2) { return _emit(instId, o0, o1, o2, _none); }
Error CodeEmitter::emit(uint32_t instId, OP o0, OP o1, OP o2, OP o3) { return _emit(instId, o0, o1, o2, o3); }
Error CodeEmitter::emit(uint32_t instId, OP o0, OP o1, OP o2, OP o3, OP o4) { return _emit(instId, o0, o1, o2, o3, o4, _none); }
Error CodeEmitter::emit(uint32_t instId, OP o0, OP o1, OP o2, OP o3, OP o4, OP o5) { return _emit(instId, o0, o1, o2, o3, o4, o5); }
Error CodeEmitter::emit(uint32_t instId, int o0) { return _emit(instId, Imm(o0), _none, _none, _none); }
Error CodeEmitter::emit(uint32_t instId, OP o0, int o1) { return _emit(instId, o0, Imm(o1), _none, _none); }
Error CodeEmitter::emit(uint32_t instId, OP o0, OP o1, int o2) { return _emit(instId, o0, o1, Imm(o2), _none); }
Error CodeEmitter::emit(uint32_t instId, OP o0, OP o1, OP o2, int o3) { return _emit(instId, o0, o1, o2, Imm(o3)); }
Error CodeEmitter::emit(uint32_t instId, OP o0, OP o1, OP o2, OP o3, int o4) { return _emit(instId, o0, o1, o2, o3, Imm(o4), _none); }
Error CodeEmitter::emit(uint32_t instId, OP o0, OP o1, OP o2, OP o3, OP o4, int o5) { return _emit(instId, o0, o1, o2, o3, o4, Imm(o5)); }
Error CodeEmitter::emit(uint32_t instId, int64_t o0) { return _emit(instId, Imm(o0), _none, _none, _none); }
Error CodeEmitter::emit(uint32_t instId, OP o0, int64_t o1) { return _emit(instId, o0, Imm(o1), _none, _none); }
Error CodeEmitter::emit(uint32_t instId, OP o0, OP o1, int64_t o2) { return _emit(instId, o0, o1, Imm(o2), _none); }
Error CodeEmitter::emit(uint32_t instId, OP o0, OP o1, OP o2, int64_t o3) { return _emit(instId, o0, o1, o2, Imm(o3)); }
Error CodeEmitter::emit(uint32_t instId, OP o0, OP o1, OP o2, OP o3, int64_t o4) { return _emit(instId, o0, o1, o2, o3, Imm(o4), _none); }
Error CodeEmitter::emit(uint32_t instId, OP o0, OP o1, OP o2, OP o3, OP o4, int64_t o5) { return _emit(instId, o0, o1, o2, o3, o4, Imm(o5)); }
#undef OP
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"

View file

@ -0,0 +1,499 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_BASE_CODEEMITTER_H
#define _ASMJIT_BASE_CODEEMITTER_H
// [Dependencies]
#include "../base/arch.h"
#include "../base/codeholder.h"
#include "../base/operand.h"
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
//! \addtogroup asmjit_base
//! \{
// ============================================================================
// [Forward Declarations]
// ============================================================================
class ConstPool;
// ============================================================================
// [asmjit::CodeEmitter]
// ============================================================================
//! Provides a base foundation to emit code - specialized by \ref Assembler and
//! \ref CodeBuilder.
class ASMJIT_VIRTAPI CodeEmitter {
public:
//! CodeEmitter type.
ASMJIT_ENUM(Type) {
kTypeNone = 0,
kTypeAssembler = 1,
kTypeBuilder = 2,
kTypeCompiler = 3,
kTypeCount = 4
};
//! CodeEmitter hints - global settings that affect machine-code generation.
ASMJIT_ENUM(Hints) {
//! Emit optimized code-alignment sequences.
//!
//! Default `true`.
//!
//! X86/X64 Specific
//! ----------------
//!
//! Default align sequence used by X86/X64 architecture is one-byte (0x90)
//! opcode that is often shown by disassemblers as nop. However there are
//! more optimized align sequences for 2-11 bytes that may execute faster.
//! If this feature is enabled AsmJit will generate specialized sequences
//! for alignment between 2 to 11 bytes.
kHintOptimizedAlign = 0x00000001U,
//! Emit jump-prediction hints.
//!
//! Default `false`.
//!
//! X86/X64 Specific
//! ----------------
//!
//! Jump prediction is usually based on the direction of the jump. If the
//! jump is backward it is usually predicted as taken; and if the jump is
//! forward it is usually predicted as not-taken. The reason is that loops
//! generally use backward jumps and conditions usually use forward jumps.
//! However this behavior can be overridden by using instruction prefixes.
//! If this option is enabled these hints will be emitted.
//!
//! This feature is disabled by default, because the only processor that
//! used to take into consideration prediction hints was P4. Newer processors
//! implement heuristics for branch prediction that ignores any static hints.
kHintPredictedJumps = 0x00000002U
};
//! CodeEmitter options that are merged with instruction options.
ASMJIT_ENUM(Options) {
//! Reserved, used to check for errors in `Assembler::_emit()`. In addition,
//! if an emitter is in error state it will have `kOptionMaybeFailureCase`
//! set.
kOptionMaybeFailureCase = 0x00000001U,
//! Perform a strict validation before the instruction is emitted.
kOptionStrictValidation = 0x00000002U,
//! Logging is enabled and `CodeHolder::getLogger()` should return a valid
//! \ref Logger pointer.
kOptionLoggingEnabled = 0x00000004U,
//! Mask of all internal options that are not used to represent instruction
//! options, but are used to instrument Assembler and CodeBuilder. These
//! options are internal and should not be used outside of AsmJit itself.
//!
//! NOTE: Reserved options should never appear in `CBInst` options.
kOptionReservedMask = 0x00000007U,
//! Used only by Assembler to mark `_op4` and `_op5` are used.
kOptionOp4Op5Used = 0x00000008U,
//! Prevents following a jump during compilation (CodeCompiler).
kOptionUnfollow = 0x00000010U,
//! Overwrite the destination operand (CodeCompiler).
//!
//! Hint that is important for register liveness analysis. It tells the
//! compiler that the destination operand will be overwritten now or by
//! adjacent instructions. CodeCompiler knows when a register is completely
//! overwritten by a single instruction, for example you don't have to
//! mark "movaps" or "pxor x, x", however, if a pair of instructions is
//! used and the first of them doesn't completely overwrite the content
//! of the destination, CodeCompiler fails to mark that register as dead.
//!
//! X86/X64 Specific
//! ----------------
//!
//! - All instructions that always overwrite at least the size of the
//! register the virtual-register uses , for example "mov", "movq",
//! "movaps" don't need the overwrite option to be used - conversion,
//! shuffle, and other miscellaneous instructions included.
//!
//!   - All instructions that clear the destination register if all operands
//!     are the same, for example "xor x, x", "pcmpeqb x, x", etc...
//!
//!   - Consecutive instructions that partially overwrite the variable until
//!     there is no old content require the `overwrite()` to be used. Some
//!     examples (not always the best use cases, though):
//!
//! - `movlps xmm0, ?` followed by `movhps xmm0, ?` and vice versa
//! - `movlpd xmm0, ?` followed by `movhpd xmm0, ?` and vice versa
//! - `mov al, ?` followed by `and ax, 0xFF`
//! - `mov al, ?` followed by `mov ah, al`
//! - `pinsrq xmm0, ?, 0` followed by `pinsrq xmm0, ?, 1`
//!
//! - If allocated variable is used temporarily for scalar operations. For
//! example if you allocate a full vector like `X86Compiler::newXmm()`
//! and then use that vector for scalar operations you should use
//! `overwrite()` directive:
//!
//! - `sqrtss x, y` - only LO element of `x` is changed, if you don't use
//! HI elements, use `X86Compiler.overwrite().sqrtss(x, y)`.
kOptionOverwrite = 0x00000020U
};
// --------------------------------------------------------------------------
// [Construction / Destruction]
// --------------------------------------------------------------------------
ASMJIT_API CodeEmitter(uint32_t type) noexcept;
ASMJIT_API virtual ~CodeEmitter() noexcept;
// --------------------------------------------------------------------------
// [Events]
// --------------------------------------------------------------------------
//! Called after the \ref CodeEmitter was attached to the \ref CodeHolder.
virtual Error onAttach(CodeHolder* code) noexcept = 0;
//! Called after the \ref CodeEmitter was detached from the \ref CodeHolder.
virtual Error onDetach(CodeHolder* code) noexcept = 0;
// --------------------------------------------------------------------------
// [Code-Generation]
// --------------------------------------------------------------------------
//! Emit instruction having max 4 operands.
virtual Error _emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3) = 0;
//! Emit instruction having max 6 operands.
virtual Error _emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4, const Operand_& o5) = 0;
//! Emit instruction having operands stored in array.
virtual Error _emitOpArray(uint32_t instId, const Operand_* opArray, size_t opCount);
//! Create a new label.
virtual Label newLabel() = 0;
//! Create a new named label.
virtual Label newNamedLabel(
const char* name,
size_t nameLength = Globals::kInvalidIndex,
uint32_t type = Label::kTypeGlobal,
uint32_t parentId = 0) = 0;
//! Get a label by name.
//!
//! Returns invalid Label in case that the name is invalid or label was not found.
//!
//! NOTE: This function doesn't trigger ErrorHandler in case the name is
//! invalid or no such label exist. You must always check the validity of the
//! \ref Label returned.
ASMJIT_API Label getLabelByName(
const char* name,
size_t nameLength = Globals::kInvalidIndex,
uint32_t parentId = 0) noexcept;
//! Bind the `label` to the current position of the current section.
//!
//! NOTE: Attempt to bind the same label multiple times will return an error.
virtual Error bind(const Label& label) = 0;
//! Align to the `alignment` specified.
//!
//! The sequence that is used to fill the gap between the aligned location
//! and the current location depends on the align `mode`, see \ref AlignMode.
virtual Error align(uint32_t mode, uint32_t alignment) = 0;
//! Embed raw data into the code-buffer.
virtual Error embed(const void* data, uint32_t size) = 0;
//! Embed absolute label address as data (4 or 8 bytes).
virtual Error embedLabel(const Label& label) = 0;
//! Embed a constant pool into the code-buffer in the following steps:
//! 1. Align by using kAlignData to the minimum `pool` alignment.
//! 2. Bind `label` so it's bound to an aligned location.
//! 3. Emit constant pool data.
virtual Error embedConstPool(const Label& label, const ConstPool& pool) = 0;
//! Emit a comment string `s` with an optional `len` parameter.
virtual Error comment(const char* s, size_t len = Globals::kInvalidIndex) = 0;
// --------------------------------------------------------------------------
// [Code-Generation Status]
// --------------------------------------------------------------------------
//! Get if the CodeEmitter is initialized (i.e. attached to a \ref CodeHolder).
ASMJIT_INLINE bool isInitialized() const noexcept { return _code != nullptr; }
ASMJIT_API virtual Error finalize();
// --------------------------------------------------------------------------
// [Code Information]
// --------------------------------------------------------------------------
//! Get information about the code, see \ref CodeInfo.
ASMJIT_INLINE const CodeInfo& getCodeInfo() const noexcept { return _codeInfo; }
//! Get \ref CodeHolder this CodeEmitter is attached to.
ASMJIT_INLINE CodeHolder* getCode() const noexcept { return _code; }
//! Get information about the architecture, see \ref ArchInfo.
ASMJIT_INLINE const ArchInfo& getArchInfo() const noexcept { return _codeInfo.getArchInfo(); }
//! Get if the target architecture is 32-bit.
ASMJIT_INLINE bool is32Bit() const noexcept { return getArchInfo().is32Bit(); }
//! Get if the target architecture is 64-bit.
ASMJIT_INLINE bool is64Bit() const noexcept { return getArchInfo().is64Bit(); }
//! Get the target architecture type.
ASMJIT_INLINE uint32_t getArchType() const noexcept { return getArchInfo().getType(); }
//! Get the target architecture sub-type.
ASMJIT_INLINE uint32_t getArchSubType() const noexcept { return getArchInfo().getSubType(); }
//! Get the target architecture's GP register size (4 or 8 bytes).
ASMJIT_INLINE uint32_t getGpSize() const noexcept { return getArchInfo().getGpSize(); }
//! Get the number of target GP registers.
ASMJIT_INLINE uint32_t getGpCount() const noexcept { return getArchInfo().getGpCount(); }
// --------------------------------------------------------------------------
// [Code-Emitter Type]
// --------------------------------------------------------------------------
//! Get the type of this CodeEmitter, see \ref Type.
ASMJIT_INLINE uint32_t getType() const noexcept { return _type; }
ASMJIT_INLINE bool isAssembler() const noexcept { return _type == kTypeAssembler; }
ASMJIT_INLINE bool isCodeBuilder() const noexcept { return _type == kTypeBuilder; }
ASMJIT_INLINE bool isCodeCompiler() const noexcept { return _type == kTypeCompiler; }
// --------------------------------------------------------------------------
// [Global Information]
// --------------------------------------------------------------------------
//! Get global hints.
ASMJIT_INLINE uint32_t getGlobalHints() const noexcept { return _globalHints; }
//! Get global options.
//!
//! Global options are merged with instruction options before the instruction
//! is encoded. These options have some bits reserved that are used for error
//! checking, logging, and strict validation. Other options are globals that
//! affect each instruction, for example if VEX3 is set globally, it will
//! affect all instructions, even those that don't have such option set.
ASMJIT_INLINE uint32_t getGlobalOptions() const noexcept { return _globalOptions; }
// --------------------------------------------------------------------------
// [Error Handling]
// --------------------------------------------------------------------------
//! Get if the object is in error state.
//!
//! Error state means that it does not consume anything unless the error
//! state is reset by calling `resetLastError()`. Use `getLastError()` to
//! get the last error that put the object into the error state.
ASMJIT_INLINE bool isInErrorState() const noexcept { return _lastError != kErrorOk; }
//! Get the last error code.
ASMJIT_INLINE Error getLastError() const noexcept { return _lastError; }
//! Set the last error code and propagate it through the error handler.
ASMJIT_API Error setLastError(Error error, const char* message = nullptr);
//! Clear the last error code and return `kErrorOk`.
ASMJIT_INLINE Error resetLastError() noexcept { return setLastError(kErrorOk); }
// --------------------------------------------------------------------------
// [Accessors That Affect the Next Instruction]
// --------------------------------------------------------------------------
//! Get options of the next instruction.
ASMJIT_INLINE uint32_t getOptions() const noexcept { return _options; }
//! Set options of the next instruction.
ASMJIT_INLINE void setOptions(uint32_t options) noexcept { _options = options; }
//! Add options of the next instruction.
ASMJIT_INLINE void addOptions(uint32_t options) noexcept { _options |= options; }
//! Reset options of the next instruction.
ASMJIT_INLINE void resetOptions() noexcept { _options = 0; }
//! Get if the extra register operand is valid.
ASMJIT_INLINE bool hasExtraReg() const noexcept { return _extraReg.isValid(); }
//! Get an extra operand that will be used by the next instruction (architecture specific).
ASMJIT_INLINE const RegOnly& getExtraReg() const noexcept { return _extraReg; }
//! Set an extra operand that will be used by the next instruction (architecture specific).
ASMJIT_INLINE void setExtraReg(const Reg& reg) noexcept { _extraReg.init(reg); }
//! Set an extra operand that will be used by the next instruction (architecture specific).
ASMJIT_INLINE void setExtraReg(const RegOnly& reg) noexcept { _extraReg.init(reg); }
//! Reset an extra operand that will be used by the next instruction (architecture specific).
ASMJIT_INLINE void resetExtraReg() noexcept { _extraReg.reset(); }
//! Get annotation of the next instruction.
ASMJIT_INLINE const char* getInlineComment() const noexcept { return _inlineComment; }
//! Set annotation of the next instruction.
//!
//! NOTE: This string is set back to null by `_emit()`, but until that it has
//! to remain valid as `CodeEmitter` is not required to make a copy of it (and
//! it would be slow to do that for each instruction).
ASMJIT_INLINE void setInlineComment(const char* s) noexcept { _inlineComment = s; }
//! Reset annotation of the next instruction to null.
ASMJIT_INLINE void resetInlineComment() noexcept { _inlineComment = nullptr; }
// --------------------------------------------------------------------------
// [Helpers]
// --------------------------------------------------------------------------
//! Get if the `label` is valid (i.e. registered).
ASMJIT_INLINE bool isLabelValid(const Label& label) const noexcept {
return isLabelValid(label.getId());
}
//! Get if the label `id` is valid (i.e. registered).
ASMJIT_API bool isLabelValid(uint32_t id) const noexcept;
//! Emit a formatted string `fmt`.
ASMJIT_API Error commentf(const char* fmt, ...);
//! Emit a formatted string `fmt` (va_list version).
ASMJIT_API Error commentv(const char* fmt, va_list ap);
// --------------------------------------------------------------------------
// [Emit]
// --------------------------------------------------------------------------
// NOTE: These `emit()` helpers are designed to address a code-bloat generated
// by C++ compilers to call a function having many arguments. Each parameter to
// `_emit()` requires code to pass it, which means that if we default to 4
// operand parameters in `_emit()` and instId the C++ compiler would have to
// generate a virtual function call having 5 parameters, which is quite a lot.
// Since by default asm instructions have 2 to 3 operands it's better to
// introduce helpers that pass those and fill all the remaining with `_none`.
//! Emit an instruction.
ASMJIT_API Error emit(uint32_t instId);
//! \overload
ASMJIT_API Error emit(uint32_t instId, const Operand_& o0);
//! \overload
ASMJIT_API Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1);
//! \overload
ASMJIT_API Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2);
//! \overload
ASMJIT_API Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3);
//! \overload
ASMJIT_API Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4);
//! \overload
ASMJIT_API Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4, const Operand_& o5);
//! Emit an instruction that has a 32-bit signed immediate operand.
ASMJIT_API Error emit(uint32_t instId, int o0);
//! \overload
ASMJIT_API Error emit(uint32_t instId, const Operand_& o0, int o1);
//! \overload
ASMJIT_API Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, int o2);
//! \overload
ASMJIT_API Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, int o3);
//! \overload
ASMJIT_API Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, int o4);
//! \overload
ASMJIT_API Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4, int o5);
//! Emit an instruction that has a 64-bit signed immediate operand.
ASMJIT_API Error emit(uint32_t instId, int64_t o0);
//! \overload
ASMJIT_API Error emit(uint32_t instId, const Operand_& o0, int64_t o1);
//! \overload
ASMJIT_API Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, int64_t o2);
//! \overload
ASMJIT_API Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, int64_t o3);
//! \overload
ASMJIT_API Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, int64_t o4);
//! \overload
ASMJIT_API Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4, int64_t o5);
//! \overload
// Convenience forwarders: an `unsigned int` immediate always fits in int64_t,
// so the static_cast below is value-preserving (zero-extension).
ASMJIT_INLINE Error emit(uint32_t instId, unsigned int o0) {
return emit(instId, static_cast<int64_t>(o0));
}
//! \overload
ASMJIT_INLINE Error emit(uint32_t instId, const Operand_& o0, unsigned int o1) {
return emit(instId, o0, static_cast<int64_t>(o1));
}
//! \overload
ASMJIT_INLINE Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, unsigned int o2) {
return emit(instId, o0, o1, static_cast<int64_t>(o2));
}
//! \overload
ASMJIT_INLINE Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, unsigned int o3) {
return emit(instId, o0, o1, o2, static_cast<int64_t>(o3));
}
//! \overload
ASMJIT_INLINE Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, unsigned int o4) {
return emit(instId, o0, o1, o2, o3, static_cast<int64_t>(o4));
}
//! \overload
ASMJIT_INLINE Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4, unsigned int o5) {
return emit(instId, o0, o1, o2, o3, o4, static_cast<int64_t>(o5));
}
// Forwarders for `uint64_t` immediates. NOTE(review): values above INT64_MAX
// reinterpret as negative int64_t here (implementation-defined before C++20)
// - the immediate's bit pattern is what matters to the emitter; confirm.
//! \overload
ASMJIT_INLINE Error emit(uint32_t instId, uint64_t o0) {
return emit(instId, static_cast<int64_t>(o0));
}
//! \overload
ASMJIT_INLINE Error emit(uint32_t instId, const Operand_& o0, uint64_t o1) {
return emit(instId, o0, static_cast<int64_t>(o1));
}
//! \overload
ASMJIT_INLINE Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, uint64_t o2) {
return emit(instId, o0, o1, static_cast<int64_t>(o2));
}
//! \overload
ASMJIT_INLINE Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, uint64_t o3) {
return emit(instId, o0, o1, o2, static_cast<int64_t>(o3));
}
//! \overload
ASMJIT_INLINE Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, uint64_t o4) {
return emit(instId, o0, o1, o2, o3, static_cast<int64_t>(o4));
}
//! \overload
ASMJIT_INLINE Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4, uint64_t o5) {
return emit(instId, o0, o1, o2, o3, o4, static_cast<int64_t>(o5));
}
//! Emit an instruction from an array of `opCount` operands.
ASMJIT_INLINE Error emitOpArray(uint32_t instId, const Operand_* opArray, size_t opCount) {
return _emitOpArray(instId, opArray, opCount);
}
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
// NOTE(review): several of these fields are poked directly by CodeHolder
// (e.g. `_globalOptions`, `_nextEmitter`) - keep names/types in sync.
CodeInfo _codeInfo; //!< Basic information about the code (matches CodeHolder::_codeInfo).
CodeHolder* _code; //!< CodeHolder the CodeEmitter is attached to (null when detached).
CodeEmitter* _nextEmitter; //!< Linked list of `CodeEmitter`s attached to the same \ref CodeHolder.
uint8_t _type; //!< See CodeEmitter::Type.
uint8_t _destroyed; //!< Set by ~CodeEmitter() before calling `_code->detach()`.
uint8_t _finalized; //!< True if the CodeEmitter is finalized (CodeBuilder & CodeCompiler).
uint8_t _reserved; //!< \internal
Error _lastError; //!< Last error code.
uint32_t _privateData; //!< Internal private data used freely by any CodeEmitter.
uint32_t _globalHints; //!< Global hints, always in sync with CodeHolder.
uint32_t _globalOptions; //!< Global options, combined with `_options` before used by each instruction.
uint32_t _options; //!< Used to pass instruction options (affects the next instruction).
RegOnly _extraReg; //!< Extra register (op-mask {k} on AVX-512) (affects the next instruction).
const char* _inlineComment; //!< Inline comment of the next instruction (affects the next instruction).
Operand_ _none; //!< Used to pass unused operands to `_emit()` instead of passing null.
Reg _nativeGpReg; //!< Native GP register with zero id.
const Reg* _nativeGpArray; //!< Array of native registers indexed from zero.
};
//! \}
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"
// [Guard]
#endif // _ASMJIT_BASE_CODEEMITTER_H

View file

@ -0,0 +1,696 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Export]
#define ASMJIT_EXPORTS
// [Dependencies]
#include "../base/assembler.h"
#include "../base/utils.h"
#include "../base/vmem.h"
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
// ============================================================================
// [asmjit::ErrorHandler]
// ============================================================================
// Out-of-line, empty definitions - ErrorHandler itself carries no state;
// subclasses implement `handleError()`.
ErrorHandler::ErrorHandler() noexcept {}
ErrorHandler::~ErrorHandler() noexcept {}
// ============================================================================
// [asmjit::CodeHolder - Utilities]
// ============================================================================
//! \internal
//!
//! Clears the `clear` bits and sets the `add` bits in the global options of
//! `self` and of every `CodeEmitter` attached to it.
static void CodeHolder_setGlobalOption(CodeHolder* self, uint32_t clear, uint32_t add) noexcept {
  // Update the holder's own options first.
  self->_globalOptions = (self->_globalOptions & ~clear) | add;

  // Then mirror the change into each attached emitter (single-linked list).
  for (CodeEmitter* cur = self->_emitters; cur; cur = cur->_nextEmitter)
    cur->_globalOptions = (cur->_globalOptions & ~clear) | add;
}
//! \internal
//!
//! Returns `self` to its freshly-constructed state. When `releaseMemory` is
//! true the base zone's memory is released as well, otherwise it is kept for
//! reuse. Order matters: emitters must be detached before the allocators that
//! back their bookkeeping are reset.
static void CodeHolder_resetInternal(CodeHolder* self, bool releaseMemory) noexcept {
// Detach all `CodeEmitter`s.
while (self->_emitters)
self->detach(self->_emitters);
// Reset everything into its construction state.
self->_codeInfo.reset();
self->_globalHints = 0;
self->_globalOptions = 0;
self->_logger = nullptr;
self->_errorHandler = nullptr;
self->_unresolvedLabelsCount = 0;
self->_trampolinesSize = 0;
// Reset all sections - free each buffer that AsmJit allocated itself;
// user-provided (external) buffers are never freed here.
size_t numSections = self->_sections.getLength();
for (size_t i = 0; i < numSections; i++) {
SectionEntry* section = self->_sections[i];
if (section->_buffer.hasData() && !section->_buffer.isExternal())
Internal::releaseMemory(section->_buffer._data);
section->_buffer._data = nullptr;
section->_buffer._capacity = 0;
}
// Reset zone allocator and all containers using it. The containers only
// borrow memory from `_baseHeap`, so resetting them before the heap is safe.
ZoneHeap* heap = &self->_baseHeap;
self->_namedLabels.reset(heap);
self->_relocations.reset();
self->_labels.reset();
self->_sections.reset();
heap->reset(&self->_baseZone);
self->_baseZone.reset(releaseMemory);
}
// ============================================================================
// [asmjit::CodeHolder - Construction / Destruction]
// ============================================================================
// Constructs an empty, uninitialized CodeHolder. Both zones start with a
// 16kB block (minus the zone's own bookkeeping overhead); `_baseHeap` draws
// from `_baseZone` and the named-label hash map draws from `_baseHeap`.
CodeHolder::CodeHolder() noexcept
: _codeInfo(),
_globalHints(0),
_globalOptions(0),
_emitters(nullptr),
_cgAsm(nullptr),
_logger(nullptr),
_errorHandler(nullptr),
_unresolvedLabelsCount(0),
_trampolinesSize(0),
_baseZone(16384 - Zone::kZoneOverhead),
_dataZone(16384 - Zone::kZoneOverhead),
_baseHeap(&_baseZone),
_namedLabels(&_baseHeap) {}
// Destructor detaches all emitters and releases all owned memory.
CodeHolder::~CodeHolder() noexcept {
CodeHolder_resetInternal(this, true);
}
// ============================================================================
// [asmjit::CodeHolder - Init / Reset]
// ============================================================================
//! Initializes the holder for the target described by `info` and creates the
//! default '.text' section. Fails with kErrorAlreadyInitialized when called
//! on an already-initialized holder.
Error CodeHolder::init(const CodeInfo& info) noexcept {
  // Cannot reinitialize if it's locked or there is one or more CodeEmitter
  // attached.
  if (isInitialized())
    return DebugUtils::errored(kErrorAlreadyInitialized);

  // If we are just initializing there should be no emitters attached.
  ASMJIT_ASSERT(_emitters == nullptr);

  // Create the default section and insert it into the `_sections` array.
  Error result = _sections.willGrow(&_baseHeap);
  if (result == kErrorOk) {
    SectionEntry* section = _baseZone.allocZeroedT<SectionEntry>();
    if (ASMJIT_LIKELY(section)) {
      section->_flags = SectionEntry::kFlagExec | SectionEntry::kFlagConst;
      section->_setDefaultName('.', 't', 'e', 'x', 't');
      _sections.appendUnsafe(section);
    }
    else {
      result = DebugUtils::errored(kErrorNoHeapMemory);
    }
  }

  // Roll back the zone on failure so a later `init()` starts clean.
  if (ASMJIT_UNLIKELY(result)) {
    _baseZone.reset(false);
    return result;
  }

  _codeInfo = info;
  return kErrorOk;
}
// Resets the holder to its construction state; `releaseMemory` additionally
// releases the zone-allocated memory instead of keeping it for reuse.
void CodeHolder::reset(bool releaseMemory) noexcept {
CodeHolder_resetInternal(this, releaseMemory);
}
// ============================================================================
// [asmjit::CodeHolder - Attach / Detach]
// ============================================================================
//! Attaches `emitter` to this CodeHolder.
//!
//! Re-attaching an emitter that is already attached to this holder is a
//! no-op; attaching one that belongs to a different holder is an error.
//! At most one Assembler can be attached at any time (it also occupies
//! the dedicated `_cgAsm` slot).
Error CodeHolder::attach(CodeEmitter* emitter) noexcept {
  // Catch a possible misuse of the API.
  if (!emitter)
    return DebugUtils::errored(kErrorInvalidArgument);

  uint32_t emitterType = emitter->getType();
  if (emitterType == CodeEmitter::kTypeNone || emitterType >= CodeEmitter::kTypeCount)
    return DebugUtils::errored(kErrorInvalidState);

  // This is suspicious, but don't fail if `emitter` already matches.
  if (emitter->_code != nullptr)
    return emitter->_code == this ? kErrorOk : DebugUtils::errored(kErrorInvalidState);

  // Special case - an `Assembler` additionally occupies the `_cgAsm` slot.
  CodeEmitter** assemblerSlot = nullptr;
  if (emitterType == CodeEmitter::kTypeAssembler) {
    if (_cgAsm)
      return DebugUtils::errored(kErrorSlotOccupied);
    assemblerSlot = reinterpret_cast<CodeEmitter**>(&_cgAsm);
  }

  // Let the emitter initialize itself against this holder first.
  Error result = emitter->onAttach(this);
  if (result != kErrorOk)
    return result;

  // Link into the single-linked list of attached `CodeEmitter`s.
  emitter->_nextEmitter = _emitters;
  _emitters = emitter;
  if (assemblerSlot)
    *assemblerSlot = emitter;

  // Establish the connection.
  emitter->_code = this;
  return kErrorOk;
}
//! Detaches `emitter` from this CodeHolder.
//!
//! The emitter is always unlinked, even if its `onDetach()` callback fails;
//! in that case the callback's error is propagated to the caller.
Error CodeHolder::detach(CodeEmitter* emitter) noexcept {
  if (!emitter)
    return DebugUtils::errored(kErrorInvalidArgument);
  if (emitter->_code != this)
    return DebugUtils::errored(kErrorInvalidState);

  uint32_t emitterType = emitter->getType();
  Error result = kErrorOk;

  // Skip the callbacks when called from ~CodeEmitter() (it sets `_destroyed`
  // before invoking `_code->detach()`).
  if (!emitter->_destroyed) {
    // Flush pending Assembler state into the holder before detaching.
    if (emitterType == CodeEmitter::kTypeAssembler)
      static_cast<Assembler*>(emitter)->sync();
    result = emitter->onDetach(this);
  }

  // Special case - an Assembler also occupies the dedicated `_cgAsm` slot.
  if (emitterType == CodeEmitter::kTypeAssembler)
    _cgAsm = nullptr;

  // Unlink from the single-linked list of attached `CodeEmitter`s. The
  // emitter is guaranteed to be in the list because `_code == this`.
  CodeEmitter** link = &_emitters;
  while (*link != emitter) {
    ASMJIT_ASSERT(*link != nullptr);
    link = &(*link)->_nextEmitter;
  }
  *link = emitter->_nextEmitter;

  emitter->_code = nullptr;
  emitter->_nextEmitter = nullptr;
  return result;
}
// ============================================================================
// [asmjit::CodeHolder - Sync]
// ============================================================================
// Flushes the attached Assembler's cursor/length into the holder (no-op when
// no Assembler is attached).
void CodeHolder::sync() noexcept {
if (_cgAsm) _cgAsm->sync();
}
// ============================================================================
// [asmjit::CodeHolder - Result Information]
// ============================================================================
// Returns the total size of the generated code including reserved trampoline
// space. Logically const - the sync() only reflects already-emitted bytes.
size_t CodeHolder::getCodeSize() const noexcept {
// Reflect all changes first.
const_cast<CodeHolder*>(this)->sync();
// TODO: Support sections.
return _sections[0]->_buffer._length + getTrampolinesSize();
}
// ============================================================================
// [asmjit::CodeHolder - Logging & Error Handling]
// ============================================================================
#if !defined(ASMJIT_DISABLE_LOGGING)
//! Attaches `logger` (or clears it when null) and toggles the logging option
//! on this holder and on every attached emitter accordingly.
void CodeHolder::setLogger(Logger* logger) noexcept {
  uint32_t opt = logger ? uint32_t(CodeEmitter::kOptionLoggingEnabled) : uint32_t(0);
  _logger = logger;
  CodeHolder_setGlobalOption(this, CodeEmitter::kOptionLoggingEnabled, opt);
}
#endif // !ASMJIT_DISABLE_LOGGING
// Installs `handler` as the error handler for this holder; always succeeds
// (passing null clears it).
Error CodeHolder::setErrorHandler(ErrorHandler* handler) noexcept {
_errorHandler = handler;
return kErrorOk;
}
// ============================================================================
// [asmjit::CodeHolder - Sections]
// ============================================================================
//! \internal
//!
//! (Re)allocates the storage of `cb` to exactly `n` bytes and, if the
//! attached Assembler writes into the same buffer, patches its pointers so
//! it keeps emitting into the moved memory.
static Error CodeHolder_reserveInternal(CodeHolder* self, CodeBuffer* cb, size_t n) noexcept {
  uint8_t* prevData = cb->_data;
  uint8_t* data;

  // Never realloc user-provided (external) memory - allocate fresh instead.
  if (prevData && !cb->isExternal())
    data = static_cast<uint8_t*>(Internal::reallocMemory(prevData, n));
  else
    data = static_cast<uint8_t*>(Internal::allocMemory(n));

  if (ASMJIT_UNLIKELY(!data))
    return DebugUtils::errored(kErrorNoHeapMemory);

  cb->_data = data;
  cb->_capacity = n;

  // Update the `Assembler` pointers if attached. Maybe we should introduce an
  // event for this, but since only one Assembler can be attached at a time it
  // should not matter how these pointers are updated.
  Assembler* assembler = self->_cgAsm;
  if (assembler && &assembler->_section->_buffer == cb) {
    size_t offset = assembler->getOffset();
    assembler->_bufferData = data;
    assembler->_bufferEnd = data + n;
    assembler->_bufferPtr = data + offset;
  }

  return kErrorOk;
}
//! Grows `cb` so it can hold at least `n` more bytes, using exponential
//! growth for small buffers and linear growth past `kAllocThreshold`.
Error CodeHolder::growBuffer(CodeBuffer* cb, size_t n) noexcept {
  // This is most likely called by `Assembler` so `sync()` shouldn't be needed,
  // however, if this is called by the user and the currently attached Assembler
  // did generate some code we could lose that, so sync now and make sure the
  // section length is updated.
  if (_cgAsm) _cgAsm->sync();

  // Now the length of the section must be valid.
  size_t length = cb->getLength();
  if (ASMJIT_UNLIKELY(n > IntTraits<uintptr_t>::maxValue() - length))
    return DebugUtils::errored(kErrorNoHeapMemory);

  // We can now check if growing the buffer is really necessary. It's unlikely
  // that this function is called while there is still room for `n` bytes.
  size_t capacity = cb->getCapacity();
  size_t required = cb->getLength() + n;
  if (ASMJIT_UNLIKELY(required <= capacity)) return kErrorOk;

  if (cb->isFixedSize())
    return DebugUtils::errored(kErrorCodeTooLarge);

  if (capacity < 8096)
    capacity = 8096;
  else
    capacity += Globals::kAllocOverhead;

  do {
    size_t old = capacity;

    // Double small buffers; grow large ones linearly to limit waste.
    // BUG FIX: this grow step was accidentally duplicated, quadrupling the
    // capacity per iteration and over-allocating up to ~4x per grow.
    if (capacity < Globals::kAllocThreshold)
      capacity *= 2;
    else
      capacity += Globals::kAllocThreshold;

    // Overflow.
    if (ASMJIT_UNLIKELY(old > capacity))
      return DebugUtils::errored(kErrorNoHeapMemory);
  } while (capacity - Globals::kAllocOverhead < required);

  return CodeHolder_reserveInternal(this, cb, capacity - Globals::kAllocOverhead);
}
// Ensures `cb` has capacity for at least `n` bytes total (no-op when the
// current capacity already suffices; fails on fixed-size buffers).
Error CodeHolder::reserveBuffer(CodeBuffer* cb, size_t n) noexcept {
size_t capacity = cb->getCapacity();
if (n <= capacity) return kErrorOk;
if (cb->isFixedSize())
return DebugUtils::errored(kErrorCodeTooLarge);
// We must sync, as mentioned in `growBuffer()` as well.
if (_cgAsm) _cgAsm->sync();
return CodeHolder_reserveInternal(this, cb, n);
}
// ============================================================================
// [asmjit::CodeHolder - Labels & Symbols]
// ============================================================================
namespace {
//! \internal
//!
//! Only used to lookup a label from `_namedLabels`. Bundles the name, its
//! length, and the precomputed hash used to select the bucket.
//!
//! BUG FIX: the `hVal` constructor argument was previously ignored, leaving
//! the `hVal` member uninitialized; the hash-map lookup keyed on it then read
//! indeterminate data and named-label lookups failed unpredictably.
class LabelByName {
public:
  ASMJIT_INLINE LabelByName(const char* name, size_t nameLength, uint32_t hVal) noexcept
    : name(name),
      nameLength(static_cast<uint32_t>(nameLength)),
      hVal(hVal) {}

  //! Returns true if `entry` has exactly this key's name (byte comparison,
  //! names are not NUL terminated).
  ASMJIT_INLINE bool matches(const LabelEntry* entry) const noexcept {
    return static_cast<uint32_t>(entry->getNameLength()) == nameLength &&
           ::memcmp(entry->getName(), name, nameLength) == 0;
  }

  const char* name;    //!< Label name (not NUL terminated).
  uint32_t nameLength; //!< Length of `name` in bytes.
  uint32_t hVal;       //!< Precomputed hash of `name`.
};
// Returns a hash of `name` and fixes `nameLength` if it's `Globals::kInvalidIndex`
// (in that case the name is scanned to its NUL terminator and `nameLength` is
// updated in place).
// NOTE(review): when an explicit `nameLength` covers an embedded NUL, this
// returns `DebugUtils::errored(...)` as the "hash" - callers never see this
// as an error; confirm whether rejecting such names here is intended.
static uint32_t CodeHolder_hashNameAndFixLen(const char* name, size_t& nameLength) noexcept {
uint32_t hVal = 0;
if (nameLength == Globals::kInvalidIndex) {
// Unknown length - hash up to the NUL terminator and record the length.
size_t i = 0;
for (;;) {
uint8_t c = static_cast<uint8_t>(name[i]);
if (!c) break;
hVal = Utils::hashRound(hVal, c);
i++;
}
nameLength = i;
}
else {
// Known length - embedded NUL bytes are not allowed.
for (size_t i = 0; i < nameLength; i++) {
uint8_t c = static_cast<uint8_t>(name[i]);
if (ASMJIT_UNLIKELY(!c)) return DebugUtils::errored(kErrorInvalidLabelName);
hVal = Utils::hashRound(hVal, c);
}
}
return hVal;
}
} // anonymous namespace
//! Allocates a new unresolved-use record for label entry `le` and pushes it
//! onto the entry's link list. Returns null on allocation failure.
LabelLink* CodeHolder::newLabelLink(LabelEntry* le, uint32_t sectionId, size_t offset, intptr_t rel) noexcept {
  LabelLink* link = _baseHeap.allocT<LabelLink>();
  if (ASMJIT_UNLIKELY(!link))
    return nullptr;

  // Fill the record, then push it at the head of the entry's list.
  link->prev = le->_links;
  link->sectionId = sectionId;
  link->relocId = RelocEntry::kInvalidId;
  link->offset = offset;
  link->rel = rel;
  le->_links = link;

  // Track the number of links still waiting to be bound/resolved.
  _unresolvedLabelsCount++;
  return link;
}
//! Creates a new anonymous label entry and stores its packed id in `idOut`
//! (`idOut` is 0 on failure).
Error CodeHolder::newLabelId(uint32_t& idOut) noexcept {
  idOut = 0;

  size_t index = _labels.getLength();
  // BUG FIX: this check was marked ASMJIT_LIKELY, hinting the compiler that
  // the overflow *error* path is the expected one. It's the rare case, and
  // the identical check in `newNamedLabelId()` already uses ASMJIT_UNLIKELY.
  if (ASMJIT_UNLIKELY(index >= Operand::kPackedIdCount))
    return DebugUtils::errored(kErrorLabelIndexOverflow);

  ASMJIT_PROPAGATE(_labels.willGrow(&_baseHeap));
  LabelEntry* le = _baseHeap.allocZeroedT<LabelEntry>();

  if (ASMJIT_UNLIKELY(!le))
    return DebugUtils::errored(kErrorNoHeapMemory); // (also dropped a stray ';;')

  // Unbound label: no section, no offset, no parent.
  uint32_t id = Operand::packId(static_cast<uint32_t>(index));
  le->_setId(id);
  le->_parentId = 0;
  le->_sectionId = SectionEntry::kInvalidId;
  le->_offset = 0;

  _labels.appendUnsafe(le);
  idOut = id;
  return kErrorOk;
}
// Creates a new named label entry (local or global) and stores its packed id
// in `idOut` (0 on failure). `nameLength` may be `Globals::kInvalidIndex` to
// mean "NUL terminated". Duplicate names are rejected; local labels hash
// their parent id into the name hash so the same name may exist under
// different parents.
Error CodeHolder::newNamedLabelId(uint32_t& idOut, const char* name, size_t nameLength, uint32_t type, uint32_t parentId) noexcept {
idOut = 0;
uint32_t hVal = CodeHolder_hashNameAndFixLen(name, nameLength);
// `nameLength` is now exact - validate it.
if (ASMJIT_UNLIKELY(nameLength == 0))
return DebugUtils::errored(kErrorInvalidLabelName);
if (ASMJIT_UNLIKELY(nameLength > Globals::kMaxLabelLength))
return DebugUtils::errored(kErrorLabelNameTooLong);
switch (type) {
case Label::kTypeLocal:
if (ASMJIT_UNLIKELY(Operand::unpackId(parentId) >= _labels.getLength()))
return DebugUtils::errored(kErrorInvalidParentLabel);
hVal ^= parentId;
break;
case Label::kTypeGlobal:
if (ASMJIT_UNLIKELY(parentId != 0))
return DebugUtils::errored(kErrorNonLocalLabelCantHaveParent);
break;
default:
return DebugUtils::errored(kErrorInvalidArgument);
}
// Don't allow to insert duplicates. Local labels allow duplicates that have
// different id, this is already accomplished by having a different hashes
// between the same label names having different parent labels.
LabelEntry* le = _namedLabels.get(LabelByName(name, nameLength, hVal));
if (ASMJIT_UNLIKELY(le))
return DebugUtils::errored(kErrorLabelAlreadyDefined);
Error err = kErrorOk;
size_t index = _labels.getLength();
if (ASMJIT_UNLIKELY(index >= Operand::kPackedIdCount))
return DebugUtils::errored(kErrorLabelIndexOverflow);
ASMJIT_PROPAGATE(_labels.willGrow(&_baseHeap));
le = _baseHeap.allocZeroedT<LabelEntry>();
if (ASMJIT_UNLIKELY(!le))
return DebugUtils::errored(kErrorNoHeapMemory);
uint32_t id = Operand::packId(static_cast<uint32_t>(index));
le->_hVal = hVal;
le->_setId(id);
le->_type = static_cast<uint8_t>(type);
// NOTE(review): `parentId` is validated above but not stored here - confirm
// whether local labels are meant to record their parent in `_parentId`.
le->_parentId = 0;
le->_sectionId = SectionEntry::kInvalidId;
le->_offset = 0;
// Short names are embedded in the entry, long ones are duplicated into the
// data zone (NUL terminated) and referenced externally.
if (le->_name.mustEmbed(nameLength)) {
le->_name.setEmbedded(name, nameLength);
}
else {
char* nameExternal = static_cast<char*>(_dataZone.dup(name, nameLength, true));
if (ASMJIT_UNLIKELY(!nameExternal))
return DebugUtils::errored(kErrorNoHeapMemory);
le->_name.setExternal(nameExternal, nameLength);
}
_labels.appendUnsafe(le);
_namedLabels.put(le);
idOut = id;
return err;
}
//! Returns the packed id of a named label, or 0 when the name is invalid or
//! not registered.
//! NOTE(review): `parentId` is currently unused here - local-label lookup
//! would need it folded into the hash as `newNamedLabelId()` does; confirm.
uint32_t CodeHolder::getLabelIdByName(const char* name, size_t nameLength, uint32_t parentId) noexcept {
  // Hashing also resolves `nameLength` when kInvalidIndex was passed.
  uint32_t hVal = CodeHolder_hashNameAndFixLen(name, nameLength);
  if (ASMJIT_UNLIKELY(!nameLength))
    return 0;

  LabelEntry* le = _namedLabels.get(LabelByName(name, nameLength, hVal));
  if (!le)
    return 0;
  return le->getId();
}
// ============================================================================
// [asmjit::CodeEmitter - Relocations]
// ============================================================================
//! Encode a ModRM byte from its three bit-fields:
//! `m` (mod, bits 7:6), `o` (reg/opcode, bits 5:3), `rm` (r/m, bits 2:0).
static inline uint32_t x86EncodeMod(uint32_t m, uint32_t o, uint32_t rm) noexcept {
  uint32_t encoded = m << 6;
  encoded |= o << 3;
  encoded |= rm;
  return encoded;
}
//! Allocates a new relocation record of the given `type` and `size`, appends
//! it to `_relocations`, and returns it through `dst`.
Error CodeHolder::newRelocEntry(RelocEntry** dst, uint32_t type, uint32_t size) noexcept {
  ASMJIT_PROPAGATE(_relocations.willGrow(&_baseHeap));

  // Reloc ids are 32-bit, refuse to create more entries than fit.
  size_t index = _relocations.getLength();
  if (ASMJIT_UNLIKELY(index > size_t(0xFFFFFFFFU)))
    return DebugUtils::errored(kErrorRelocIndexOverflow);

  RelocEntry* entry = _baseHeap.allocZeroedT<RelocEntry>();
  if (ASMJIT_UNLIKELY(!entry))
    return DebugUtils::errored(kErrorNoHeapMemory);

  // Sections are assigned by the emitter later - start out invalid.
  entry->_id = static_cast<uint32_t>(index);
  entry->_type = static_cast<uint8_t>(type);
  entry->_size = static_cast<uint8_t>(size);
  entry->_sourceSectionId = SectionEntry::kInvalidId;
  entry->_targetSectionId = SectionEntry::kInvalidId;

  _relocations.appendUnsafe(entry);
  *dst = entry;
  return kErrorOk;
}
// TODO: Support multiple sections, this only relocates the first.
// TODO: This should go to Runtime as it's responsible for relocating the
// code, CodeHolder should just hold it.
//
// Copies the generated code into `_dst`, applies all recorded relocations
// relative to `baseAddress` (or to `_dst` itself when kNoBaseAddress), and
// appends 8-byte trampolines after the code for +-2GB-unreachable targets.
// Returns the final size (code + emitted trampolines).
// NOTE(review): the return type is size_t, yet the error paths below return
// `DebugUtils::errored(...)` - a small error code that is indistinguishable
// from a tiny valid size to the caller; confirm this contract.
size_t CodeHolder::relocate(void* _dst, uint64_t baseAddress) const noexcept {
SectionEntry* section = _sections[0];
ASMJIT_ASSERT(section != nullptr);
uint8_t* dst = static_cast<uint8_t*>(_dst);
// No explicit base - relocate as if the code runs at `dst` itself.
if (baseAddress == Globals::kNoBaseAddress)
baseAddress = static_cast<uint64_t>((uintptr_t)dst);
#if !defined(ASMJIT_DISABLE_LOGGING)
Logger* logger = getLogger();
#endif // ASMJIT_DISABLE_LOGGING
size_t minCodeSize = section->getBuffer().getLength(); // Minimum code size.
size_t maxCodeSize = getCodeSize(); // Includes all possible trampolines.
// We will copy the exact size of the generated code. Extra code for trampolines
// is generated on-the-fly by the relocator (this code doesn't exist at the moment).
::memcpy(dst, section->_buffer._data, minCodeSize);
// Trampoline offset from the beginning of dst/baseAddress.
size_t trampOffset = minCodeSize;
// Relocate all recorded locations.
size_t numRelocs = _relocations.getLength();
const RelocEntry* const* reArray = _relocations.getData();
for (size_t i = 0; i < numRelocs; i++) {
const RelocEntry* re = reArray[i];
// Possibly deleted or optimized out relocation entry.
if (re->getType() == RelocEntry::kTypeNone)
continue;
uint64_t ptr = re->getData();
size_t codeOffset = static_cast<size_t>(re->getSourceOffset());
// Make sure that the `RelocEntry` is correct, we don't want to write
// out of bounds in `dst`.
if (ASMJIT_UNLIKELY(codeOffset + re->getSize() > maxCodeSize))
return DebugUtils::errored(kErrorInvalidRelocEntry);
// Whether to use trampoline, can be only used if relocation type is `kRelocTrampoline`.
bool useTrampoline = false;
// Compute the value to patch in, depending on the relocation kind.
switch (re->getType()) {
case RelocEntry::kTypeAbsToAbs: {
break;
}
case RelocEntry::kTypeRelToAbs: {
ptr += baseAddress;
break;
}
case RelocEntry::kTypeAbsToRel: {
ptr -= baseAddress + re->getSourceOffset() + re->getSize();
break;
}
case RelocEntry::kTypeTrampoline: {
if (re->getSize() != 4)
return DebugUtils::errored(kErrorInvalidRelocEntry);
ptr -= baseAddress + re->getSourceOffset() + re->getSize();
// Target unreachable by a rel32 - route the jump/call through an
// 8-byte absolute trampoline placed after the code.
if (!Utils::isInt32(static_cast<int64_t>(ptr))) {
ptr = (uint64_t)trampOffset - re->getSourceOffset() - re->getSize();
useTrampoline = true;
}
break;
}
default:
return DebugUtils::errored(kErrorInvalidRelocEntry);
}
// Patch the computed value into the copied code.
switch (re->getSize()) {
case 1:
Utils::writeU8(dst + codeOffset, static_cast<uint32_t>(ptr & 0xFFU));
break;
case 4:
Utils::writeU32u(dst + codeOffset, static_cast<uint32_t>(ptr & 0xFFFFFFFFU));
break;
case 8:
Utils::writeU64u(dst + codeOffset, ptr);
break;
default:
return DebugUtils::errored(kErrorInvalidRelocEntry);
}
// Handle the trampoline case.
if (useTrampoline) {
// Bytes that replace [REX, OPCODE] bytes.
uint32_t byte0 = 0xFF;
uint32_t byte1 = dst[codeOffset - 1];
if (byte1 == 0xE8) {
// Patch CALL/MOD byte to FF/2 (-> 0x15).
byte1 = x86EncodeMod(0, 2, 5);
}
else if (byte1 == 0xE9) {
// Patch JMP/MOD byte to FF/4 (-> 0x25).
byte1 = x86EncodeMod(0, 4, 5);
}
else {
return DebugUtils::errored(kErrorInvalidRelocEntry);
}
// Patch `jmp/call` instruction.
ASMJIT_ASSERT(codeOffset >= 2);
dst[codeOffset - 2] = static_cast<uint8_t>(byte0);
dst[codeOffset - 1] = static_cast<uint8_t>(byte1);
// Store absolute address and advance the trampoline pointer.
Utils::writeU64u(dst + trampOffset, re->getData());
trampOffset += 8;
#if !defined(ASMJIT_DISABLE_LOGGING)
if (logger)
logger->logf("[reloc] dq 0x%016llX ; Trampoline\n", re->getData());
#endif // !ASMJIT_DISABLE_LOGGING
}
}
// If there are no trampolines this is the same as `minCodeSize`.
return trampOffset;
}
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"

View file

@ -0,0 +1,748 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_BASE_CODEHOLDER_H
#define _ASMJIT_BASE_CODEHOLDER_H
// [Dependencies]
#include "../base/arch.h"
#include "../base/func.h"
#include "../base/logging.h"
#include "../base/operand.h"
#include "../base/simdtypes.h"
#include "../base/utils.h"
#include "../base/zone.h"
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
//! \addtogroup asmjit_base
//! \{
// ============================================================================
// [Forward Declarations]
// ============================================================================
class Assembler;
class CodeEmitter;
class CodeHolder;
// ============================================================================
// [asmjit::AlignMode]
// ============================================================================
//! Align mode - selects the filler used when aligning a code/data stream.
ASMJIT_ENUM(AlignMode) {
kAlignCode = 0, //!< Align executable code.
kAlignData = 1, //!< Align non-executable data.
kAlignZero = 2, //!< Align by a sequence of zeros.
kAlignCount //!< Count of alignment modes.
};
// ============================================================================
// [asmjit::ErrorHandler]
// ============================================================================
//! Error handler can be used to override the default behavior of error handling
//! available to all classes that inherit \ref CodeEmitter. See \ref handleError().
class ASMJIT_VIRTAPI ErrorHandler {
public:
// --------------------------------------------------------------------------
// [Construction / Destruction]
// --------------------------------------------------------------------------
//! Create a new `ErrorHandler` instance.
ASMJIT_API ErrorHandler() noexcept;
//! Destroy the `ErrorHandler` instance.
ASMJIT_API virtual ~ErrorHandler() noexcept;
// --------------------------------------------------------------------------
// [Handle Error]
// --------------------------------------------------------------------------
//! Error handler (abstract).
//!
//! Error handler is called after an error happened and before it's propagated
//! to the caller. There are multiple ways the error handler can be used:
//!
//! 1. Returning `true` or `false` from `handleError()`. If `true` is returned
//! it means that the error was reported and AsmJit can continue execution.
//! The reported error will still be propagated to the caller, but won't put
//! the CodeEmitter into an error state (it won't set last-error). However,
//! returning `false` means that the error cannot be handled - in such case
//! it stores the error, which can be then retrieved by using `getLastError()`.
//! Returning `false` is the default behavior when no error handler is present.
//! To put the assembler into a non-error state again a `resetLastError()` must
//! be called.
//!
//! 2. Throwing an exception. AsmJit doesn't use exceptions and is completely
//! exception-safe, but you can throw an exception from your error handler if
//! this way is the preferred way of handling errors in your project. Throwing
//! an exception acts virtually as returning `true` as AsmJit won't be able
//! to store the error because the exception changes the execution path.
//!
//! 3. Using plain old C's `setjmp()` and `longjmp()`. AsmJit always puts
//! `CodeEmitter` to a consistent state before calling the `handleError()`
//! so `longjmp()` can be used without any issues to cancel the code
//! generation if an error occurred. There is no difference between
//! exceptions and longjmp() from AsmJit's perspective.
virtual bool handleError(Error err, const char* message, CodeEmitter* origin) = 0;
};
// ============================================================================
// [asmjit::CodeInfo]
// ============================================================================
//! Basic information about a code (or target). It describes its architecture,
//! code generation mode (or optimization level), and base address.
//!
//! NOTE(review): equality below is a raw `memcmp` of the whole object, and the
//! four misc bytes alias `_packedMiscInfo` through a union - keep the layout
//! fully initialized (as the constructors do) and confirm there is no padding
//! before changing members.
class CodeInfo {
public:
// --------------------------------------------------------------------------
// [Construction / Destruction]
// --------------------------------------------------------------------------
// Default state: no architecture, no calling conventions, no base address.
ASMJIT_INLINE CodeInfo() noexcept
: _archInfo(),
_stackAlignment(0),
_cdeclCallConv(CallConv::kIdNone),
_stdCallConv(CallConv::kIdNone),
_fastCallConv(CallConv::kIdNone),
_baseAddress(Globals::kNoBaseAddress) {}
ASMJIT_INLINE CodeInfo(const CodeInfo& other) noexcept { init(other); }
explicit ASMJIT_INLINE CodeInfo(uint32_t archType, uint32_t archMode = 0, uint64_t baseAddress = Globals::kNoBaseAddress) noexcept
: _archInfo(archType, archMode),
_packedMiscInfo(0),
_baseAddress(baseAddress) {}
// --------------------------------------------------------------------------
// [Init / Reset]
// --------------------------------------------------------------------------
// Initialized means an architecture type has been assigned.
ASMJIT_INLINE bool isInitialized() const noexcept {
return _archInfo._type != ArchInfo::kTypeNone;
}
ASMJIT_INLINE void init(const CodeInfo& other) noexcept {
_archInfo = other._archInfo;
_packedMiscInfo = other._packedMiscInfo;
_baseAddress = other._baseAddress;
}
ASMJIT_INLINE void init(uint32_t archType, uint32_t archMode = 0, uint64_t baseAddress = Globals::kNoBaseAddress) noexcept {
_archInfo.init(archType, archMode);
_packedMiscInfo = 0;
_baseAddress = baseAddress;
}
ASMJIT_INLINE void reset() noexcept {
_archInfo.reset();
_stackAlignment = 0;
_cdeclCallConv = CallConv::kIdNone;
_stdCallConv = CallConv::kIdNone;
_fastCallConv = CallConv::kIdNone;
_baseAddress = Globals::kNoBaseAddress;
}
// --------------------------------------------------------------------------
// [Architecture Information]
// --------------------------------------------------------------------------
//! Get architecture information, see \ref ArchInfo.
ASMJIT_INLINE const ArchInfo& getArchInfo() const noexcept { return _archInfo; }
//! Get architecture type, see \ref ArchInfo::Type.
ASMJIT_INLINE uint32_t getArchType() const noexcept { return _archInfo.getType(); }
//! Get architecture sub-type, see \ref ArchInfo::SubType.
ASMJIT_INLINE uint32_t getArchSubType() const noexcept { return _archInfo.getSubType(); }
//! Get a size of a GP register of the architecture the code is using.
ASMJIT_INLINE uint32_t getGpSize() const noexcept { return _archInfo.getGpSize(); }
//! Get number of GP registers available of the architecture the code is using.
ASMJIT_INLINE uint32_t getGpCount() const noexcept { return _archInfo.getGpCount(); }
// --------------------------------------------------------------------------
// [High-Level Information]
// --------------------------------------------------------------------------
//! Get a natural stack alignment that must be honored (or 0 if not known).
ASMJIT_INLINE uint32_t getStackAlignment() const noexcept { return _stackAlignment; }
//! Set a natural stack alignment that must be honored.
ASMJIT_INLINE void setStackAlignment(uint8_t sa) noexcept { _stackAlignment = static_cast<uint8_t>(sa); }
// Default calling-convention ids for this target (stored as uint8_t).
ASMJIT_INLINE uint32_t getCdeclCallConv() const noexcept { return _cdeclCallConv; }
ASMJIT_INLINE void setCdeclCallConv(uint32_t cc) noexcept { _cdeclCallConv = static_cast<uint8_t>(cc); }
ASMJIT_INLINE uint32_t getStdCallConv() const noexcept { return _stdCallConv; }
ASMJIT_INLINE void setStdCallConv(uint32_t cc) noexcept { _stdCallConv = static_cast<uint8_t>(cc); }
ASMJIT_INLINE uint32_t getFastCallConv() const noexcept { return _fastCallConv; }
ASMJIT_INLINE void setFastCallConv(uint32_t cc) noexcept { _fastCallConv = static_cast<uint8_t>(cc); }
// --------------------------------------------------------------------------
// [Addressing Information]
// --------------------------------------------------------------------------
ASMJIT_INLINE bool hasBaseAddress() const noexcept { return _baseAddress != Globals::kNoBaseAddress; }
ASMJIT_INLINE uint64_t getBaseAddress() const noexcept { return _baseAddress; }
ASMJIT_INLINE void setBaseAddress(uint64_t p) noexcept { _baseAddress = p; }
ASMJIT_INLINE void resetBaseAddress() noexcept { _baseAddress = Globals::kNoBaseAddress; }
// --------------------------------------------------------------------------
// [Operator Overload]
// --------------------------------------------------------------------------
ASMJIT_INLINE CodeInfo& operator=(const CodeInfo& other) noexcept { init(other); return *this; }
ASMJIT_INLINE bool operator==(const CodeInfo& other) const noexcept { return ::memcmp(this, &other, sizeof(*this)) == 0; }
ASMJIT_INLINE bool operator!=(const CodeInfo& other) const noexcept { return ::memcmp(this, &other, sizeof(*this)) != 0; }
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
ArchInfo _archInfo; //!< Architecture information.
union {
struct {
uint8_t _stackAlignment; //!< Natural stack alignment (ARCH+OS).
uint8_t _cdeclCallConv; //!< Default CDECL calling convention.
uint8_t _stdCallConv; //!< Default STDCALL calling convention.
uint8_t _fastCallConv; //!< Default FASTCALL calling convention.
};
uint32_t _packedMiscInfo; //!< \internal All four bytes above as one word.
};
uint64_t _baseAddress; //!< Base address.
};
// ============================================================================
// [asmjit::CodeBuffer]
// ============================================================================
//! Code or data buffer.
struct CodeBuffer {
// --------------------------------------------------------------------------
// [Accessors]
// --------------------------------------------------------------------------
//! Get whether the buffer currently points at any storage.
ASMJIT_INLINE bool hasData() const noexcept { return _data != nullptr; }
//! Get a mutable pointer to the buffer content.
ASMJIT_INLINE uint8_t* getData() noexcept { return _data; }
//! Get a const pointer to the buffer content.
ASMJIT_INLINE const uint8_t* getData() const noexcept { return _data; }
//! Get the number of bytes currently used.
ASMJIT_INLINE size_t getLength() const noexcept { return _length; }
//! Get the total capacity of the buffer, in bytes.
ASMJIT_INLINE size_t getCapacity() const noexcept { return _capacity; }
//! Get if the buffer storage is external (supplied by the user).
ASMJIT_INLINE bool isExternal() const noexcept { return _isExternal; }
//! Get if the buffer has a fixed size (it cannot grow).
ASMJIT_INLINE bool isFixedSize() const noexcept { return _isFixedSize; }
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
uint8_t* _data; //!< The content of the buffer (data).
size_t _length; //!< Number of bytes of `data` used.
size_t _capacity; //!< Buffer capacity (in bytes).
bool _isExternal; //!< True if this is external buffer.
bool _isFixedSize; //!< True if this buffer cannot grow.
};
// ============================================================================
// [asmjit::SectionEntry]
// ============================================================================
//! Section entry.
class SectionEntry {
public:
//! Section id constants.
ASMJIT_ENUM(Id) {
kInvalidId = 0xFFFFFFFFU //!< Invalid section id.
};
//! Section flags.
ASMJIT_ENUM(Flags) {
kFlagExec = 0x00000001U, //!< Executable (.text sections).
kFlagConst = 0x00000002U, //!< Read-only (.text and .data sections).
kFlagZero = 0x00000004U, //!< Zero initialized by the loader (BSS).
kFlagInfo = 0x00000008U, //!< Info / comment flag.
kFlagImplicit = 0x80000000U //!< Section created implicitly (can be deleted by the Runtime).
};
// --------------------------------------------------------------------------
// [Accessors]
// --------------------------------------------------------------------------
//! Get the section id.
ASMJIT_INLINE uint32_t getId() const noexcept { return _id; }
//! Get the section name as a null-terminated string.
ASMJIT_INLINE const char* getName() const noexcept { return _name; }
//! \internal
//!
//! Set the section name from up to 8 characters packed into `_nameAsU32`;
//! trailing defaults of 0 keep the name null-terminated.
ASMJIT_INLINE void _setDefaultName(
char c0 = 0, char c1 = 0, char c2 = 0, char c3 = 0,
char c4 = 0, char c5 = 0, char c6 = 0, char c7 = 0) noexcept {
_nameAsU32[0] = Utils::pack32_4x8(c0, c1, c2, c3);
_nameAsU32[1] = Utils::pack32_4x8(c4, c5, c6, c7);
}
//! Get all section flags.
ASMJIT_INLINE uint32_t getFlags() const noexcept { return _flags; }
//! Get whether the section has the given `flag` set.
ASMJIT_INLINE bool hasFlag(uint32_t flag) const noexcept { return (_flags & flag) != 0; }
//! Add `flags` to the section flags.
ASMJIT_INLINE void addFlags(uint32_t flags) noexcept { _flags |= flags; }
//! Clear `flags` from the section flags.
ASMJIT_INLINE void clearFlags(uint32_t flags) noexcept { _flags &= ~flags; }
//! Get the section alignment (0 if no requirement).
ASMJIT_INLINE uint32_t getAlignment() const noexcept { return _alignment; }
//! Set the section alignment.
ASMJIT_INLINE void setAlignment(uint32_t alignment) noexcept { _alignment = alignment; }
//! Get the physical size - the number of bytes emitted into the buffer.
ASMJIT_INLINE size_t getPhysicalSize() const noexcept { return _buffer.getLength(); }
//! Get the virtual size of the section.
ASMJIT_INLINE size_t getVirtualSize() const noexcept { return _virtualSize; }
//! Set the virtual size of the section.
ASMJIT_INLINE void setVirtualSize(uint32_t size) noexcept { _virtualSize = size; }
//! Get the section's code or data buffer.
ASMJIT_INLINE CodeBuffer& getBuffer() noexcept { return _buffer; }
//! \overload
ASMJIT_INLINE const CodeBuffer& getBuffer() const noexcept { return _buffer; }
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
uint32_t _id; //!< Section id.
uint32_t _flags; //!< Section flags.
uint32_t _alignment; //!< Section alignment requirements (0 if no requirements).
uint32_t _virtualSize; //!< Virtual size of the section (zero initialized mostly).
union {
char _name[36]; //!< Section name (max 35 characters, PE allows max 8).
uint32_t _nameAsU32[36 / 4]; //!< Section name as `uint32_t[]` (only optimization).
};
CodeBuffer _buffer; //!< Code or data buffer.
};
// ============================================================================
// [asmjit::LabelLink]
// ============================================================================
//! Data structure used to link labels.
struct LabelLink {
// One LabelLink is created per use of an unbound label; the chain is walked
// and each recorded location patched once the label is finally bound.
LabelLink* prev; //!< Previous link (single-linked list).
uint32_t sectionId; //!< Section id.
uint32_t relocId; //!< Relocation id or RelocEntry::kInvalidId.
size_t offset; //!< Label offset relative to the start of the section.
intptr_t rel; //!< Inlined rel8/rel32.
};
// ============================================================================
// [asmjit::LabelEntry]
// ============================================================================
//! Label entry.
//!
//! Contains the following properties:
//! * Label id - This is the only thing that is set to the `Label` operand.
//! * Label name - Optional, used mostly to create executables and libraries.
//! * Label type - Type of the label, default `Label::kTypeAnonymous`.
//! * Label parent id - Derived from many assemblers that allow to define a
//! local label that falls under a global label. This allows to define
//! many labels of the same name that have different parent (global) label.
//! * Offset - offset of the label bound by `Assembler`.
//! * Links - single-linked list that contains locations of code that has
//! to be patched when the label gets bound. Every use of unbound label
//! adds one link to `_links` list.
//! * HVal - Hash value of label's name and optionally parentId.
//! * HashNext - Hash-table implementation detail.
class LabelEntry : public ZoneHashNode {
public:
// NOTE: Label id is stored in `_customData`, which is provided by ZoneHashNode
// to fill a padding that a C++ compiler targeting 64-bit CPU will add to align
// the structure to 64-bits.
//! Get label id.
ASMJIT_INLINE uint32_t getId() const noexcept { return _customData; }
//! Set label id (internal, used only by \ref CodeHolder).
ASMJIT_INLINE void _setId(uint32_t id) noexcept { _customData = id; }
//! Get label type, see \ref Label::Type.
ASMJIT_INLINE uint32_t getType() const noexcept { return _type; }
//! Get label flags, returns 0 at the moment.
ASMJIT_INLINE uint32_t getFlags() const noexcept { return _flags; }
//! Get if the label has a parent label (i.e. it is a local label).
ASMJIT_INLINE bool hasParent() const noexcept { return _parentId != 0; }
//! Get label's parent id.
ASMJIT_INLINE uint32_t getParentId() const noexcept { return _parentId; }
//! Get label's section id where it's bound to (or `SectionEntry::kInvalidId` if it's not bound yet).
ASMJIT_INLINE uint32_t getSectionId() const noexcept { return _sectionId; }
//! Get if the label has name.
ASMJIT_INLINE bool hasName() const noexcept { return !_name.isEmpty(); }
//! Get the label's name.
//!
//! NOTE: Local labels will return their local name without their parent
//! part, for example ".L1".
ASMJIT_INLINE const char* getName() const noexcept { return _name.getData(); }
//! Get length of label's name.
//!
//! NOTE: Label name is always null terminated, so you can use `strlen()` to
//! get it, however, it's also cached in `LabelEntry`, so if you want to know
//! the length the easiest way is to use `LabelEntry::getNameLength()`.
ASMJIT_INLINE size_t getNameLength() const noexcept { return _name.getLength(); }
//! Get if the label is bound (i.e. it has a valid section id).
ASMJIT_INLINE bool isBound() const noexcept { return _sectionId != SectionEntry::kInvalidId; }
//! Get the label offset (only useful if the label is bound).
ASMJIT_INLINE intptr_t getOffset() const noexcept { return _offset; }
//! Get the hash-value of label's name and its parent label (if any).
//!
//! Label hash is calculated as `HASH(Name) ^ ParentId`. The hash function
//! is implemented in `Utils::hashString()` and `Utils::hashRound()`.
ASMJIT_INLINE uint32_t getHVal() const noexcept { return _hVal; }
// ------------------------------------------------------------------------
// [Members]
// ------------------------------------------------------------------------
// Let's round the size of `LabelEntry` to 64 bytes (as ZoneHeap has 32
// bytes granularity anyway). This gives `_name` the remaining space, which
// is roughly 16 bytes on 64-bit and 28 bytes on 32-bit architectures.
enum { kNameBytes = 64 - (sizeof(ZoneHashNode) + 16 + sizeof(intptr_t) + sizeof(LabelLink*)) };
uint8_t _type; //!< Label type, see Label::Type.
uint8_t _flags; //!< Must be zero.
uint16_t _reserved16; //!< Reserved.
uint32_t _parentId; //!< Label parent id or zero.
uint32_t _sectionId; //!< Section id or `SectionEntry::kInvalidId`.
uint32_t _reserved32; //!< Reserved.
intptr_t _offset; //!< Label offset.
LabelLink* _links; //!< Label links.
SmallString<kNameBytes> _name; //!< Label name.
};
// ============================================================================
// [asmjit::RelocEntry]
// ============================================================================
//! Relocation entry.
struct RelocEntry {
//! Relocation id constants.
ASMJIT_ENUM(Id) {
kInvalidId = 0xFFFFFFFFU //!< Invalid relocation id.
};
//! Relocation type.
ASMJIT_ENUM(Type) {
kTypeNone = 0, //!< Deleted entry (no relocation).
kTypeAbsToAbs = 1, //!< Relocate absolute to absolute.
kTypeRelToAbs = 2, //!< Relocate relative to absolute.
kTypeAbsToRel = 3, //!< Relocate absolute to relative.
kTypeTrampoline = 4 //!< Relocate absolute to relative or use trampoline.
};
// ------------------------------------------------------------------------
// [Accessors]
// ------------------------------------------------------------------------
//! Get the relocation id.
ASMJIT_INLINE uint32_t getId() const noexcept { return _id; }
//! Get the relocation type, see `Type`.
ASMJIT_INLINE uint32_t getType() const noexcept { return _type; }
//! Get the size of the value to patch (1, 2, 4 or 8 bytes).
ASMJIT_INLINE uint32_t getSize() const noexcept { return _size; }
//! Get the id of the section the relocated value lives in.
ASMJIT_INLINE uint32_t getSourceSectionId() const noexcept { return _sourceSectionId; }
//! Get the id of the section the relocation refers to.
ASMJIT_INLINE uint32_t getTargetSectionId() const noexcept { return _targetSectionId; }
//! Get the source offset (relative to the start of the source section).
ASMJIT_INLINE uint64_t getSourceOffset() const noexcept { return _sourceOffset; }
//! Get the relocation data (target offset, target address, etc).
ASMJIT_INLINE uint64_t getData() const noexcept { return _data; }
// ------------------------------------------------------------------------
// [Members]
// ------------------------------------------------------------------------
uint32_t _id; //!< Relocation id.
uint8_t _type; //!< Type of the relocation.
uint8_t _size; //!< Size of the relocation (1, 2, 4 or 8 bytes).
uint8_t _reserved[2]; //!< Reserved.
uint32_t _sourceSectionId; //!< Source section id.
uint32_t _targetSectionId; //!< Destination section id.
uint64_t _sourceOffset; //!< Source offset (relative to start of the section).
uint64_t _data; //!< Relocation data (target offset, target address, etc).
};
// ============================================================================
// [asmjit::CodeHolder]
// ============================================================================
//! Contains basic information about the target architecture plus its settings,
//! and holds code & data (including sections, labels, and relocation information).
//! CodeHolder can store both binary and intermediate representation of assembly,
//! which can be generated by \ref Assembler and/or \ref CodeBuilder.
//!
//! NOTE: CodeHolder has ability to attach an \ref ErrorHandler, however, this
//! error handler is not triggered by CodeHolder itself, it's only used by the
//! attached code generators.
class CodeHolder {
public:
ASMJIT_NONCOPYABLE(CodeHolder)
// --------------------------------------------------------------------------
// [Construction / Destruction]
// --------------------------------------------------------------------------
//! Create an uninitialized CodeHolder (you must init() it before it can be used).
ASMJIT_API CodeHolder() noexcept;
//! Destroy the CodeHolder.
ASMJIT_API ~CodeHolder() noexcept;
// --------------------------------------------------------------------------
// [Init / Reset]
// --------------------------------------------------------------------------
//! Get if the CodeHolder has been initialized by `init()`.
ASMJIT_INLINE bool isInitialized() const noexcept { return _codeInfo.isInitialized(); }
//! Initialize the CodeHolder to hold code described by `info`.
ASMJIT_API Error init(const CodeInfo& info) noexcept;
//! Detach all code-generators attached and reset the \ref CodeHolder.
ASMJIT_API void reset(bool releaseMemory = false) noexcept;
// --------------------------------------------------------------------------
// [Attach / Detach]
// --------------------------------------------------------------------------
//! Attach a \ref CodeEmitter to this \ref CodeHolder.
ASMJIT_API Error attach(CodeEmitter* emitter) noexcept;
//! Detach a \ref CodeEmitter from this \ref CodeHolder.
ASMJIT_API Error detach(CodeEmitter* emitter) noexcept;
// --------------------------------------------------------------------------
// [Sync]
// --------------------------------------------------------------------------
//! Synchronize all states of all `CodeEmitter`s associated with the CodeHolder.
//! This is required as some code generators don't sync every time they do
//! something - for example \ref Assembler generally syncs when it needs to
//! reallocate the \ref CodeBuffer, but not each time it encodes instruction
//! or directive.
ASMJIT_API void sync() noexcept;
// --------------------------------------------------------------------------
// [Code-Information]
// --------------------------------------------------------------------------
//! Get code/target information, see \ref CodeInfo.
ASMJIT_INLINE const CodeInfo& getCodeInfo() const noexcept { return _codeInfo; }
//! Get architecture information, see \ref ArchInfo.
ASMJIT_INLINE const ArchInfo& getArchInfo() const noexcept { return _codeInfo.getArchInfo(); }
//! Get the target's architecture type.
ASMJIT_INLINE uint32_t getArchType() const noexcept { return getArchInfo().getType(); }
//! Get the target's architecture sub-type.
ASMJIT_INLINE uint32_t getArchSubType() const noexcept { return getArchInfo().getSubType(); }
//! Get if a static base-address is set.
ASMJIT_INLINE bool hasBaseAddress() const noexcept { return _codeInfo.hasBaseAddress(); }
//! Get a static base-address (uint64_t).
ASMJIT_INLINE uint64_t getBaseAddress() const noexcept { return _codeInfo.getBaseAddress(); }
// --------------------------------------------------------------------------
// [Global Information]
// --------------------------------------------------------------------------
//! Get global hints, internally propagated to all `CodeEmitter`s attached.
ASMJIT_INLINE uint32_t getGlobalHints() const noexcept { return _globalHints; }
//! Get global options, internally propagated to all `CodeEmitter`s attached.
ASMJIT_INLINE uint32_t getGlobalOptions() const noexcept { return _globalOptions; }
// --------------------------------------------------------------------------
// [Result Information]
// --------------------------------------------------------------------------
//! Get the size code & data of all sections.
ASMJIT_API size_t getCodeSize() const noexcept;
//! Get size of all possible trampolines.
//!
//! Trampolines are needed to successfully generate relative jumps to absolute
//! addresses. This value is only non-zero if jmp or call instructions were
//! used with immediate operand (this means jumping or calling an absolute
//! address directly).
ASMJIT_INLINE size_t getTrampolinesSize() const noexcept { return _trampolinesSize; }
// --------------------------------------------------------------------------
// [Logging & Error Handling]
// --------------------------------------------------------------------------
#if !defined(ASMJIT_DISABLE_LOGGING)
//! Get if a logger is attached.
ASMJIT_INLINE bool hasLogger() const noexcept { return _logger != nullptr; }
//! Get the attached logger.
ASMJIT_INLINE Logger* getLogger() const noexcept { return _logger; }
//! Attach a `logger` to CodeHolder and propagate it to all attached `CodeEmitter`s.
ASMJIT_API void setLogger(Logger* logger) noexcept;
//! Reset the logger (does nothing if not attached).
ASMJIT_INLINE void resetLogger() noexcept { setLogger(nullptr); }
#endif // !ASMJIT_DISABLE_LOGGING
//! Get if error-handler is attached.
ASMJIT_INLINE bool hasErrorHandler() const noexcept { return _errorHandler != nullptr; }
//! Get the error-handler.
ASMJIT_INLINE ErrorHandler* getErrorHandler() const noexcept { return _errorHandler; }
//! Set the error handler, will affect all attached `CodeEmitter`s.
ASMJIT_API Error setErrorHandler(ErrorHandler* handler) noexcept;
//! Reset the error handler (does nothing if not attached).
ASMJIT_INLINE void resetErrorHandler() noexcept { setErrorHandler(nullptr); }
// --------------------------------------------------------------------------
// [Sections]
// --------------------------------------------------------------------------
//! Get array of `SectionEntry*` records.
ASMJIT_INLINE const ZoneVector<SectionEntry*>& getSections() const noexcept { return _sections; }
//! Get a section entry of the given index (no bounds check, `index` must be valid).
ASMJIT_INLINE SectionEntry* getSectionEntry(size_t index) const noexcept { return _sections[index]; }
//! Grow the code buffer `cb` so `n` more bytes can be written to it.
ASMJIT_API Error growBuffer(CodeBuffer* cb, size_t n) noexcept;
//! Reserve capacity of the code buffer `cb` - NOTE(review): confirm whether
//! `n` is an absolute capacity or additional bytes in the implementation.
ASMJIT_API Error reserveBuffer(CodeBuffer* cb, size_t n) noexcept;
// --------------------------------------------------------------------------
// [Labels & Symbols]
// --------------------------------------------------------------------------
//! Create a new anonymous label and return its id in `idOut`.
//!
//! Returns `Error`, does not report error to \ref ErrorHandler.
ASMJIT_API Error newLabelId(uint32_t& idOut) noexcept;
//! Create a new named label of label-type `type`.
//!
//! Returns `Error`, does not report error to \ref ErrorHandler.
ASMJIT_API Error newNamedLabelId(uint32_t& idOut, const char* name, size_t nameLength, uint32_t type, uint32_t parentId) noexcept;
//! Get a label id by name.
ASMJIT_API uint32_t getLabelIdByName(const char* name, size_t nameLength = Globals::kInvalidIndex, uint32_t parentId = 0) noexcept;
//! Create a new label-link used to store information about yet unbound labels.
//!
//! Returns `null` if the allocation failed.
ASMJIT_API LabelLink* newLabelLink(LabelEntry* le, uint32_t sectionId, size_t offset, intptr_t rel) noexcept;
//! Get array of `LabelEntry*` records.
ASMJIT_INLINE const ZoneVector<LabelEntry*>& getLabelEntries() const noexcept { return _labels; }
//! Get number of labels created.
ASMJIT_INLINE size_t getLabelsCount() const noexcept { return _labels.getLength(); }
//! Get number of label references, which are unresolved at the moment.
ASMJIT_INLINE size_t getUnresolvedLabelsCount() const noexcept { return _unresolvedLabelsCount; }
//! Get if the `label` is valid (i.e. created by `newLabelId()`).
ASMJIT_INLINE bool isLabelValid(const Label& label) const noexcept {
return isLabelValid(label.getId());
}
//! Get if the label having `id` is valid (i.e. created by `newLabelId()`).
ASMJIT_INLINE bool isLabelValid(uint32_t labelId) const noexcept {
size_t index = Operand::unpackId(labelId);
return index < _labels.getLength();
}
//! Get if the `label` is already bound.
//!
//! Returns `false` if the `label` is not valid.
ASMJIT_INLINE bool isLabelBound(const Label& label) const noexcept {
return isLabelBound(label.getId());
}
//! \overload
ASMJIT_INLINE bool isLabelBound(uint32_t id) const noexcept {
size_t index = Operand::unpackId(id);
return index < _labels.getLength() && _labels[index]->isBound();
}
//! Get a `label` offset or -1 if the label is not yet bound.
ASMJIT_INLINE intptr_t getLabelOffset(const Label& label) const noexcept {
return getLabelOffset(label.getId());
}
//! \overload (asserts that `id` is valid in debug builds).
ASMJIT_INLINE intptr_t getLabelOffset(uint32_t id) const noexcept {
ASMJIT_ASSERT(isLabelValid(id));
return _labels[Operand::unpackId(id)]->getOffset();
}
//! Get information about the given `label`.
ASMJIT_INLINE LabelEntry* getLabelEntry(const Label& label) const noexcept {
return getLabelEntry(label.getId());
}
//! Get information about a label having the given `id` (null if `id` is invalid).
ASMJIT_INLINE LabelEntry* getLabelEntry(uint32_t id) const noexcept {
size_t index = static_cast<size_t>(Operand::unpackId(id));
return index < _labels.getLength() ? _labels[index] : static_cast<LabelEntry*>(nullptr);
}
// --------------------------------------------------------------------------
// [Relocations]
// --------------------------------------------------------------------------
//! Create a new relocation entry of type `type` and size `size`.
//!
//! Additional fields can be set after the relocation entry was created.
ASMJIT_API Error newRelocEntry(RelocEntry** dst, uint32_t type, uint32_t size) noexcept;
//! Get if the code contains relocations.
ASMJIT_INLINE bool hasRelocations() const noexcept { return !_relocations.isEmpty(); }
//! Get array of `RelocEntry*` records.
ASMJIT_INLINE const ZoneVector<RelocEntry*>& getRelocEntries() const noexcept { return _relocations; }
//! Get a relocation entry by `id` (no bounds check, `id` must be valid).
ASMJIT_INLINE RelocEntry* getRelocEntry(uint32_t id) const noexcept { return _relocations[id]; }
//! Relocate the code to `baseAddress` and copy it to `dst`.
//!
//! \param dst Contains the location where the relocated code should be
//! copied. The pointer can be address returned by virtual memory allocator
//! or any other address that has sufficient space.
//!
//! \param baseAddress Base address used for relocation. `JitRuntime` always
//! sets the `baseAddress` to be the same as `dst`.
//!
//! \return The number bytes actually used. If the code emitter reserved
//! space for possible trampolines, but didn't use it, the number of bytes
//! used can actually be less than the expected worst case. Virtual memory
//! allocator can shrink the memory it allocated initially.
//!
//! A given buffer will be overwritten, to get the number of bytes required,
//! use `getCodeSize()`.
ASMJIT_API size_t relocate(void* dst, uint64_t baseAddress = Globals::kNoBaseAddress) const noexcept;
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
CodeInfo _codeInfo; //!< Basic information about the code (architecture and other info).
uint32_t _globalHints; //!< Global hints, propagated to all `CodeEmitter`s.
uint32_t _globalOptions; //!< Global options, propagated to all `CodeEmitter`s.
CodeEmitter* _emitters; //!< Linked-list of all attached `CodeEmitter`s.
Assembler* _cgAsm; //!< Attached \ref Assembler (only one at a time).
Logger* _logger; //!< Attached \ref Logger, used by all consumers.
ErrorHandler* _errorHandler; //!< Attached \ref ErrorHandler.
uint32_t _unresolvedLabelsCount; //!< Count of label references which were not resolved.
uint32_t _trampolinesSize; //!< Size of all possible trampolines.
Zone _baseZone; //!< Base zone (used to allocate core structures).
Zone _dataZone; //!< Data zone (used to allocate extra data like label names).
ZoneHeap _baseHeap; //!< Zone allocator, used to manage internal containers.
ZoneVector<SectionEntry*> _sections; //!< Section entries.
ZoneVector<LabelEntry*> _labels; //!< Label entries (each label is stored here).
ZoneVector<RelocEntry*> _relocations; //!< Relocation entries.
ZoneHash<LabelEntry> _namedLabels; //!< Label name -> LabelEntry (only named labels).
};
//! \}
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"
// [Guard]
#endif // _ASMJIT_BASE_CODEHOLDER_H

View file

@ -0,0 +1,511 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Export]
#define ASMJIT_EXPORTS
// [Dependencies]
#include "../base/constpool.h"
#include "../base/utils.h"
#include <algorithm>
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
// Binary tree code is based on Julienne Walker's "Andersson Binary Trees"
// article and implementation. However, only three operations are implemented -
// get, insert and traverse.
// ============================================================================
// [asmjit::ConstPool::Tree - Ops]
// ============================================================================
//! \internal
//!
//! Remove left horizontal links.
static ASMJIT_INLINE ConstPool::Node* ConstPoolTree_skewNode(ConstPool::Node* node) noexcept {
  // AA-tree "skew": rotate right when the left child sits on the same level,
  // removing a left horizontal link. Returns the new subtree root.
  uint32_t nodeLevel = node->_level;
  ConstPool::Node* leftChild = node->_link[0];

  if (nodeLevel == 0 || !leftChild || leftChild->_level != nodeLevel)
    return node;

  node->_link[0] = leftChild->_link[1];
  leftChild->_link[1] = node;
  return leftChild;
}
//! \internal
//!
//! Remove consecutive horizontal links.
static ASMJIT_INLINE ConstPool::Node* ConstPoolTree_splitNode(ConstPool::Node* node) noexcept {
  // AA-tree "split": rotate left and promote when two consecutive right
  // horizontal links exist. Returns the new subtree root.
  uint32_t nodeLevel = node->_level;
  ConstPool::Node* rightChild = node->_link[1];

  if (nodeLevel == 0 || !rightChild || !rightChild->_link[1] || rightChild->_link[1]->_level != nodeLevel)
    return node;

  node->_link[1] = rightChild->_link[0];
  rightChild->_link[0] = node;
  rightChild->_level++;
  return rightChild;
}
ConstPool::Node* ConstPool::Tree::get(const void* data) noexcept {
  // Plain binary-search descent; `memcmp` provides the same ordering that
  // `put()` used when the nodes were inserted.
  size_t dataSize = _dataSize;

  for (ConstPool::Node* current = _root; current;) {
    int order = ::memcmp(current->getData(), data, dataSize);
    if (order == 0)
      return current;
    // Negative `order` means the node's key is smaller, so descend right.
    current = current->_link[order < 0];
  }
  return nullptr;
}
// Insert `newNode` into the AA-tree and rebalance (skew + split) along the
// search path back to the root. Callers use `get()` first, so duplicate keys
// are not expected here.
void ConstPool::Tree::put(ConstPool::Node* newNode) noexcept {
size_t dataSize = _dataSize;
_length++;
// Empty tree - the new node becomes the root.
if (!_root) {
_root = newNode;
return;
}
ConstPool::Node* node = _root;
// NOTE(review): no overflow check on `stack`; kHeightLimit must be at least
// the maximum AA-tree height (~2*log2(n)) - verify the constant's value.
ConstPool::Node* stack[kHeightLimit];
unsigned int top = 0;
unsigned int dir;
// Find a spot and save the stack.
for (;;) {
stack[top++] = node;
// dir == 1 descends right (node's key is smaller than the new key).
dir = ::memcmp(node->getData(), newNode->getData(), dataSize) < 0;
ConstPool::Node* link = node->_link[dir];
if (!link) break;
node = link;
}
// Link and rebalance.
node->_link[dir] = newNode;
while (top > 0) {
// Which child?
node = stack[--top];
if (top != 0) {
dir = stack[top - 1]->_link[1] == node;
}
node = ConstPoolTree_skewNode(node);
node = ConstPoolTree_splitNode(node);
// Fix the parent.
if (top != 0)
stack[top - 1]->_link[dir] = node;
else
_root = node;
}
}
// ============================================================================
// [asmjit::ConstPool - Construction / Destruction]
// ============================================================================
// Create a constant pool that uses `zone` as its backing allocator.
ConstPool::ConstPool(Zone* zone) noexcept { reset(zone); }
// All node memory is owned by the zone, so there is nothing to free here.
ConstPool::~ConstPool() noexcept {}
// ============================================================================
// [asmjit::ConstPool - Reset]
// ============================================================================
void ConstPool::reset(Zone* zone) noexcept {
  // Re-initialize the pool on top of `zone`: one tree + gap list per
  // power-of-two constant size (1, 2, 4, ... bytes).
  _zone = zone;

  size_t chunkSize = 1;
  for (size_t slot = 0; slot < ASMJIT_ARRAY_SIZE(_tree); slot++, chunkSize <<= 1) {
    _tree[slot].reset();
    _tree[slot].setDataSize(chunkSize);
    _gaps[slot] = nullptr;
  }

  _gapPool = nullptr;
  _size = 0;
  _alignment = 0;
}
// ============================================================================
// [asmjit::ConstPool - Ops]
// ============================================================================
static ASMJIT_INLINE ConstPool::Gap* ConstPool_allocGap(ConstPool* self) noexcept {
  // Reuse a record from the free-list when possible, otherwise carve a new
  // one out of the zone (which may return null on allocation failure).
  ConstPool::Gap* gap = self->_gapPool;
  if (gap) {
    self->_gapPool = gap->_next;
    return gap;
  }
  return self->_zone->allocT<ConstPool::Gap>();
}
// Return `gap` to the pool's free-list so `ConstPool_allocGap()` can reuse it.
static ASMJIT_INLINE void ConstPool_freeGap(ConstPool* self, ConstPool::Gap* gap) noexcept {
gap->_next = self->_gapPool;
self->_gapPool = gap;
}
static void ConstPool_addGap(ConstPool* self, size_t offset, size_t length) noexcept {
  // Record the byte range [offset, offset + length) as reusable space by
  // chopping it into the largest aligned power-of-two chunks (max 16 bytes)
  // and pushing each chunk onto the gap list of the matching size bucket.
  ASMJIT_ASSERT(length > 0);

  while (length > 0) {
    // Pick the largest chunk that both fits in `length` and keeps `offset`
    // aligned to the chunk size; each test upgrades the previous choice.
    size_t gapIndex = ConstPool::kIndex1;
    size_t gapLength = 1;

    if (length >= 2 && Utils::isAligned<size_t>(offset, 2)) { gapIndex = ConstPool::kIndex2; gapLength = 2; }
    if (length >= 4 && Utils::isAligned<size_t>(offset, 4)) { gapIndex = ConstPool::kIndex4; gapLength = 4; }
    if (length >= 8 && Utils::isAligned<size_t>(offset, 8)) { gapIndex = ConstPool::kIndex8; gapLength = 8; }
    if (length >= 16 && Utils::isAligned<size_t>(offset, 16)) { gapIndex = ConstPool::kIndex16; gapLength = 16; }

    // We don't have to check for errors here, if this failed nothing really
    // happened (just the gap won't be visible) and it will fail again at
    // place where checking will cause kErrorNoHeapMemory.
    ConstPool::Gap* gap = ConstPool_allocGap(self);
    if (!gap) return;

    gap->_offset = offset;
    gap->_length = gapLength;
    gap->_next = self->_gaps[gapIndex];
    self->_gaps[gapIndex] = gap;

    offset += gapLength;
    length -= gapLength;
  }
}
// Add a constant of `size` bytes (power of two, 1..32) to the pool. On
// success `dstOffset` receives the constant's offset from the start of the
// pool. Identical constants are de-duplicated and larger constants are also
// registered as "shared" sub-constants down to 4 bytes so smaller lookups
// can reuse their bytes.
Error ConstPool::add(const void* data, size_t size, size_t& dstOffset) noexcept {
  // Map the constant size to the tree that manages constants of that size.
  size_t treeIndex;

  if (size == 32)
    treeIndex = kIndex32;
  else if (size == 16)
    treeIndex = kIndex16;
  else if (size == 8)
    treeIndex = kIndex8;
  else if (size == 4)
    treeIndex = kIndex4;
  else if (size == 2)
    treeIndex = kIndex2;
  else if (size == 1)
    treeIndex = kIndex1;
  else
    return DebugUtils::errored(kErrorInvalidArgument);

  // De-duplicate: reuse an identical constant already in the pool.
  ConstPool::Node* node = _tree[treeIndex].get(data);
  if (node) {
    dstOffset = node->_offset;
    return kErrorOk;
  }

  // Before incrementing the current offset try if there is a gap that can
  // be used for the requested data.
  size_t offset = ~static_cast<size_t>(0);
  size_t gapIndex = treeIndex;

  while (gapIndex != kIndexCount - 1) {
    // FIX: scan the gap bucket matching `gapIndex`. The original read
    // `_gaps[treeIndex]` on every iteration, which drained all same-size
    // gaps (keeping only the last offset and leaking the rest) and never
    // considered larger gap buckets at all.
    ConstPool::Gap* gap = _gaps[gapIndex];

    // Check if there is a gap.
    if (gap) {
      size_t gapOffset = gap->_offset;
      size_t gapLength = gap->_length;

      // Destroy the gap for now.
      _gaps[gapIndex] = gap->_next;
      ConstPool_freeGap(this, gap);

      // A gap in bucket `gapIndex >= treeIndex` is aligned to its own size,
      // which implies alignment to the (smaller or equal) requested size.
      offset = gapOffset;
      ASMJIT_ASSERT(Utils::isAligned<size_t>(offset, size));

      // FIX: the unused tail of the gap starts right after the constant.
      // The original re-added it at `gapOffset`, overlapping the constant
      // (latent before, as only exact-size gaps could ever be claimed).
      gapLength -= size;
      if (gapLength > 0)
        ConstPool_addGap(this, gapOffset + size, gapLength);

      // FIX: stop searching once a suitable gap has been claimed.
      break;
    }

    gapIndex++;
  }

  if (offset == ~static_cast<size_t>(0)) {
    // Get how many bytes have to be skipped so the address is aligned accordingly
    // to the 'size'.
    size_t diff = Utils::alignDiff<size_t>(_size, size);

    if (diff != 0) {
      ConstPool_addGap(this, _size, diff);
      _size += diff;
    }

    offset = _size;
    _size += size;
  }

  // Add the initial node to the right index.
  node = ConstPool::Tree::_newNode(_zone, data, size, offset, false);
  if (!node) return DebugUtils::errored(kErrorNoHeapMemory);

  _tree[treeIndex].put(node);
  _alignment = std::max<size_t>(_alignment, size);
  dstOffset = offset;

  // Now create a bunch of shared constants that are based on the data pattern.
  // We stop at size 4, it probably doesn't make sense to split constants down
  // to 1 byte.
  size_t pCount = 1;
  while (size > 4) {
    size >>= 1;
    pCount <<= 1;

    ASMJIT_ASSERT(treeIndex != 0);
    treeIndex--;

    const uint8_t* pData = static_cast<const uint8_t*>(data);
    for (size_t i = 0; i < pCount; i++, pData += size) {
      node = _tree[treeIndex].get(pData);
      if (node) continue;

      node = ConstPool::Tree::_newNode(_zone, pData, size, offset + (i * size), true);
      // FIX: the original ignored a possible null (allocation failure) here.
      if (!node) return DebugUtils::errored(kErrorNoHeapMemory);
      _tree[treeIndex].put(node);
    }
  }

  return kErrorOk;
}
// ============================================================================
// [asmjit::ConstPool - Reset]
// ============================================================================
// Visitor used by `ConstPool::fill()` - copies every non-shared constant of
// one tree into the destination buffer at the constant's recorded offset.
struct ConstPoolFill {
ASMJIT_INLINE ConstPoolFill(uint8_t* dst, size_t dataSize) noexcept :
_dst(dst),
_dataSize(dataSize) {}
// Shared nodes are skipped - their bytes are part of a larger constant
// that is written by the visitor of a bigger tree.
ASMJIT_INLINE void visit(const ConstPool::Node* node) noexcept {
if (!node->_shared)
::memcpy(_dst + node->_offset, node->getData(), _dataSize);
}
uint8_t* _dst; // Destination buffer.
size_t _dataSize; // Constant size handled by the current tree.
};
void ConstPool::fill(void* dst) const noexcept {
  // Zero the whole area first so possible gaps never leak garbage into the
  // output - asmjit should never emit uninitialized bytes.
  ::memset(dst, 0, _size);

  ConstPoolFill filler(static_cast<uint8_t*>(dst), 1);

  // Visit every size bucket; the visitor's data size doubles with each tree.
  size_t slot = 0;
  do {
    _tree[slot].iterate(filler);
    filler._dataSize <<= 1;
  } while (++slot < ASMJIT_ARRAY_SIZE(_tree));
}
// ============================================================================
// [asmjit::ConstPool - Test]
// ============================================================================
#if defined(ASMJIT_TEST)
// Unit test covering `ConstPool` behavior: sequential insertion,
// de-duplication, splitting of large constants into smaller patterns,
// gap creation/reuse when the offset becomes misaligned, reset, and
// alignment growth when constants of increasing size are combined.
//
// Note: several EXPECT messages previously referenced the wrong accessor
// (`pool.getSize()` for alignment/offset checks) or the wrong expected
// value ("to be 8" for an `offset == 4` check); they are corrected here.
UNIT(base_constpool) {
  Zone zone(32384 - Zone::kZoneOverhead);
  ConstPool pool(&zone);
  uint32_t i;
  const uint32_t kCount = 1000000;
  INFO("Adding %u constants to the pool.", kCount);
  {
    size_t prevOffset;
    size_t curOffset;
    uint64_t c = ASMJIT_UINT64_C(0x0101010101010101);
    EXPECT(pool.add(&c, 8, prevOffset) == kErrorOk,
      "pool.add() - Returned error");
    EXPECT(prevOffset == 0,
      "pool.add() - First constant should have zero offset");
    for (i = 1; i < kCount; i++) {
      c++;
      EXPECT(pool.add(&c, 8, curOffset) == kErrorOk,
        "pool.add() - Returned error");
      EXPECT(prevOffset + 8 == curOffset,
        "pool.add() - Returned incorrect curOffset");
      EXPECT(pool.getSize() == (i + 1) * 8,
        "pool.getSize() - Reported incorrect size");
      prevOffset = curOffset;
    }
    EXPECT(pool.getAlignment() == 8,
      "pool.getAlignment() - Expected 8-byte alignment");
  }
  INFO("Retrieving %u constants from the pool.", kCount);
  {
    uint64_t c = ASMJIT_UINT64_C(0x0101010101010101);
    // Re-adding the same constants must return the cached offsets.
    for (i = 0; i < kCount; i++) {
      size_t offset;
      EXPECT(pool.add(&c, 8, offset) == kErrorOk,
        "pool.add() - Returned error");
      EXPECT(offset == i * 8,
        "pool.add() - Should have reused constant");
      c++;
    }
  }
  INFO("Checking if the constants were split into 4-byte patterns");
  {
    uint32_t c = 0x01010101;
    // Each 4-byte pattern maps to the first half of the corresponding
    // 8-byte constant, hence the 8-byte stride of the expected offsets.
    for (i = 0; i < kCount; i++) {
      size_t offset;
      EXPECT(pool.add(&c, 4, offset) == kErrorOk,
        "pool.add() - Returned error");
      EXPECT(offset == i * 8,
        "pool.add() - Should reuse existing constant");
      c++;
    }
  }
  INFO("Adding 2 byte constant to misalign the current offset");
  {
    uint16_t c = 0xFFFF;
    size_t offset;
    EXPECT(pool.add(&c, 2, offset) == kErrorOk,
      "pool.add() - Returned error");
    EXPECT(offset == kCount * 8,
      "pool.add() - Didn't return expected position");
    EXPECT(pool.getAlignment() == 8,
      "pool.getAlignment() - Expected 8-byte alignment");
  }
  INFO("Adding 8 byte constant to check if pool gets aligned again");
  {
    uint64_t c = ASMJIT_UINT64_C(0xFFFFFFFFFFFFFFFF);
    size_t offset;
    EXPECT(pool.add(&c, 8, offset) == kErrorOk,
      "pool.add() - Returned error");
    EXPECT(offset == kCount * 8 + 8,
      "pool.add() - Didn't return aligned offset");
  }
  INFO("Adding 2 byte constant to verify the gap is filled");
  {
    uint16_t c = 0xFFFE;
    size_t offset;
    EXPECT(pool.add(&c, 2, offset) == kErrorOk,
      "pool.add() - Returned error");
    // The previous 8-byte add left a 6-byte gap starting at kCount * 8 + 2.
    EXPECT(offset == kCount * 8 + 2,
      "pool.add() - Didn't fill the gap");
    EXPECT(pool.getAlignment() == 8,
      "pool.getAlignment() - Expected 8-byte alignment");
  }
  INFO("Checking reset functionality");
  {
    pool.reset(&zone);
    zone.reset();
    EXPECT(pool.getSize() == 0,
      "pool.getSize() - Expected pool size to be zero");
    EXPECT(pool.getAlignment() == 0,
      "pool.getAlignment() - Expected pool alignment to be zero");
  }
  INFO("Checking pool alignment when combined constants are added");
  {
    uint8_t bytes[32] = { 0 };
    size_t offset;
    pool.add(bytes, 1, offset);
    EXPECT(pool.getSize() == 1,
      "pool.getSize() - Expected pool size to be 1 byte");
    EXPECT(pool.getAlignment() == 1,
      "pool.getAlignment() - Expected pool alignment to be 1 byte");
    EXPECT(offset == 0,
      "pool.add() - Expected offset returned to be zero");
    pool.add(bytes, 2, offset);
    EXPECT(pool.getSize() == 4,
      "pool.getSize() - Expected pool size to be 4 bytes");
    EXPECT(pool.getAlignment() == 2,
      "pool.getAlignment() - Expected pool alignment to be 2 bytes");
    EXPECT(offset == 2,
      "pool.add() - Expected offset returned to be 2");
    pool.add(bytes, 4, offset);
    EXPECT(pool.getSize() == 8,
      "pool.getSize() - Expected pool size to be 8 bytes");
    EXPECT(pool.getAlignment() == 4,
      "pool.getAlignment() - Expected pool alignment to be 4 bytes");
    EXPECT(offset == 4,
      "pool.add() - Expected offset returned to be 4");
    // Adding the same 4-byte constant again must de-duplicate.
    pool.add(bytes, 4, offset);
    EXPECT(pool.getSize() == 8,
      "pool.getSize() - Expected pool size to be 8 bytes");
    EXPECT(pool.getAlignment() == 4,
      "pool.getAlignment() - Expected pool alignment to be 4 bytes");
    EXPECT(offset == 4,
      "pool.add() - Expected offset returned to be 4");
    pool.add(bytes, 32, offset);
    EXPECT(pool.getSize() == 64,
      "pool.getSize() - Expected pool size to be 64 bytes");
    EXPECT(pool.getAlignment() == 32,
      "pool.getAlignment() - Expected pool alignment to be 32 bytes");
    EXPECT(offset == 32,
      "pool.add() - Expected offset returned to be 32");
  }
}
#endif // ASMJIT_TEST
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"

View file

@ -0,0 +1,257 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_BASE_CONSTPOOL_H
#define _ASMJIT_BASE_CONSTPOOL_H
// [Dependencies]
#include "../base/zone.h"
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
//! \addtogroup asmjit_base
//! \{
// ============================================================================
// [asmjit::ConstPool]
// ============================================================================
//! Constant pool.
//!
//! Stores de-duplicated constants of size 1, 2, 4, 8, 16 or 32 bytes; each
//! size class lives in its own tree (see `kIndex...`). Alignment gaps that
//! appear when a larger constant follows a smaller one are recorded so that
//! subsequent smaller constants can reuse them.
class ConstPool {
public:
  ASMJIT_NONCOPYABLE(ConstPool)
  //! Index of a tree in `_tree[]` (and a gap list in `_gaps[]`); slot `i`
  //! manages constants of `1 << i` bytes.
  enum {
    kIndex1 = 0,
    kIndex2 = 1,
    kIndex4 = 2,
    kIndex8 = 3,
    kIndex16 = 4,
    kIndex32 = 5,
    kIndexCount = 6
  };
  // --------------------------------------------------------------------------
  // [Gap]
  // --------------------------------------------------------------------------
  //! \internal
  //!
  //! Zone-allocated const-pool gap.
  struct Gap {
    Gap* _next;                          //!< Pointer to the next gap
    size_t _offset;                      //!< Offset of the gap.
    size_t _length;                      //!< Remaining bytes of the gap (basically a gap size).
  };
  // --------------------------------------------------------------------------
  // [Node]
  // --------------------------------------------------------------------------
  //! \internal
  //!
  //! Zone-allocated const-pool node.
  //!
  //! The constant's payload is stored immediately after the node itself (see
  //! `Tree::_newNode()`), which is why `getData()` returns `this + 1`.
  struct Node {
    //! Get pointer to the constant data stored right behind this node.
    ASMJIT_INLINE void* getData() const noexcept {
      return static_cast<void*>(const_cast<ConstPool::Node*>(this) + 1);
    }
    Node* _link[2];                      //!< Left/Right nodes.
    uint32_t _level : 31;                //!< Horizontal level for balance.
    uint32_t _shared : 1;                //!< If this constant is shared with another.
    uint32_t _offset;                    //!< Data offset from the beginning of the pool.
  };
  // --------------------------------------------------------------------------
  // [Tree]
  // --------------------------------------------------------------------------
  //! \internal
  //!
  //! Zone-allocated const-pool tree holding all constants of one size class.
  struct Tree {
    enum {
      //! Maximum tree height == log2(1 << 64).
      kHeightLimit = 64
    };
    // --------------------------------------------------------------------------
    // [Construction / Destruction]
    // --------------------------------------------------------------------------
    ASMJIT_INLINE Tree(size_t dataSize = 0) noexcept
      : _root(nullptr),
        _length(0),
        _dataSize(dataSize) {}
    ASMJIT_INLINE ~Tree() {}
    // --------------------------------------------------------------------------
    // [Reset]
    // --------------------------------------------------------------------------
    //! Forget all nodes; the nodes themselves stay owned by the `Zone`.
    ASMJIT_INLINE void reset() noexcept {
      _root = nullptr;
      _length = 0;
    }
    // --------------------------------------------------------------------------
    // [Accessors]
    // --------------------------------------------------------------------------
    ASMJIT_INLINE bool isEmpty() const noexcept { return _length == 0; }
    ASMJIT_INLINE size_t getLength() const noexcept { return _length; }
    //! Set the constant size handled by this tree; only valid while empty.
    ASMJIT_INLINE void setDataSize(size_t dataSize) noexcept {
      ASMJIT_ASSERT(isEmpty());
      _dataSize = dataSize;
    }
    // --------------------------------------------------------------------------
    // [Ops]
    // --------------------------------------------------------------------------
    //! Find the node whose constant equals `data` (nullptr if not present).
    ASMJIT_API Node* get(const void* data) noexcept;
    //! Insert `node` into the tree.
    ASMJIT_API void put(Node* node) noexcept;
    // --------------------------------------------------------------------------
    // [Iterate]
    // --------------------------------------------------------------------------
    //! In-order traversal over all nodes, implemented iteratively with an
    //! explicit stack; calls `visitor.visit(node)` once per node.
    template<typename Visitor>
    ASMJIT_INLINE void iterate(Visitor& visitor) const noexcept {
      Node* node = const_cast<Node*>(_root);
      if (!node) return;
      Node* stack[kHeightLimit];
      size_t top = 0;
      for (;;) {
        // Descend to the left-most node, remembering the path on the stack.
        Node* left = node->_link[0];
        if (left != nullptr) {
          ASMJIT_ASSERT(top != kHeightLimit);
          stack[top++] = node;
          node = left;
          continue;
        }
Visit:
        visitor.visit(node);
        node = node->_link[1];
        if (node != nullptr)
          continue;
        if (top == 0)
          return;
        // Right subtree exhausted - pop the parent and visit it.
        node = stack[--top];
        goto Visit;
      }
    }
    // --------------------------------------------------------------------------
    // [Helpers]
    // --------------------------------------------------------------------------
    //! Allocate a `Node` plus `size` payload bytes from `zone`, initialize it
    //! and copy `data` behind it; returns nullptr when the zone is exhausted.
    static ASMJIT_INLINE Node* _newNode(Zone* zone, const void* data, size_t size, size_t offset, bool shared) noexcept {
      Node* node = zone->allocT<Node>(sizeof(Node) + size);
      if (ASMJIT_UNLIKELY(!node)) return nullptr;
      node->_link[0] = nullptr;
      node->_link[1] = nullptr;
      node->_level = 1;
      node->_shared = shared;
      node->_offset = static_cast<uint32_t>(offset);
      ::memcpy(node->getData(), data, size);
      return node;
    }
    // --------------------------------------------------------------------------
    // [Members]
    // --------------------------------------------------------------------------
    Node* _root;                         //!< Root of the tree
    size_t _length;                      //!< Length of the tree (count of nodes).
    size_t _dataSize;                    //!< Size of the data.
  };
  // --------------------------------------------------------------------------
  // [Construction / Destruction]
  // --------------------------------------------------------------------------
  ASMJIT_API ConstPool(Zone* zone) noexcept;
  ASMJIT_API ~ConstPool() noexcept;
  // --------------------------------------------------------------------------
  // [Reset]
  // --------------------------------------------------------------------------
  //! Reset the pool and attach it to (a possibly different) `zone`.
  ASMJIT_API void reset(Zone* zone) noexcept;
  // --------------------------------------------------------------------------
  // [Ops]
  // --------------------------------------------------------------------------
  //! Get whether the constant-pool is empty.
  ASMJIT_INLINE bool isEmpty() const noexcept { return _size == 0; }
  //! Get the size of the constant-pool in bytes.
  ASMJIT_INLINE size_t getSize() const noexcept { return _size; }
  //! Get minimum alignment.
  ASMJIT_INLINE size_t getAlignment() const noexcept { return _alignment; }
  //! Add a constant to the constant pool.
  //!
  //! The constant must have known size, which is 1, 2, 4, 8, 16 or 32 bytes.
  //! The constant is added to the pool only if it doesn't exist, otherwise
  //! cached value is returned. The constant's offset relative to the start of
  //! the pool is stored into `dstOffset`.
  //!
  //! AsmJit is able to subdivide added constants, so for example if you add
  //! 8-byte constant 0x1122334455667788 it will create the following slots:
  //!
  //!   8-byte: 0x1122334455667788
  //!   4-byte: 0x11223344, 0x55667788
  //!
  //! The reason is that when combining MMX/SSE/AVX code some patterns are used
  //! frequently. However, AsmJit is not able to reallocate a constant that has
  //! been already added. For example if you try to add 4-byte constant and then
  //! 8-byte constant having the same 4-byte pattern as the previous one, two
  //! independent slots will be generated by the pool.
  ASMJIT_API Error add(const void* data, size_t size, size_t& dstOffset) noexcept;
  // --------------------------------------------------------------------------
  // [Fill]
  // --------------------------------------------------------------------------
  //! Fill the destination with the constants from the pool.
  //!
  //! `dst` must provide at least `getSize()` bytes; gap bytes are zeroed.
  ASMJIT_API void fill(void* dst) const noexcept;
  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------
  Zone* _zone;                           //!< Zone allocator.
  Tree _tree[kIndexCount];               //!< Tree per size.
  Gap* _gaps[kIndexCount];               //!< Gaps per size.
  Gap* _gapPool;                         //!< Gaps pool
  size_t _size;                          //!< Size of the pool (in bytes).
  size_t _alignment;                     //!< Required pool alignment.
};
//! \}
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"
// [Guard]
#endif // _ASMJIT_BASE_CONSTPOOL_H

View file

@ -0,0 +1,674 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Export]
#define ASMJIT_EXPORTS
// [Dependencies]
#include "../base/cpuinfo.h"
#include "../base/utils.h"
#if ASMJIT_OS_POSIX
# include <errno.h>
# include <sys/utsname.h>
# include <unistd.h>
#endif // ASMJIT_OS_POSIX
#if ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64
# if ASMJIT_CC_MSC_GE(14, 0, 0)
# include <intrin.h> // Required by `__cpuid()` and `_xgetbv()`.
# endif // _MSC_VER >= 1400
#endif
#if ASMJIT_ARCH_ARM32 || ASMJIT_ARCH_ARM64
# if ASMJIT_OS_LINUX
# include <sys/auxv.h> // Required by `getauxval()`.
# endif
#endif
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
// ============================================================================
// [asmjit::CpuInfo - Detect ARM]
// ============================================================================
// ARM information has to be retrieved by the OS (this is how ARM was designed).
#if ASMJIT_ARCH_ARM32 || ASMJIT_ARCH_ARM64
#if ASMJIT_ARCH_ARM32
//! \internal
//!
//! Initialize `cpuInfo` for a 32-bit ARM (A32) host. Only the architecture
//! type is set here; features are detected by the OS-specific code paths.
static ASMJIT_INLINE void armPopulateBaselineA32Features(CpuInfo* cpuInfo) noexcept {
  cpuInfo->_archInfo.init(ArchInfo::kTypeA32);
}
#endif // ASMJIT_ARCH_ARM32
#if ASMJIT_ARCH_ARM64
//! \internal
//!
//! Initialize `cpuInfo` for an AArch64 (A64) host and add every feature that
//! the A64 baseline guarantees, regardless of OS.
static ASMJIT_INLINE void armPopulateBaselineA64Features(CpuInfo* cpuInfo) noexcept {
  cpuInfo->_archInfo.init(ArchInfo::kTypeA64);
  // Thumb (including all variations) is supported on A64 (but not accessible from A64).
  cpuInfo->addFeature(CpuInfo::kArmFeatureTHUMB);
  cpuInfo->addFeature(CpuInfo::kArmFeatureTHUMB2);
  // A64 is based on ARMv8 and newer.
  cpuInfo->addFeature(CpuInfo::kArmFeatureV6);
  cpuInfo->addFeature(CpuInfo::kArmFeatureV7);
  cpuInfo->addFeature(CpuInfo::kArmFeatureV8);
  // A64 comes with these features by default.
  cpuInfo->addFeature(CpuInfo::kArmFeatureVFPv2);
  cpuInfo->addFeature(CpuInfo::kArmFeatureVFPv3);
  cpuInfo->addFeature(CpuInfo::kArmFeatureVFPv4);
  cpuInfo->addFeature(CpuInfo::kArmFeatureEDSP);
  cpuInfo->addFeature(CpuInfo::kArmFeatureASIMD);
  cpuInfo->addFeature(CpuInfo::kArmFeatureIDIVA);
  cpuInfo->addFeature(CpuInfo::kArmFeatureIDIVT);
}
#endif // ASMJIT_ARCH_ARM64
#if ASMJIT_OS_WINDOWS
//! \internal
//!
//! Detect ARM CPU features on Windows.
//!
//! The detection is based on `IsProcessorFeaturePresent()` API call. Features
//! that Windows guarantees for the target architecture are added statically
//! first, the remainder is queried at runtime.
static ASMJIT_INLINE void armDetectCpuInfoOnWindows(CpuInfo* cpuInfo) noexcept {
#if ASMJIT_ARCH_ARM32
  armPopulateBaselineA32Features(cpuInfo);
  // Windows for ARM requires at least ARMv7 with DSP extensions.
  cpuInfo->addFeature(CpuInfo::kArmFeatureV6);
  cpuInfo->addFeature(CpuInfo::kArmFeatureV7);
  cpuInfo->addFeature(CpuInfo::kArmFeatureEDSP);
  // Windows for ARM requires VFPv3.
  cpuInfo->addFeature(CpuInfo::kArmFeatureVFPv2);
  cpuInfo->addFeature(CpuInfo::kArmFeatureVFPv3);
  // Windows for ARM requires and uses THUMB2.
  cpuInfo->addFeature(CpuInfo::kArmFeatureTHUMB);
  cpuInfo->addFeature(CpuInfo::kArmFeatureTHUMB2);
#else
  armPopulateBaselineA64Features(cpuInfo);
#endif
  // Windows for ARM requires ASIMD.
  cpuInfo->addFeature(CpuInfo::kArmFeatureASIMD);
  // Detect additional CPU features by calling `IsProcessorFeaturePresent()`.
  struct WinPFPMapping {
    uint32_t pfpId;                      //!< `PF_*` constant passed to the API.
    uint32_t featureId;                  //!< Feature to add when present.
  };
  static const WinPFPMapping mapping[] = {
    { PF_ARM_FMAC_INSTRUCTIONS_AVAILABLE , CpuInfo::kArmFeatureVFPv4 },
    { PF_ARM_VFP_32_REGISTERS_AVAILABLE  , CpuInfo::kArmFeatureVFP_D32 },
    { PF_ARM_DIVIDE_INSTRUCTION_AVAILABLE, CpuInfo::kArmFeatureIDIVT },
    { PF_ARM_64BIT_LOADSTORE_ATOMIC      , CpuInfo::kArmFeatureAtomics64 }
  };
  for (uint32_t i = 0; i < ASMJIT_ARRAY_SIZE(mapping); i++)
    if (::IsProcessorFeaturePresent(mapping[i].pfpId))
      cpuInfo->addFeature(mapping[i].featureId);
}
#endif // ASMJIT_OS_WINDOWS
#if ASMJIT_OS_LINUX
//! \internal
//!
//! Maps bits of a Linux `AT_HWCAP` / `AT_HWCAP2` aux-vector mask to an
//! asmjit CPU feature id.
struct LinuxHWCapMapping {
  uint32_t hwcapMask;                    //!< Bit(s) that must all be set in the mask.
  uint32_t featureId;                    //!< `CpuInfo::kArmFeature...` to add when present.
};
//! Query the kernel for the HWCAP mask of `type` (AT_HWCAP / AT_HWCAP2) and
//! add a CPU feature for every mapping entry whose bits are all present.
static void armDetectHWCaps(CpuInfo* cpuInfo, unsigned long type, const LinuxHWCapMapping* mapping, size_t length) noexcept {
  unsigned long caps = getauxval(type);
  const LinuxHWCapMapping* end = mapping + length;
  for (const LinuxHWCapMapping* m = mapping; m != end; m++) {
    if ((caps & m->hwcapMask) == m->hwcapMask)
      cpuInfo->addFeature(m->featureId);
  }
}
//! \internal
//!
//! Detect ARM CPU features on Linux.
//!
//! The detection is based on `getauxval()` - the kernel reports supported
//! extensions through the `AT_HWCAP` / `AT_HWCAP2` auxiliary vectors.
ASMJIT_FAVOR_SIZE static void armDetectCpuInfoOnLinux(CpuInfo* cpuInfo) noexcept {
#if ASMJIT_ARCH_ARM32
  armPopulateBaselineA32Features(cpuInfo);
  // `AT_HWCAP` provides ARMv7 (and less) related flags.
  static const LinuxHWCapMapping hwCapMapping[] = {
    { /* HWCAP_VFP */ (1 << 6), CpuInfo::kArmFeatureVFPv2 },
    { /* HWCAP_EDSP */ (1 << 7), CpuInfo::kArmFeatureEDSP },
    { /* HWCAP_NEON */ (1 << 12), CpuInfo::kArmFeatureASIMD },
    { /* HWCAP_VFPv3 */ (1 << 13), CpuInfo::kArmFeatureVFPv3 },
    { /* HWCAP_VFPv4 */ (1 << 16), CpuInfo::kArmFeatureVFPv4 },
    { /* HWCAP_IDIVA */ (1 << 17), CpuInfo::kArmFeatureIDIVA },
    { /* HWCAP_IDIVT */ (1 << 18), CpuInfo::kArmFeatureIDIVT },
    { /* HWCAP_VFPD32 */ (1 << 19), CpuInfo::kArmFeatureVFP_D32 }
  };
  armDetectHWCaps(cpuInfo, AT_HWCAP, hwCapMapping, ASMJIT_ARRAY_SIZE(hwCapMapping));
  // The kernel only reports the highest VFP level and concrete extensions,
  // so the implied lower architecture levels are derived below.
  // VFPv3 implies VFPv2.
  if (cpuInfo->hasFeature(CpuInfo::kArmFeatureVFPv3)) {
    cpuInfo->addFeature(CpuInfo::kArmFeatureVFPv2);
  }
  // VFPv2 implies ARMv6.
  if (cpuInfo->hasFeature(CpuInfo::kArmFeatureVFPv2)) {
    cpuInfo->addFeature(CpuInfo::kArmFeatureV6);
  }
  // VFPv3 or ASIMD implies ARMv7.
  if (cpuInfo->hasFeature(CpuInfo::kArmFeatureVFPv3) ||
      cpuInfo->hasFeature(CpuInfo::kArmFeatureASIMD)) {
    cpuInfo->addFeature(CpuInfo::kArmFeatureV7);
  }
  // `AT_HWCAP2` provides ARMv8+ related flags.
  static const LinuxHWCapMapping hwCap2Mapping[] = {
    { /* HWCAP2_AES */ (1 << 0), CpuInfo::kArmFeatureAES },
    { /* HWCAP2_PMULL */ (1 << 1), CpuInfo::kArmFeaturePMULL },
    { /* HWCAP2_SHA1 */ (1 << 2), CpuInfo::kArmFeatureSHA1 },
    { /* HWCAP2_SHA2 */ (1 << 3), CpuInfo::kArmFeatureSHA256 },
    { /* HWCAP2_CRC32 */ (1 << 4), CpuInfo::kArmFeatureCRC32 }
  };
  armDetectHWCaps(cpuInfo, AT_HWCAP2, hwCap2Mapping, ASMJIT_ARRAY_SIZE(hwCap2Mapping));
  // Any ARMv8 crypto/CRC extension implies the ARMv8 baseline.
  if (cpuInfo->hasFeature(CpuInfo::kArmFeatureAES ) ||
      cpuInfo->hasFeature(CpuInfo::kArmFeatureCRC32 ) ||
      cpuInfo->hasFeature(CpuInfo::kArmFeaturePMULL ) ||
      cpuInfo->hasFeature(CpuInfo::kArmFeatureSHA1 ) ||
      cpuInfo->hasFeature(CpuInfo::kArmFeatureSHA256)) {
    cpuInfo->addFeature(CpuInfo::kArmFeatureV8);
  }
#else
  armPopulateBaselineA64Features(cpuInfo);
  // `AT_HWCAP` provides ARMv8+ related flags.
  static const LinuxHWCapMapping hwCapMapping[] = {
    { /* HWCAP_ASIMD */ (1 << 1), CpuInfo::kArmFeatureASIMD },
    { /* HWCAP_AES */ (1 << 3), CpuInfo::kArmFeatureAES },
    { /* HWCAP_CRC32 */ (1 << 7), CpuInfo::kArmFeatureCRC32 },
    { /* HWCAP_PMULL */ (1 << 4), CpuInfo::kArmFeaturePMULL },
    { /* HWCAP_SHA1 */ (1 << 5), CpuInfo::kArmFeatureSHA1 },
    { /* HWCAP_SHA2 */ (1 << 6), CpuInfo::kArmFeatureSHA256 },
    { /* HWCAP_ATOMICS */ (1 << 8), CpuInfo::kArmFeatureAtomics64 }
  };
  armDetectHWCaps(cpuInfo, AT_HWCAP, hwCapMapping, ASMJIT_ARRAY_SIZE(hwCapMapping));
  // `AT_HWCAP2` is not used at the moment.
#endif
}
#endif // ASMJIT_OS_LINUX
//! \internal
//!
//! Detect ARM CPU features using whatever mechanism the host OS provides;
//! fails at compile time on operating systems without a known detection path.
ASMJIT_FAVOR_SIZE static void armDetectCpuInfo(CpuInfo* cpuInfo) noexcept {
#if ASMJIT_OS_WINDOWS
  armDetectCpuInfoOnWindows(cpuInfo);
#elif ASMJIT_OS_LINUX
  armDetectCpuInfoOnLinux(cpuInfo);
#else
# error "[asmjit] armDetectCpuInfo() - Unsupported OS."
#endif
}
#endif // ASMJIT_ARCH_ARM32 || ASMJIT_ARCH_ARM64
// ============================================================================
// [asmjit::CpuInfo - Detect X86]
// ============================================================================
#if ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64
//! \internal
//!
//! X86 CPUID result - the four output registers of the `cpuid` instruction.
struct CpuIdResult {
  uint32_t eax, ebx, ecx, edx;
};
//! \internal
//!
//! Content of XCR register, result of XGETBV instruction (EDX:EAX pair).
struct XGetBVResult {
  uint32_t eax, edx;
};
#if ASMJIT_CC_MSC && !ASMJIT_CC_MSC_GE(15, 0, 30729) && ASMJIT_ARCH_X64
//! \internal
//!
//! HACK: VS2008 or less, 64-bit mode - `__cpuidex` doesn't exist! However,
//! 64-bit calling convention specifies the first parameter to be passed by
//! ECX, so we may be lucky if compiler doesn't move the register, otherwise
//! the result would be wrong.
static void ASMJIT_NOINLINE void x86CallCpuIdWorkaround(uint32_t inEcx, uint32_t inEax, CpuIdResult* result) noexcept {
__cpuid(reinterpret_cast<int*>(result), inEax);
}
#endif
//! \internal
//!
//! Wrapper to call `cpuid` instruction with function id `inEax` and sub-leaf
//! `inEcx`, storing the EAX/EBX/ECX/EDX outputs into `result`.
static void ASMJIT_INLINE x86CallCpuId(CpuIdResult* result, uint32_t inEax, uint32_t inEcx = 0) noexcept {
#if ASMJIT_CC_MSC && ASMJIT_CC_MSC_GE(15, 0, 30729)
  // VS2008 SP1+ provides `__cpuidex`, which also takes the sub-leaf (ECX).
  __cpuidex(reinterpret_cast<int*>(result), inEax, inEcx);
#elif ASMJIT_CC_MSC && ASMJIT_ARCH_X64
  // Older 64-bit MSC - no `__cpuidex` and no inline asm; see the HACK above.
  x86CallCpuIdWorkaround(inEcx, inEax, result);
#elif ASMJIT_CC_MSC && ASMJIT_ARCH_X86
  // 32-bit MSC - use MSVC-style inline assembly.
  uint32_t paramEax = inEax;
  uint32_t paramEcx = inEcx;
  uint32_t* out = reinterpret_cast<uint32_t*>(result);
  __asm {
    mov eax, paramEax
    mov ecx, paramEcx
    mov edi, out
    cpuid
    mov dword ptr[edi + 0], eax
    mov dword ptr[edi + 4], ebx
    mov dword ptr[edi + 8], ecx
    mov dword ptr[edi + 12], edx
  }
#elif (ASMJIT_CC_GCC || ASMJIT_CC_CLANG) && ASMJIT_ARCH_X86
  // EBX is saved to EDI around `cpuid` and restored - presumably because EBX
  // may be reserved by the compiler (e.g. as the PIC register), so the output
  // constraint reads it from EDI ("=D"). NOTE(review): confirm rationale.
  __asm__ __volatile__(
    "mov %%ebx, %%edi\n"
    "cpuid\n"
    "xchg %%edi, %%ebx\n"
    : "=a"(result->eax),
      "=D"(result->ebx),
      "=c"(result->ecx),
      "=d"(result->edx)
    : "a"(inEax),
      "c"(inEcx));
#elif (ASMJIT_CC_GCC || ASMJIT_CC_CLANG || ASMJIT_CC_INTEL) && ASMJIT_ARCH_X64
  // Same save/restore trick with RBX/RDI in 64-bit mode.
  __asm__ __volatile__(
    "mov %%rbx, %%rdi\n"
    "cpuid\n"
    "xchg %%rdi, %%rbx\n"
    : "=a"(result->eax),
      "=D"(result->ebx),
      "=c"(result->ecx),
      "=d"(result->edx)
    : "a"(inEax),
      "c"(inEcx));
#else
# error "[asmjit] x86CallCpuid() - Unsupported compiler."
#endif
}
//! \internal
//!
//! Wrapper to call `xgetbv` instruction - reads the extended control register
//! selected by `inEcx` (XCR0 when zero) into `result` as an EDX:EAX pair.
//! When the compiler offers no way to execute the instruction the result is
//! zeroed, which callers treat as "no state enabled".
static ASMJIT_INLINE void x86CallXGetBV(XGetBVResult* result, uint32_t inEcx) noexcept {
#if ASMJIT_CC_MSC_GE(16, 0, 40219) // 2010SP1+
  uint64_t value = _xgetbv(inEcx);
  result->eax = static_cast<uint32_t>(value & 0xFFFFFFFFU);
  result->edx = static_cast<uint32_t>(value >> 32);
#elif ASMJIT_CC_GCC || ASMJIT_CC_CLANG
  uint32_t outEax;
  uint32_t outEdx;
  // Replaced, because the world is not perfect:
  //   __asm__ __volatile__("xgetbv" : "=a"(outEax), "=d"(outEdx) : "c"(inEcx));
  // The raw bytes below are the encoding of `xgetbv`, so assemblers that do
  // not know the mnemonic can still build this.
  __asm__ __volatile__(".byte 0x0F, 0x01, 0xd0" : "=a"(outEax), "=d"(outEdx) : "c"(inEcx));
  result->eax = outEax;
  result->edx = outEdx;
#else
  result->eax = 0;
  result->edx = 0;
#endif
}
//! \internal
//!
//! Map a 12-byte vendor string returned by `cpuid` into a `CpuInfo::Vendor`
//! ID; returns `kVendorNone` when the string matches no known vendor.
static ASMJIT_INLINE uint32_t x86GetCpuVendorID(const char* vendorString) noexcept {
  struct VendorData {
    uint32_t id;
    char text[12];
  };
  static const VendorData vendorList[] = {
    { CpuInfo::kVendorIntel , { 'G', 'e', 'n', 'u', 'i', 'n', 'e', 'I', 'n', 't', 'e', 'l' } },
    { CpuInfo::kVendorAMD   , { 'A', 'u', 't', 'h', 'e', 'n', 't', 'i', 'c', 'A', 'M', 'D' } },
    { CpuInfo::kVendorVIA   , { 'V', 'I', 'A', 0  , 'V', 'I', 'A', 0  , 'V', 'I', 'A', 0   } },
    { CpuInfo::kVendorVIA   , { 'C', 'e', 'n', 't', 'a', 'u', 'r', 'H', 'a', 'u', 'l', 's' } }
  };
  // FIX: the previous implementation compared the strings as three uint32_t
  // words through `reinterpret_cast<const uint32_t*>`, which violates strict
  // aliasing and assumes 4-byte alignment of `vendorString`. `memcmp` yields
  // the same result without either assumption.
  for (uint32_t i = 0; i < ASMJIT_ARRAY_SIZE(vendorList); i++) {
    if (::memcmp(vendorString, vendorList[i].text, 12) == 0)
      return vendorList[i].id;
  }
  return CpuInfo::kVendorNone;
}
//! Compact a CPU brand string in place:
//!   - runs of spaces collapse into a single space (the last one is kept),
//!   - spaces adjacent to '@' (both " @" and "@ ") are removed,
//! and every scanned character is cleared as the cursor advances so that no
//! garbage remains after the new zero terminator.
static inline void x86SimplifyBrandString(char* s) noexcept {
  char* dst = s;
  char last = 0;
  char c = s[0];
  s[0] = '\0';

  while (c != '\0') {
    // A space is dropped when it directly follows '@', or when the next
    // character is another space or '@'.
    bool drop = (c == ' ') && (last == '@' || s[1] == ' ' || s[1] == '@');
    if (!drop) {
      *dst++ = c;
      last = c;
    }
    c = *++s;
    s[0] = '\0';
  }
  *dst = '\0';
}
ASMJIT_FAVOR_SIZE static void x86DetectCpuInfo(CpuInfo* cpuInfo) noexcept {
uint32_t i, maxId;
CpuIdResult regs;
XGetBVResult xcr0 = { 0, 0 };
cpuInfo->_archInfo.init(ArchInfo::kTypeHost);
cpuInfo->addFeature(CpuInfo::kX86FeatureI486);
// --------------------------------------------------------------------------
// [CPUID EAX=0x0]
// --------------------------------------------------------------------------
// Get vendor string/id.
x86CallCpuId(&regs, 0x0);
maxId = regs.eax;
::memcpy(cpuInfo->_vendorString + 0, &regs.ebx, 4);
::memcpy(cpuInfo->_vendorString + 4, &regs.edx, 4);
::memcpy(cpuInfo->_vendorString + 8, &regs.ecx, 4);
cpuInfo->_vendorId = x86GetCpuVendorID(cpuInfo->_vendorString);
// --------------------------------------------------------------------------
// [CPUID EAX=0x1]
// --------------------------------------------------------------------------
if (maxId >= 0x1) {
// Get feature flags in ECX/EDX and family/model in EAX.
x86CallCpuId(&regs, 0x1);
// Fill family and model fields.
cpuInfo->_family = (regs.eax >> 8) & 0x0F;
cpuInfo->_model = (regs.eax >> 4) & 0x0F;
cpuInfo->_stepping = (regs.eax ) & 0x0F;
// Use extended family and model fields.
if (cpuInfo->_family == 0x0F) {
cpuInfo->_family += ((regs.eax >> 20) & 0xFF);
cpuInfo->_model += ((regs.eax >> 16) & 0x0F) << 4;
}
cpuInfo->_x86Data._processorType = ((regs.eax >> 12) & 0x03);
cpuInfo->_x86Data._brandIndex = ((regs.ebx ) & 0xFF);
cpuInfo->_x86Data._flushCacheLineSize = ((regs.ebx >> 8) & 0xFF) * 8;
cpuInfo->_x86Data._maxLogicalProcessors = ((regs.ebx >> 16) & 0xFF);
if (regs.ecx & 0x00000001U) cpuInfo->addFeature(CpuInfo::kX86FeatureSSE3);
if (regs.ecx & 0x00000002U) cpuInfo->addFeature(CpuInfo::kX86FeaturePCLMULQDQ);
if (regs.ecx & 0x00000008U) cpuInfo->addFeature(CpuInfo::kX86FeatureMONITOR);
if (regs.ecx & 0x00000200U) cpuInfo->addFeature(CpuInfo::kX86FeatureSSSE3);
if (regs.ecx & 0x00002000U) cpuInfo->addFeature(CpuInfo::kX86FeatureCMPXCHG16B);
if (regs.ecx & 0x00080000U) cpuInfo->addFeature(CpuInfo::kX86FeatureSSE4_1);
if (regs.ecx & 0x00100000U) cpuInfo->addFeature(CpuInfo::kX86FeatureSSE4_2);
if (regs.ecx & 0x00400000U) cpuInfo->addFeature(CpuInfo::kX86FeatureMOVBE);
if (regs.ecx & 0x00800000U) cpuInfo->addFeature(CpuInfo::kX86FeaturePOPCNT);
if (regs.ecx & 0x02000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureAESNI);
if (regs.ecx & 0x04000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureXSAVE);
if (regs.ecx & 0x08000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureOSXSAVE);
if (regs.ecx & 0x40000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureRDRAND);
if (regs.edx & 0x00000010U) cpuInfo->addFeature(CpuInfo::kX86FeatureRDTSC);
if (regs.edx & 0x00000020U) cpuInfo->addFeature(CpuInfo::kX86FeatureMSR);
if (regs.edx & 0x00000100U) cpuInfo->addFeature(CpuInfo::kX86FeatureCMPXCHG8B);
if (regs.edx & 0x00008000U) cpuInfo->addFeature(CpuInfo::kX86FeatureCMOV);
if (regs.edx & 0x00080000U) cpuInfo->addFeature(CpuInfo::kX86FeatureCLFLUSH);
if (regs.edx & 0x00800000U) cpuInfo->addFeature(CpuInfo::kX86FeatureMMX);
if (regs.edx & 0x01000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureFXSR);
if (regs.edx & 0x02000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureSSE)
.addFeature(CpuInfo::kX86FeatureMMX2);
if (regs.edx & 0x04000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureSSE)
.addFeature(CpuInfo::kX86FeatureSSE2);
if (regs.edx & 0x10000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureMT);
// Get the content of XCR0 if supported by CPU and enabled by OS.
if ((regs.ecx & 0x0C000000U) == 0x0C000000U) {
x86CallXGetBV(&xcr0, 0);
}
// Detect AVX+.
if (regs.ecx & 0x10000000U) {
// - XCR0[2:1] == 11b
// XMM & YMM states need to be enabled by OS.
if ((xcr0.eax & 0x00000006U) == 0x00000006U) {
cpuInfo->addFeature(CpuInfo::kX86FeatureAVX);
if (regs.ecx & 0x00001000U) cpuInfo->addFeature(CpuInfo::kX86FeatureFMA);
if (regs.ecx & 0x20000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureF16C);
}
}
}
// --------------------------------------------------------------------------
// [CPUID EAX=0x7]
// --------------------------------------------------------------------------
// Detect new features if the processor supports CPUID-07.
bool maybeMPX = false;
if (maxId >= 0x7) {
x86CallCpuId(&regs, 0x7);
if (regs.ebx & 0x00000001U) cpuInfo->addFeature(CpuInfo::kX86FeatureFSGSBASE);
if (regs.ebx & 0x00000008U) cpuInfo->addFeature(CpuInfo::kX86FeatureBMI);
if (regs.ebx & 0x00000010U) cpuInfo->addFeature(CpuInfo::kX86FeatureHLE);
if (regs.ebx & 0x00000080U) cpuInfo->addFeature(CpuInfo::kX86FeatureSMEP);
if (regs.ebx & 0x00000100U) cpuInfo->addFeature(CpuInfo::kX86FeatureBMI2);
if (regs.ebx & 0x00000200U) cpuInfo->addFeature(CpuInfo::kX86FeatureERMS);
if (regs.ebx & 0x00000800U) cpuInfo->addFeature(CpuInfo::kX86FeatureRTM);
if (regs.ebx & 0x00004000U) maybeMPX = true;
if (regs.ebx & 0x00040000U) cpuInfo->addFeature(CpuInfo::kX86FeatureRDSEED);
if (regs.ebx & 0x00080000U) cpuInfo->addFeature(CpuInfo::kX86FeatureADX);
if (regs.ebx & 0x00100000U) cpuInfo->addFeature(CpuInfo::kX86FeatureSMAP);
if (regs.ebx & 0x00400000U) cpuInfo->addFeature(CpuInfo::kX86FeaturePCOMMIT);
if (regs.ebx & 0x00800000U) cpuInfo->addFeature(CpuInfo::kX86FeatureCLFLUSHOPT);
if (regs.ebx & 0x01000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureCLWB);
if (regs.ebx & 0x20000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureSHA);
if (regs.ecx & 0x00000001U) cpuInfo->addFeature(CpuInfo::kX86FeaturePREFETCHWT1);
// TSX is supported if at least one of `HLE` and `RTM` is supported.
if (regs.ebx & 0x00000810U) cpuInfo->addFeature(CpuInfo::kX86FeatureTSX);
// Detect AVX2.
if (cpuInfo->hasFeature(CpuInfo::kX86FeatureAVX)) {
if (regs.ebx & 0x00000020U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX2);
}
// Detect AVX-512+.
if (regs.ebx & 0x00010000U) {
// - XCR0[2:1] == 11b
// XMM/YMM states need to be enabled by OS.
// - XCR0[7:5] == 111b
// Upper 256-bit of ZMM0-XMM15 and ZMM16-ZMM31 need to be enabled by the OS.
if ((xcr0.eax & 0x000000E6U) == 0x000000E6U) {
cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512_F);
if (regs.ebx & 0x00020000U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512_DQ);
if (regs.ebx & 0x00200000U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512_IFMA);
if (regs.ebx & 0x04000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512_PFI);
if (regs.ebx & 0x08000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512_ERI);
if (regs.ebx & 0x10000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512_CDI);
if (regs.ebx & 0x40000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512_BW);
if (regs.ebx & 0x80000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512_VL);
if (regs.ecx & 0x00000002U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512_VBMI);
if (regs.ecx & 0x00004000U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512_VPOPCNTDQ);
if (regs.edx & 0x00000004U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512_4VNNIW);
if (regs.edx & 0x00000008U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512_4FMAPS);
}
}
}
// --------------------------------------------------------------------------
// [CPUID EAX=0xD]
// --------------------------------------------------------------------------
if (maxId >= 0xD) {
x86CallCpuId(&regs, 0xD, 0);
// Both CPUID result and XCR0 has to be enabled to have support for MPX.
if (((regs.eax & xcr0.eax) & 0x00000018U) == 0x00000018U && maybeMPX)
cpuInfo->addFeature(CpuInfo::kX86FeatureMPX);
x86CallCpuId(&regs, 0xD, 1);
if (regs.eax & 0x00000001U) cpuInfo->addFeature(CpuInfo::kX86FeatureXSAVEOPT);
if (regs.eax & 0x00000002U) cpuInfo->addFeature(CpuInfo::kX86FeatureXSAVEC);
if (regs.eax & 0x00000008U) cpuInfo->addFeature(CpuInfo::kX86FeatureXSAVES);
}
// --------------------------------------------------------------------------
// [CPUID EAX=0x80000000...maxId]
// --------------------------------------------------------------------------
// The highest EAX that we understand.
uint32_t kHighestProcessedEAX = 0x80000008U;
// Several CPUID calls are required to get the whole branc string. It's easy
// to copy one DWORD at a time instead of performing a byte copy.
uint32_t* brand = reinterpret_cast<uint32_t*>(cpuInfo->_brandString);
i = maxId = 0x80000000U;
do {
x86CallCpuId(&regs, i);
switch (i) {
case 0x80000000U:
maxId = std::min<uint32_t>(regs.eax, kHighestProcessedEAX);
break;
case 0x80000001U:
if (regs.ecx & 0x00000001U) cpuInfo->addFeature(CpuInfo::kX86FeatureLAHFSAHF);
if (regs.ecx & 0x00000020U) cpuInfo->addFeature(CpuInfo::kX86FeatureLZCNT);
if (regs.ecx & 0x00000040U) cpuInfo->addFeature(CpuInfo::kX86FeatureSSE4A);
if (regs.ecx & 0x00000080U) cpuInfo->addFeature(CpuInfo::kX86FeatureMSSE);
if (regs.ecx & 0x00000100U) cpuInfo->addFeature(CpuInfo::kX86FeaturePREFETCHW);
if (regs.ecx & 0x00200000U) cpuInfo->addFeature(CpuInfo::kX86FeatureTBM);
if (regs.edx & 0x00100000U) cpuInfo->addFeature(CpuInfo::kX86FeatureNX);
if (regs.edx & 0x00200000U) cpuInfo->addFeature(CpuInfo::kX86FeatureFXSROPT);
if (regs.edx & 0x00400000U) cpuInfo->addFeature(CpuInfo::kX86FeatureMMX2);
if (regs.edx & 0x08000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureRDTSCP);
if (regs.edx & 0x40000000U) cpuInfo->addFeature(CpuInfo::kX86Feature3DNOW2)
.addFeature(CpuInfo::kX86FeatureMMX2);
if (regs.edx & 0x80000000U) cpuInfo->addFeature(CpuInfo::kX86Feature3DNOW);
if (cpuInfo->hasFeature(CpuInfo::kX86FeatureAVX)) {
if (regs.ecx & 0x00000800U) cpuInfo->addFeature(CpuInfo::kX86FeatureXOP);
if (regs.ecx & 0x00010000U) cpuInfo->addFeature(CpuInfo::kX86FeatureFMA4);
}
// These seem to be only supported by AMD.
if (cpuInfo->getVendorId() == CpuInfo::kVendorAMD) {
if (regs.ecx & 0x00000010U) cpuInfo->addFeature(CpuInfo::kX86FeatureALTMOVCR8);
}
break;
case 0x80000002U:
case 0x80000003U:
case 0x80000004U:
*brand++ = regs.eax;
*brand++ = regs.ebx;
*brand++ = regs.ecx;
*brand++ = regs.edx;
// Go directly to the last one.
if (i == 0x80000004U) i = 0x80000008U - 1;
break;
case 0x80000008U:
if (regs.ebx & 0x00000001U) cpuInfo->addFeature(CpuInfo::kX86FeatureCLZERO);
break;
}
} while (++i <= maxId);
// Simplify CPU brand string by removing unnecessary spaces.
x86SimplifyBrandString(cpuInfo->_brandString);
}
#endif // ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64
// ============================================================================
// [asmjit::CpuInfo - Detect - HWThreadsCount]
// ============================================================================
//! Detect the number of hardware threads (logical processors) available.
//!
//! Falls back to 1 when the platform provides no way to query the count.
static ASMJIT_INLINE uint32_t cpuDetectHWThreadsCount() noexcept {
#if ASMJIT_OS_WINDOWS
  // Windows exposes the logical processor count via GetSystemInfo().
  SYSTEM_INFO sysInfo;
  ::GetSystemInfo(&sysInfo);
  return sysInfo.dwNumberOfProcessors;
#elif ASMJIT_OS_POSIX && defined(_SC_NPROCESSORS_ONLN)
  // POSIX: ask for the number of processors currently online. sysconf()
  // returns a negative value on failure, so guard against that.
  long onlineCount = ::sysconf(_SC_NPROCESSORS_ONLN);
  return (onlineCount > 0) ? static_cast<uint32_t>(onlineCount) : uint32_t(1);
#else
  // Unknown platform - assume a single hardware thread.
  return 1;
#endif
}
// ============================================================================
// [asmjit::CpuInfo - Detect]
// ============================================================================
ASMJIT_FAVOR_SIZE void CpuInfo::detect() noexcept {
  // Wipe any previously detected state so repeated calls start clean.
  reset();
#if ASMJIT_ARCH_ARM32 || ASMJIT_ARCH_ARM64
  // Compiled for an ARM host - run the ARM-specific detection.
  armDetectCpuInfo(this);
#endif // ASMJIT_ARCH_ARM32 || ASMJIT_ARCH_ARM64
#if ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64
  // Compiled for an X86/X64 host - run the CPUID-based detection.
  x86DetectCpuInfo(this);
#endif // ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64
  // The hardware thread count is queried the same way on all architectures.
  _hwThreadsCount = cpuDetectHWThreadsCount();
}
// ============================================================================
// [asmjit::CpuInfo - GetHost]
// ============================================================================
//! Helper used by `CpuInfo::getHost()` - detects the host CPU at construction.
struct HostCpuInfo : public CpuInfo {
  ASMJIT_INLINE HostCpuInfo() noexcept : CpuInfo() { detect(); }
};
const CpuInfo& CpuInfo::getHost() noexcept {
  // Function-local static: detection runs once, on the first call.
  static HostCpuInfo host;
  return host;
}
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"

View file

@ -0,0 +1,373 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_BASE_CPUINFO_H
#define _ASMJIT_BASE_CPUINFO_H
// [Dependencies]
#include "../base/arch.h"
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
//! \addtogroup asmjit_base
//! \{
// ============================================================================
// [asmjit::CpuFeatures]
// ============================================================================
//! A fixed-size bit-set of CPU features.
//!
//! Stores up to `kMaxFeatures` feature flags packed into an array of
//! machine-word sized `BitWord`s.
class CpuFeatures {
public:
  typedef uintptr_t BitWord;

  enum {
    kMaxFeatures = 128,
    kBitWordSize = static_cast<int>(sizeof(BitWord)) * 8,
    kNumBitWords = kMaxFeatures / kBitWordSize
  };

  // --------------------------------------------------------------------------
  // [Construction / Destruction]
  // --------------------------------------------------------------------------

  ASMJIT_INLINE CpuFeatures() noexcept { reset(); }
  ASMJIT_INLINE CpuFeatures(const CpuFeatures& other) noexcept { init(other); }

  // --------------------------------------------------------------------------
  // [Init / Reset]
  // --------------------------------------------------------------------------

  //! Copy all feature bits from `other` (bitwise copy of the whole object).
  ASMJIT_INLINE void init(const CpuFeatures& other) noexcept { ::memcpy(this, &other, sizeof(*this)); }
  //! Clear all feature bits.
  ASMJIT_INLINE void reset() noexcept { ::memset(this, 0, sizeof(*this)); }

  // --------------------------------------------------------------------------
  // [Ops]
  // --------------------------------------------------------------------------

  //! Get all features as a `BitWord` array.
  ASMJIT_INLINE BitWord* getBits() noexcept { return _bits; }
  //! Get all features as a `BitWord` array (const).
  ASMJIT_INLINE const BitWord* getBits() const noexcept { return _bits; }

  //! Get whether the feature `feature` is present.
  ASMJIT_INLINE bool has(uint32_t feature) const noexcept {
    ASMJIT_ASSERT(feature < kMaxFeatures);
    uint32_t wordIndex = feature / kBitWordSize;
    uint32_t bitIndex = feature % kBitWordSize;
    return ((_bits[wordIndex] >> bitIndex) & 0x1) != 0;
  }

  //! Get whether every feature of `other` is also present in `this`.
  ASMJIT_INLINE bool hasAll(const CpuFeatures& other) const noexcept {
    uint32_t wordIndex = 0;
    while (wordIndex < kNumBitWords) {
      // `other` is a subset iff masking by our bits leaves it unchanged.
      if ((_bits[wordIndex] & other._bits[wordIndex]) != other._bits[wordIndex])
        return false;
      wordIndex++;
    }
    return true;
  }

  //! Add a CPU `feature` to the set.
  ASMJIT_INLINE CpuFeatures& add(uint32_t feature) noexcept {
    ASMJIT_ASSERT(feature < kMaxFeatures);
    uint32_t wordIndex = feature / kBitWordSize;
    uint32_t bitIndex = feature % kBitWordSize;
    _bits[wordIndex] |= static_cast<BitWord>(1) << bitIndex;
    return *this;
  }

  //! Remove a CPU `feature` from the set.
  ASMJIT_INLINE CpuFeatures& remove(uint32_t feature) noexcept {
    ASMJIT_ASSERT(feature < kMaxFeatures);
    uint32_t wordIndex = feature / kBitWordSize;
    uint32_t bitIndex = feature % kBitWordSize;
    _bits[wordIndex] &= ~(static_cast<BitWord>(1) << bitIndex);
    return *this;
  }

  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------

  BitWord _bits[kNumBitWords];
};
// ============================================================================
// [asmjit::CpuInfo]
// ============================================================================
//! CPU information.
//!
//! Describes a CPU: its architecture, vendor/family/model/stepping ids,
//! hardware thread count, feature set, and architecture-specific data.
class CpuInfo {
public:
  //! CPU vendor ID.
  ASMJIT_ENUM(Vendor) {
    kVendorNone = 0, //!< Generic or unknown.
    kVendorIntel = 1, //!< Intel vendor.
    kVendorAMD = 2, //!< AMD vendor.
    kVendorVIA = 3 //!< VIA vendor.
  };
  //! ARM/ARM64 CPU features.
  ASMJIT_ENUM(ArmFeatures) {
    kArmFeatureV6 = 1, //!< ARMv6 instruction set.
    kArmFeatureV7, //!< ARMv7 instruction set.
    kArmFeatureV8, //!< ARMv8 instruction set.
    kArmFeatureTHUMB, //!< CPU provides THUMB v1 instruction set (THUMB mode).
    kArmFeatureTHUMB2, //!< CPU provides THUMB v2 instruction set (THUMB mode).
    kArmFeatureVFPv2, //!< CPU provides VFPv2 instruction set.
    kArmFeatureVFPv3, //!< CPU provides VFPv3 instruction set.
    kArmFeatureVFPv4, //!< CPU provides VFPv4 instruction set.
    kArmFeatureVFP_D32, //!< CPU provides 32 VFP-D (64-bit) registers.
    kArmFeatureEDSP, //!< CPU provides EDSP extensions.
    kArmFeatureASIMD, //!< CPU provides 'Advanced SIMD'.
    kArmFeatureIDIVA, //!< CPU provides hardware SDIV and UDIV (ARM mode).
    kArmFeatureIDIVT, //!< CPU provides hardware SDIV and UDIV (THUMB mode).
    kArmFeatureAES, //!< CPU provides AES instructions (ARM64 only).
    kArmFeatureCRC32, //!< CPU provides CRC32 instructions.
    kArmFeaturePMULL, //!< CPU provides PMULL instructions (ARM64 only).
    kArmFeatureSHA1, //!< CPU provides SHA1 instructions.
    kArmFeatureSHA256, //!< CPU provides SHA256 instructions.
    kArmFeatureAtomics64, //!< CPU provides 64-bit load/store atomics (ARM64 only).
    kArmFeaturesCount //!< Count of ARM/ARM64 CPU features.
  };
  //! X86/X64 CPU features.
  ASMJIT_ENUM(X86Features) {
    kX86FeatureI486 = 1, //!< CPU is at least I486.
    kX86FeatureNX, //!< CPU has Not-Execute-Bit.
    kX86FeatureMT, //!< CPU has multi-threading.
    kX86FeatureALTMOVCR8, //!< CPU supports `LOCK MOV CR8` (AMD CPUs).
    kX86FeatureCMOV, //!< CPU has CMOV.
    kX86FeatureCMPXCHG8B, //!< CPU has CMPXCHG8B.
    kX86FeatureCMPXCHG16B, //!< CPU has CMPXCHG16B (x64).
    kX86FeatureMSR, //!< CPU has RDMSR/WRMSR.
    kX86FeatureRDTSC, //!< CPU has RDTSC.
    kX86FeatureRDTSCP, //!< CPU has RDTSCP.
    kX86FeatureCLFLUSH, //!< CPU has CLFLUSH.
    kX86FeatureCLFLUSHOPT, //!< CPU has CLFLUSHOPT.
    kX86FeatureCLWB, //!< CPU has CLWB.
    kX86FeatureCLZERO, //!< CPU has CLZERO.
    kX86FeaturePCOMMIT, //!< CPU has PCOMMIT.
    kX86FeaturePREFETCHW, //!< CPU has PREFETCHW.
    kX86FeaturePREFETCHWT1, //!< CPU has PREFETCHWT1.
    kX86FeatureLAHFSAHF, //!< CPU has LAHF/SAHF.
    kX86FeatureFXSR, //!< CPU has FXSAVE/FXRSTOR.
    kX86FeatureFXSROPT, //!< CPU has FXSAVE/FXRSTOR (optimized).
    kX86FeatureMMX, //!< CPU has MMX.
    kX86FeatureMMX2, //!< CPU has extended MMX.
    kX86Feature3DNOW, //!< CPU has 3DNOW.
    kX86Feature3DNOW2, //!< CPU has 3DNOW2 (enhanced).
    kX86FeatureGEODE, //!< CPU has GEODE extensions (few additions to 3DNOW).
    kX86FeatureSSE, //!< CPU has SSE.
    kX86FeatureSSE2, //!< CPU has SSE2.
    kX86FeatureSSE3, //!< CPU has SSE3.
    kX86FeatureSSSE3, //!< CPU has SSSE3.
    kX86FeatureSSE4A, //!< CPU has SSE4.A.
    kX86FeatureSSE4_1, //!< CPU has SSE4.1.
    kX86FeatureSSE4_2, //!< CPU has SSE4.2.
    kX86FeatureMSSE, //!< CPU has Misaligned SSE (MSSE).
    kX86FeatureMONITOR, //!< CPU has MONITOR and MWAIT.
    kX86FeatureMOVBE, //!< CPU has MOVBE.
    kX86FeaturePOPCNT, //!< CPU has POPCNT.
    kX86FeatureLZCNT, //!< CPU has LZCNT.
    kX86FeatureAESNI, //!< CPU has AESNI.
    kX86FeaturePCLMULQDQ, //!< CPU has PCLMULQDQ.
    kX86FeatureRDRAND, //!< CPU has RDRAND.
    kX86FeatureRDSEED, //!< CPU has RDSEED.
    kX86FeatureSMAP, //!< CPU has SMAP (supervisor-mode access prevention).
    kX86FeatureSMEP, //!< CPU has SMEP (supervisor-mode execution prevention).
    kX86FeatureSHA, //!< CPU has SHA-1 and SHA-256.
    kX86FeatureXSAVE, //!< CPU has XSAVE support (XSAVE/XRSTOR, XSETBV/XGETBV, and XCR).
    kX86FeatureXSAVEC, //!< CPU has XSAVEC support (XSAVEC).
    kX86FeatureXSAVES, //!< CPU has XSAVES support (XSAVES/XRSTORS).
    kX86FeatureXSAVEOPT, //!< CPU has XSAVEOPT support (XSAVEOPT/XSAVEOPT64).
    kX86FeatureOSXSAVE, //!< CPU has XSAVE enabled by OS.
    kX86FeatureAVX, //!< CPU has AVX.
    kX86FeatureAVX2, //!< CPU has AVX2.
    kX86FeatureF16C, //!< CPU has F16C.
    kX86FeatureFMA, //!< CPU has FMA.
    kX86FeatureFMA4, //!< CPU has FMA4.
    kX86FeatureXOP, //!< CPU has XOP.
    kX86FeatureBMI, //!< CPU has BMI (bit manipulation instructions #1).
    kX86FeatureBMI2, //!< CPU has BMI2 (bit manipulation instructions #2).
    kX86FeatureADX, //!< CPU has ADX (multi-precision add-carry instruction extensions).
    kX86FeatureTBM, //!< CPU has TBM (trailing bit manipulation).
    kX86FeatureMPX, //!< CPU has MPX (memory protection extensions).
    kX86FeatureHLE, //!< CPU has HLE.
    kX86FeatureRTM, //!< CPU has RTM.
    kX86FeatureTSX, //!< CPU has TSX.
    kX86FeatureERMS, //!< CPU has ERMS (enhanced REP MOVSB/STOSB).
    kX86FeatureFSGSBASE, //!< CPU has FSGSBASE.
    kX86FeatureAVX512_F, //!< CPU has AVX512-F (foundation).
    kX86FeatureAVX512_CDI, //!< CPU has AVX512-CDI (conflict detection).
    kX86FeatureAVX512_PFI, //!< CPU has AVX512-PFI (prefetch instructions).
    kX86FeatureAVX512_ERI, //!< CPU has AVX512-ERI (exponential and reciprocal).
    kX86FeatureAVX512_DQ, //!< CPU has AVX512-DQ (DWORD/QWORD).
    kX86FeatureAVX512_BW, //!< CPU has AVX512-BW (BYTE/WORD).
    kX86FeatureAVX512_VL, //!< CPU has AVX512-VL (vector length extensions).
    kX86FeatureAVX512_IFMA, //!< CPU has AVX512-IFMA (integer fused-multiply-add using 52-bit precision).
    kX86FeatureAVX512_VBMI, //!< CPU has AVX512-VBMI (vector byte manipulation).
    kX86FeatureAVX512_VPOPCNTDQ, //!< CPU has AVX512-VPOPCNTDQ (VPOPCNT[D|Q] instructions).
    kX86FeatureAVX512_4VNNIW, //!< CPU has AVX512-4VNNIW (vector NN instructions word variable precision).
    kX86FeatureAVX512_4FMAPS, //!< CPU has AVX512-4FMAPS (FMA packed single).
    kX86FeaturesCount //!< Count of X86/X64 CPU features.
  };
  // --------------------------------------------------------------------------
  // [ArmInfo]
  // --------------------------------------------------------------------------
  //! ARM-specific data (currently empty, reserved for future use).
  struct ArmData {
  };
  // --------------------------------------------------------------------------
  // [X86Info]
  // --------------------------------------------------------------------------
  //! X86/X64-specific data filled from CPUID results.
  struct X86Data {
    uint32_t _processorType; //!< Processor type.
    uint32_t _brandIndex; //!< Brand index.
    uint32_t _flushCacheLineSize; //!< Flush cache line size (in bytes).
    uint32_t _maxLogicalProcessors; //!< Maximum number of addressable IDs for logical processors.
  };
  // --------------------------------------------------------------------------
  // [Construction / Destruction]
  // --------------------------------------------------------------------------
  ASMJIT_INLINE CpuInfo() noexcept { reset(); }
  ASMJIT_INLINE CpuInfo(const CpuInfo& other) noexcept { init(other); }
  // --------------------------------------------------------------------------
  // [Init / Reset]
  // --------------------------------------------------------------------------
  //! Initialize CpuInfo to the given architecture, see \ref ArchInfo.
  ASMJIT_INLINE void initArch(uint32_t archType, uint32_t archMode = 0) noexcept {
    _archInfo.init(archType, archMode);
  }
  //! Copy `other` into this CpuInfo (bitwise copy of the whole object).
  ASMJIT_INLINE void init(const CpuInfo& other) noexcept { ::memcpy(this, &other, sizeof(*this)); }
  //! Zero all members.
  ASMJIT_INLINE void reset() noexcept { ::memset(this, 0, sizeof(*this)); }
  // --------------------------------------------------------------------------
  // [Detect]
  // --------------------------------------------------------------------------
  //! Detect the host CPU and fill in all members.
  ASMJIT_API void detect() noexcept;
  // --------------------------------------------------------------------------
  // [Accessors]
  // --------------------------------------------------------------------------
  //! Get generic architecture information.
  ASMJIT_INLINE const ArchInfo& getArchInfo() const noexcept { return _archInfo; }
  //! Get CPU architecture type, see \ref ArchInfo::Type.
  ASMJIT_INLINE uint32_t getArchType() const noexcept { return _archInfo.getType(); }
  //! Get CPU architecture sub-type, see \ref ArchInfo::SubType.
  ASMJIT_INLINE uint32_t getArchSubType() const noexcept { return _archInfo.getSubType(); }
  //! Get CPU vendor ID.
  ASMJIT_INLINE uint32_t getVendorId() const noexcept { return _vendorId; }
  //! Get CPU family ID.
  ASMJIT_INLINE uint32_t getFamily() const noexcept { return _family; }
  //! Get CPU model ID.
  ASMJIT_INLINE uint32_t getModel() const noexcept { return _model; }
  //! Get CPU stepping.
  ASMJIT_INLINE uint32_t getStepping() const noexcept { return _stepping; }
  //! Get number of hardware threads available.
  ASMJIT_INLINE uint32_t getHwThreadsCount() const noexcept {
    return _hwThreadsCount;
  }
  //! Get all CPU features.
  ASMJIT_INLINE const CpuFeatures& getFeatures() const noexcept { return _features; }
  //! Get whether CPU has a `feature`.
  ASMJIT_INLINE bool hasFeature(uint32_t feature) const noexcept { return _features.has(feature); }
  //! Add a CPU `feature`.
  ASMJIT_INLINE CpuInfo& addFeature(uint32_t feature) noexcept { _features.add(feature); return *this; }
  //! Get CPU vendor string.
  ASMJIT_INLINE const char* getVendorString() const noexcept { return _vendorString; }
  //! Get CPU brand string.
  ASMJIT_INLINE const char* getBrandString() const noexcept { return _brandString; }
  // --------------------------------------------------------------------------
  // [Accessors - ARM]
  // --------------------------------------------------------------------------
  // (None yet - `ArmData` is currently empty.)
  // --------------------------------------------------------------------------
  // [Accessors - X86]
  // --------------------------------------------------------------------------
  //! Get processor type.
  ASMJIT_INLINE uint32_t getX86ProcessorType() const noexcept {
    return _x86Data._processorType;
  }
  //! Get brand index.
  ASMJIT_INLINE uint32_t getX86BrandIndex() const noexcept {
    return _x86Data._brandIndex;
  }
  //! Get flush cache line size.
  ASMJIT_INLINE uint32_t getX86FlushCacheLineSize() const noexcept {
    return _x86Data._flushCacheLineSize;
  }
  //! Get maximum logical processors count.
  ASMJIT_INLINE uint32_t getX86MaxLogicalProcessors() const noexcept {
    return _x86Data._maxLogicalProcessors;
  }
  // --------------------------------------------------------------------------
  // [Statics]
  // --------------------------------------------------------------------------
  //! Get the host CPU information (detected once, cached afterwards).
  ASMJIT_API static const CpuInfo& getHost() noexcept;
  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------
  ArchInfo _archInfo; //!< CPU architecture information.
  uint32_t _vendorId; //!< CPU vendor id, see \ref Vendor.
  uint32_t _family; //!< CPU family ID.
  uint32_t _model; //!< CPU model ID.
  uint32_t _stepping; //!< CPU stepping.
  uint32_t _hwThreadsCount; //!< Number of hardware threads.
  CpuFeatures _features; //!< CPU features.
  char _vendorString[16]; //!< CPU vendor string.
  char _brandString[64]; //!< CPU brand string.
  // Architecture specific data - only the member matching the detected
  // architecture is meaningful.
  union {
    ArmData _armData;
    X86Data _x86Data;
  };
};
//! \}
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"
// [Guard]
#endif // _ASMJIT_BASE_CPUINFO_H

View file

@ -0,0 +1,186 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Export]
#define ASMJIT_EXPORTS
// [Dependencies]
#include "../base/arch.h"
#include "../base/func.h"
#if defined(ASMJIT_BUILD_X86)
#include "../x86/x86internal_p.h"
#include "../x86/x86operand.h"
#endif // ASMJIT_BUILD_X86
#if defined(ASMJIT_BUILD_ARM)
#include "../arm/arminternal_p.h"
#include "../arm/armoperand.h"
#endif // ASMJIT_BUILD_ARM
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
// ============================================================================
// [asmjit::CallConv - Init / Reset]
// ============================================================================
// Initialize the calling convention `ccId`, dispatching to the backend that
// owns the convention's family. Returns `kErrorInvalidArgument` if the id is
// unknown or the matching backend wasn't compiled in.
ASMJIT_FAVOR_SIZE Error CallConv::init(uint32_t ccId) noexcept {
  // Start from a clean state before the backend fills it in.
  reset();
#if defined(ASMJIT_BUILD_X86)
  if (CallConv::isX86Family(ccId))
    return X86Internal::initCallConv(*this, ccId);
#endif // ASMJIT_BUILD_X86
#if defined(ASMJIT_BUILD_ARM)
  if (CallConv::isArmFamily(ccId))
    return ArmInternal::initCallConv(*this, ccId);
#endif // ASMJIT_BUILD_ARM
  return DebugUtils::errored(kErrorInvalidArgument);
}
// ============================================================================
// [asmjit::FuncDetail - Init / Reset]
// ============================================================================
// Initialize function details from the signature `sign`: resolve the calling
// convention, concretize all abstract argument/return type-ids, and let the
// architecture backend compute register/stack assignment.
ASMJIT_FAVOR_SIZE Error FuncDetail::init(const FuncSignature& sign) {
  uint32_t ccId = sign.getCallConv();
  uint32_t count = sign.getArgCount();

  // Reject signatures declaring more arguments than FuncDetail can track.
  if (ASMJIT_UNLIKELY(count > kFuncArgCount))
    return DebugUtils::errored(kErrorInvalidArgument);

  CallConv& cc = _callConv;
  ASMJIT_PROPAGATE(cc.init(ccId));

  // Natural GP register width of the target: 4 bytes on X86, 8 otherwise.
  uint32_t gpSize = (cc.getArchType() == ArchInfo::kTypeX86) ? 4 : 8;
  uint32_t deabstractDelta = TypeId::deabstractDeltaOfSize(gpSize);

  // Translate every abstract argument type-id into a concrete one.
  const uint8_t* argTypes = sign.getArgs();
  for (uint32_t argIndex = 0; argIndex < count; argIndex++)
    _args[argIndex].initTypeId(TypeId::deabstract(argTypes[argIndex], deabstractDelta));
  _argCount = static_cast<uint8_t>(count);

  // A non-void return type occupies the first (and only) return slot.
  uint32_t retTypeId = sign.getRet();
  if (retTypeId != TypeId::kVoid) {
    _rets[0].initTypeId(TypeId::deabstract(retTypeId, deabstractDelta));
    _retCount = 1;
  }

#if defined(ASMJIT_BUILD_X86)
  if (CallConv::isX86Family(ccId))
    return X86Internal::initFuncDetail(*this, sign, gpSize);
#endif // ASMJIT_BUILD_X86
#if defined(ASMJIT_BUILD_ARM)
  if (CallConv::isArmFamily(ccId))
    return ArmInternal::initFuncDetail(*this, sign, gpSize);
#endif // ASMJIT_BUILD_ARM

  // We should never bubble here as if `cc.init()` succeeded then there has to
  // be an implementation for the current architecture. However, stay safe.
  return DebugUtils::errored(kErrorInvalidArgument);
}
// ============================================================================
// [asmjit::FuncFrameLayout - Init / Reset]
// ============================================================================
// Compute the final frame layout for `func` with frame information `ffi`,
// dispatching to the backend that owns the function's calling convention.
ASMJIT_FAVOR_SIZE Error FuncFrameLayout::init(const FuncDetail& func, const FuncFrameInfo& ffi) noexcept {
  uint32_t ccId = func.getCallConv().getId();
#if defined(ASMJIT_BUILD_X86)
  if (CallConv::isX86Family(ccId))
    return X86Internal::initFrameLayout(*this, func, ffi);
#endif // ASMJIT_BUILD_X86
#if defined(ASMJIT_BUILD_ARM)
  if (CallConv::isArmFamily(ccId))
    return ArmInternal::initFrameLayout(*this, func, ffi);
#endif // ASMJIT_BUILD_ARM
  return DebugUtils::errored(kErrorInvalidArgument);
}
// ============================================================================
// [asmjit::FuncArgsMapper]
// ============================================================================
// Update `ffi` with the register/stack requirements implied by this argument
// mapping. Requires an attached FuncDetail; fails with `kErrorInvalidState`
// without one.
ASMJIT_FAVOR_SIZE Error FuncArgsMapper::updateFrameInfo(FuncFrameInfo& ffi) const noexcept {
  const FuncDetail* func = getFuncDetail();
  if (!func) return DebugUtils::errored(kErrorInvalidState);
  uint32_t ccId = func->getCallConv().getId();
#if defined(ASMJIT_BUILD_X86)
  if (CallConv::isX86Family(ccId))
    return X86Internal::argsToFrameInfo(*this, ffi);
#endif // ASMJIT_BUILD_X86
#if defined(ASMJIT_BUILD_ARM)
  if (CallConv::isArmFamily(ccId))
    return ArmInternal::argsToFrameInfo(*this, ffi);
#endif // ASMJIT_BUILD_ARM
  return DebugUtils::errored(kErrorInvalidArch);
}
// ============================================================================
// [asmjit::FuncUtils]
// ============================================================================
// Emit a function prolog described by `layout` using the backend matching the
// emitter's architecture family.
ASMJIT_FAVOR_SIZE Error FuncUtils::emitProlog(CodeEmitter* emitter, const FuncFrameLayout& layout) {
#if defined(ASMJIT_BUILD_X86)
  if (emitter->getArchInfo().isX86Family())
    return X86Internal::emitProlog(static_cast<X86Emitter*>(emitter), layout);
#endif // ASMJIT_BUILD_X86
#if defined(ASMJIT_BUILD_ARM)
  if (emitter->getArchInfo().isArmFamily())
    return ArmInternal::emitProlog(static_cast<ArmEmitter*>(emitter), layout);
#endif // ASMJIT_BUILD_ARM
  return DebugUtils::errored(kErrorInvalidArch);
}
// Emit a function epilog described by `layout` using the backend matching the
// emitter's architecture family.
ASMJIT_FAVOR_SIZE Error FuncUtils::emitEpilog(CodeEmitter* emitter, const FuncFrameLayout& layout) {
#if defined(ASMJIT_BUILD_X86)
  if (emitter->getArchInfo().isX86Family())
    return X86Internal::emitEpilog(static_cast<X86Emitter*>(emitter), layout);
#endif // ASMJIT_BUILD_X86
#if defined(ASMJIT_BUILD_ARM)
  if (emitter->getArchInfo().isArmFamily())
    return ArmInternal::emitEpilog(static_cast<ArmEmitter*>(emitter), layout);
#endif // ASMJIT_BUILD_ARM
  return DebugUtils::errored(kErrorInvalidArch);
}
// Emit the moves/loads that place incoming function arguments into the
// registers/stack slots chosen by `args`, per the emitter's architecture.
ASMJIT_FAVOR_SIZE Error FuncUtils::allocArgs(CodeEmitter* emitter, const FuncFrameLayout& layout, const FuncArgsMapper& args) {
#if defined(ASMJIT_BUILD_X86)
  if (emitter->getArchInfo().isX86Family())
    return X86Internal::allocArgs(static_cast<X86Emitter*>(emitter), layout, args);
#endif // ASMJIT_BUILD_X86
#if defined(ASMJIT_BUILD_ARM)
  if (emitter->getArchInfo().isArmFamily())
    return ArmInternal::allocArgs(static_cast<ArmEmitter*>(emitter), layout, args);
#endif // ASMJIT_BUILD_ARM
  return DebugUtils::errored(kErrorInvalidArch);
}
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,118 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Export]
#define ASMJIT_EXPORTS
// [Dependencies]
#include "../base/globals.h"
#include "../base/utils.h"
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
// ============================================================================
// [asmjit::DebugUtils]
// ============================================================================
#if !defined(ASMJIT_DISABLE_TEXT)
// Packed table of NUL-separated error messages indexed by error code via
// `Utils::findPackedString()`. The order of entries must match the
// `ErrorCode` enumeration; the trailing "Unknown error" entry is used for
// out-of-range codes (see `errorAsString()`).
static const char errorMessages[] =
  "Ok\0"
  "No heap memory\0"
  "No virtual memory\0"
  "Invalid argument\0"
  "Invalid state\0"
  "Invalid architecture\0"
  "Not initialized\0"
  "Already initialized\0"
  "Feature not enabled\0"
  "Slot occupied\0"
  "No code generated\0"
  "Code too large\0"
  "Invalid label\0"
  "Label index overflow\0"
  "Label already bound\0"
  "Label already defined\0"
  "Label name too long\0"
  "Invalid label name\0"
  "Invalid parent label\0"
  "Non-local label can't have parent\0"
  "Relocation index overflow\0"
  "Invalid relocation entry\0"
  "Invalid instruction\0"
  "Invalid register type\0"
  "Invalid register kind\0"
  "Invalid register's physical id\0"
  "Invalid register's virtual id\0"
  "Invalid prefix combination\0"
  "Invalid lock prefix\0"
  "Invalid xacquire prefix\0"
  "Invalid xrelease prefix\0"
  "Invalid rep prefix\0"
  "Invalid rex prefix\0"
  "Invalid mask, expected {k}\0"
  "Invalid use of {k}\0"
  "Invalid use of {k}{z}\0"
  "Invalid broadcast {1tox}\0"
  "Invalid {er} or {sae} option\0"
  "Invalid address\0"
  "Invalid address index\0"
  "Invalid address scale\0"
  "Invalid use of 64-bit address\0"
  "Invalid displacement\0"
  "Invalid segment\0"
  "Invalid immediate value\0"
  "Invalid operand size\0"
  "Ambiguous operand size\0"
  "Operand size mismatch\0"
  "Invalid type-info\0"
  "Invalid use of a low 8-bit GPB register\0"
  "Invalid use of a 64-bit GPQ register in 32-bit mode\0"
  "Invalid use of an 80-bit float\0"
  "Not consecutive registers\0"
  "No more physical registers\0"
  "Overlapped registers\0"
  "Overlapping register and arguments base-address register\0"
  "Unknown error\0";
#endif // ASMJIT_DISABLE_TEXT
// Translate an error code into a human-readable message. Out-of-range codes
// are clamped to `kErrorCount`, which maps to the trailing "Unknown error"
// entry of the packed message table.
ASMJIT_FAVOR_SIZE const char* DebugUtils::errorAsString(Error err) noexcept {
#if !defined(ASMJIT_DISABLE_TEXT)
  return Utils::findPackedString(errorMessages, std::min<Error>(err, kErrorCount));
#else
  // Text support compiled out - always return an empty string.
  static const char noMessage[] = "";
  return noMessage;
#endif
}
// Write a diagnostic message: to the attached debugger on Windows, to stderr
// everywhere else.
ASMJIT_FAVOR_SIZE void DebugUtils::debugOutput(const char* str) noexcept {
#if ASMJIT_OS_WINDOWS
  ::OutputDebugStringA(str);
#else
  ::fputs(str, stderr);
#endif
}
// Report a failed assertion (`file`/`line`/`msg`) through `debugOutput()` and
// abort the process. Never returns.
ASMJIT_FAVOR_SIZE void DebugUtils::assertionFailed(const char* file, int line, const char* msg) noexcept {
  char str[1024];
  snprintf(str, 1024,
    "[asmjit] Assertion failed at %s (line %d):\n"
    "[asmjit] %s\n", file, line, msg);
  // Support buggy `snprintf` implementations that may not null-terminate on
  // truncation.
  str[1023] = '\0';
  debugOutput(str);
  ::abort();
}
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"

View file

@ -0,0 +1,341 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_BASE_GLOBALS_H
#define _ASMJIT_BASE_GLOBALS_H
// [Dependencies]
#include "../asmjit_build.h"
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
//! \addtogroup asmjit_base
//! \{
// ============================================================================
// [asmjit::Globals]
// ============================================================================
//! Generic "invalid value" constant - all 32 bits set.
enum { kInvalidValue = 0xFFFFFFFFU };
//! AsmJit globals.
namespace Globals {
//! Invalid index
//!
//! Invalid index is the last possible index that is never used in practice. In
//! AsmJit it is used exclusively with strings to indicate that the length of
//! the string is not known and has to be determined.
static const size_t kInvalidIndex = ~static_cast<size_t>(0);
//! Invalid base address.
static const uint64_t kNoBaseAddress = ~static_cast<uint64_t>(0);
//! Global definitions.
ASMJIT_ENUM(Defs) {
  //! Invalid register id.
  kInvalidRegId = 0xFF,
  //! Host memory allocator overhead.
  kAllocOverhead = static_cast<int>(sizeof(intptr_t) * 4),
  //! Aggressive growing strategy threshold.
  kAllocThreshold = 8192 * 1024
};
//! Global limits.
ASMJIT_ENUM(Limits) {
  //! Count of register kinds that are important to Function API and CodeCompiler.
  //! The target architecture can define more register kinds for special registers,
  //! but these will never map to virtual registers and will never be used to pass
  //! and return function arguments and function return values, respectively.
  kMaxVRegKinds = 4,
  //! Maximum number of physical registers of all kinds of all supported
  //! architectures. This is only important for \ref CodeCompiler and its
  //! \ref RAPass (register allocator pass).
  //!
  //! NOTE: The distribution of these registers is architecture specific.
  kMaxPhysRegs = 64,
  //! Maximum alignment.
  kMaxAlignment = 64,
  //! Maximum label or symbol length in bytes (take into consideration that a
  //! single UTF-8 character can take more than single byte to encode it).
  kMaxLabelLength = 2048
};
} // Globals namespace
// ============================================================================
// [asmjit::Error]
// ============================================================================
//! AsmJit error type (`uint32_t`). Zero (\ref kErrorOk) means success.
typedef uint32_t Error;
//! AsmJit error codes.
//!
//! The order of error codes must match the packed `errorMessages` table in
//! `globals.cpp`.
ASMJIT_ENUM(ErrorCode) {
  //! No error (success).
  //!
  //! This is default state and state you want.
  kErrorOk = 0,
  //! Heap memory allocation failed.
  kErrorNoHeapMemory,
  //! Virtual memory allocation failed.
  kErrorNoVirtualMemory,
  //! Invalid argument.
  kErrorInvalidArgument,
  //! Invalid state.
  //!
  //! If this error is returned it means that either you are doing something
  //! wrong or AsmJit caught itself by doing something wrong. This error should
  //! not be underestimated.
  kErrorInvalidState,
  //! Invalid or incompatible architecture.
  kErrorInvalidArch,
  //! The object is not initialized.
  kErrorNotInitialized,
  //! The object is already initialized.
  kErrorAlreadyInitialized,
  //! Built-in feature was disabled at compile time and it's not available.
  kErrorFeatureNotEnabled,
  //! CodeHolder can't have attached more than one \ref Assembler at a time.
  kErrorSlotOccupied,
  //! No code generated.
  //!
  //! Returned by runtime if the \ref CodeHolder contains no code.
  kErrorNoCodeGenerated,
  //! Code generated is larger than allowed.
  kErrorCodeTooLarge,
  //! Attempt to use uninitialized label.
  kErrorInvalidLabel,
  //! Label index overflow - a single `Assembler` instance can hold more than
  //! 2 billion labels (2147483391 to be exact). If there is an attempt to
  //! create more labels this error is returned.
  kErrorLabelIndexOverflow,
  //! Label is already bound.
  kErrorLabelAlreadyBound,
  //! Label is already defined (named labels).
  kErrorLabelAlreadyDefined,
  //! Label name is too long.
  kErrorLabelNameTooLong,
  //! Label must always be local if it's anonymous (without a name).
  kErrorInvalidLabelName,
  //! Parent id passed to `CodeHolder::newNamedLabelId()` was invalid.
  kErrorInvalidParentLabel,
  //! Parent id specified for a non-local (global) label.
  kErrorNonLocalLabelCantHaveParent,
  //! Relocation index overflow.
  kErrorRelocIndexOverflow,
  //! Invalid relocation entry.
  kErrorInvalidRelocEntry,
  //! Invalid instruction.
  kErrorInvalidInstruction,
  //! Invalid register type.
  kErrorInvalidRegType,
  //! Invalid register kind.
  kErrorInvalidRegKind,
  //! Invalid register's physical id.
  kErrorInvalidPhysId,
  //! Invalid register's virtual id.
  kErrorInvalidVirtId,
  //! Invalid prefix combination.
  kErrorInvalidPrefixCombination,
  //! Invalid LOCK prefix.
  kErrorInvalidLockPrefix,
  //! Invalid XACQUIRE prefix.
  kErrorInvalidXAcquirePrefix,
  //! Invalid XRELEASE prefix.
  kErrorInvalidXReleasePrefix,
  //! Invalid REP prefix.
  kErrorInvalidRepPrefix,
  //! Invalid REX prefix.
  kErrorInvalidRexPrefix,
  //! Invalid mask register (not 'k').
  kErrorInvalidKMaskReg,
  //! Invalid {k} use (not supported by the instruction).
  kErrorInvalidKMaskUse,
  //! Invalid {k}{z} use (not supported by the instruction).
  kErrorInvalidKZeroUse,
  //! Invalid broadcast - Currently only related to invalid use of AVX-512 {1tox}.
  kErrorInvalidBroadcast,
  //! Invalid 'embedded-rounding' {er} or 'suppress-all-exceptions' {sae} (AVX-512).
  kErrorInvalidEROrSAE,
  //! Invalid address used (not encodable).
  kErrorInvalidAddress,
  //! Invalid index register used in memory address (not encodable).
  kErrorInvalidAddressIndex,
  //! Invalid address scale (not encodable).
  kErrorInvalidAddressScale,
  //! Invalid use of 64-bit address.
  kErrorInvalidAddress64Bit,
  //! Invalid displacement (not encodable).
  kErrorInvalidDisplacement,
  //! Invalid segment (X86).
  kErrorInvalidSegment,
  //! Invalid immediate (out of bounds on X86 and invalid pattern on ARM).
  kErrorInvalidImmediate,
  //! Invalid operand size.
  kErrorInvalidOperandSize,
  //! Ambiguous operand size (memory has zero size while it's required to
  //! determine the operation type).
  kErrorAmbiguousOperandSize,
  //! Mismatching operand size (size of multiple operands doesn't match the operation size).
  kErrorOperandSizeMismatch,
  //! Invalid TypeId.
  kErrorInvalidTypeId,
  //! Invalid use of a 8-bit GPB-HIGH register.
  kErrorInvalidUseOfGpbHi,
  //! Invalid use of a 64-bit GPQ register in 32-bit mode.
  kErrorInvalidUseOfGpq,
  //! Invalid use of an 80-bit float (TypeId::kF80).
  kErrorInvalidUseOfF80,
  //! Some registers in the instruction must be consecutive (some ARM and AVX512 neural-net instructions).
  kErrorNotConsecutiveRegs,
  //! AsmJit requires a physical register, but no one is available.
  kErrorNoMorePhysRegs,
  //! A variable has been assigned more than once to a function argument (CodeCompiler).
  kErrorOverlappedRegs,
  //! Invalid register to hold stack arguments offset.
  kErrorOverlappingStackRegWithRegArg,
  //! Count of AsmJit error codes.
  kErrorCount
};
// ============================================================================
// [asmjit::Internal]
// ============================================================================
namespace Internal {
#if defined(ASMJIT_CUSTOM_ALLOC) && \
defined(ASMJIT_CUSTOM_REALLOC) && \
defined(ASMJIT_CUSTOM_FREE)
static ASMJIT_INLINE void* allocMemory(size_t size) noexcept { return ASMJIT_CUSTOM_ALLOC(size); }
static ASMJIT_INLINE void* reallocMemory(void* p, size_t size) noexcept { return ASMJIT_CUSTOM_REALLOC(p, size); }
static ASMJIT_INLINE void releaseMemory(void* p) noexcept { ASMJIT_CUSTOM_FREE(p); }
#elif !defined(ASMJIT_CUSTOM_ALLOC) && \
!defined(ASMJIT_CUSTOM_REALLOC) && \
!defined(ASMJIT_CUSTOM_FREE)
static ASMJIT_INLINE void* allocMemory(size_t size) noexcept { return ::malloc(size); }
static ASMJIT_INLINE void* reallocMemory(void* p, size_t size) noexcept { return ::realloc(p, size); }
static ASMJIT_INLINE void releaseMemory(void* p) noexcept { ::free(p); }
#else
# error "[asmjit] You must provide either none or all of ASMJIT_CUSTOM_[ALLOC|REALLOC|FREE]"
#endif
//! Cast designed to cast between function and void* pointers.
template<typename Dst, typename Src>
static ASMJIT_INLINE Dst ptr_cast(Src p) noexcept { return (Dst)p; }
} // Internal namespace
template<typename Func>
static ASMJIT_INLINE Func ptr_as_func(void* func) noexcept { return Internal::ptr_cast<Func, void*>(func); }
template<typename Func>
static ASMJIT_INLINE void* func_as_ptr(Func func) noexcept { return Internal::ptr_cast<void*, Func>(func); }
// ============================================================================
// [asmjit::DebugUtils]
// ============================================================================
namespace DebugUtils {
//! Returns the error `err` passed.
//!
//! Provided for debugging purposes. Putting a breakpoint inside `errored` can
//! help with tracing the origin of any error reported / returned by AsmJit.
static ASMJIT_INLINE Error errored(Error err) noexcept { return err; }
//! Get a printable version of `asmjit::Error` code.
ASMJIT_API const char* errorAsString(Error err) noexcept;
//! Called to output debugging message(s).
ASMJIT_API void debugOutput(const char* str) noexcept;
//! Called on assertion failure.
//!
//! \param file Source file name where it happened.
//! \param line Line in the source file.
//! \param msg Message to display.
//!
//! If you have problems with assertions put a breakpoint at assertionFailed()
//! function (asmjit/base/globals.cpp) and check the call stack to locate the
//! failing code.
ASMJIT_API void ASMJIT_NORETURN assertionFailed(const char* file, int line, const char* msg) noexcept;
#if defined(ASMJIT_DEBUG)
# define ASMJIT_ASSERT(exp) \
do { \
if (ASMJIT_LIKELY(exp)) \
break; \
::asmjit::DebugUtils::assertionFailed(__FILE__, __LINE__, #exp); \
} while (0)
# define ASMJIT_NOT_REACHED() \
do { \
::asmjit::DebugUtils::assertionFailed(__FILE__, __LINE__, \
"ASMJIT_NOT_REACHED has been reached"); \
ASMJIT_ASSUME(0); \
} while (0)
#else
# define ASMJIT_ASSERT(exp) ASMJIT_NOP
# define ASMJIT_NOT_REACHED() ASMJIT_ASSUME(0)
#endif // DEBUG
//! \internal
//!
//! Used by AsmJit to propagate a possible `Error` produced by `...` to the caller.
#define ASMJIT_PROPAGATE(...) \
do { \
::asmjit::Error _err = __VA_ARGS__; \
if (ASMJIT_UNLIKELY(_err)) \
return _err; \
} while (0)
} // DebugUtils namespace
// ============================================================================
// [asmjit::Init / NoInit]
// ============================================================================
#if !defined(ASMJIT_DOCGEN)
struct _Init {};
static const _Init Init = {};
struct _NoInit {};
static const _NoInit NoInit = {};
#endif // !ASMJIT_DOCGEN
//! \}
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"
// [Guard]
#endif // _ASMJIT_BASE_GLOBALS_H

View file

@ -0,0 +1,77 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Export]
#define ASMJIT_EXPORTS
// [Guard]
#include "../asmjit_build.h"
#if defined(ASMJIT_BUILD_X86)
// [Dependencies]
#include "../base/arch.h"
#include "../base/inst.h"
#if defined(ASMJIT_BUILD_X86)
# include "../x86/x86instimpl_p.h"
#endif // ASMJIT_BUILD_X86
#if defined(ASMJIT_BUILD_ARM)
# include "../arm/arminstimpl_p.h"
#endif // ASMJIT_BUILD_ARM
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
// ============================================================================
// [asmjit::Inst - Validate]
// ============================================================================
#if !defined(ASMJIT_DISABLE_VALIDATION)
Error Inst::validate(uint32_t archType, const Detail& detail, const Operand_* operands, uint32_t count) noexcept {
#if defined(ASMJIT_BUILD_X86)
if (ArchInfo::isX86Family(archType))
return X86InstImpl::validate(archType, detail, operands, count);
#endif
#if defined(ASMJIT_BUILD_ARM)
if (ArchInfo::isArmFamily(archType))
return ArmInstImpl::validate(archType, detail, operands, count);
#endif
return DebugUtils::errored(kErrorInvalidArch);
}
#endif
// ============================================================================
// [asmjit::Inst - CheckFeatures]
// ============================================================================
#if !defined(ASMJIT_DISABLE_EXTENSIONS)
Error Inst::checkFeatures(uint32_t archType, const Detail& detail, const Operand_* operands, uint32_t count, CpuFeatures& out) noexcept {
#if defined(ASMJIT_BUILD_X86)
if (ArchInfo::isX86Family(archType))
return X86InstImpl::checkFeatures(archType, detail, operands, count, out);
#endif
#if defined(ASMJIT_BUILD_ARM)
if (ArchInfo::isArmFamily(archType))
return ArmInstImpl::checkFeatures(archType, detail, operands, count, out);
#endif
return DebugUtils::errored(kErrorInvalidArch);
}
#endif // !defined(ASMJIT_DISABLE_EXTENSIONS)
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"
// [Guard]
#endif // ASMJIT_BUILD_X86

View file

@ -0,0 +1,108 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_BASE_INST_H
#define _ASMJIT_BASE_INST_H
// [Dependencies]
#include "../base/cpuinfo.h"
#include "../base/operand.h"
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
//! \addtogroup asmjit_base
//! \{
// ============================================================================
// [asmjit::Inst]
// ============================================================================
//! Definitions and utilities related to instructions used by all architectures.
struct Inst {
ASMJIT_ENUM(Id) {
kIdNone = 0 //!< Invalid or uninitialized instruction id.
};
//! Describes an instruction's jump type, if any.
ASMJIT_ENUM(JumpType) {
kJumpTypeNone = 0, //!< Instruction doesn't jump (regular instruction).
kJumpTypeDirect = 1, //!< Instruction is a unconditional (direct) jump.
kJumpTypeConditional = 2, //!< Instruction is a conditional jump.
kJumpTypeCall = 3, //!< Instruction is a function call.
kJumpTypeReturn = 4 //!< Instruction is a function return.
};
// --------------------------------------------------------------------------
// [Detail]
// --------------------------------------------------------------------------
//! Instruction id, options, and extraReg packed in a single structure. This
//! structure exists to simplify analysis and validation API that requires a
//! lot of information about the instruction to be processed.
class Detail {
public:
ASMJIT_INLINE Detail() noexcept
: instId(0),
options(0),
extraReg() {}
explicit ASMJIT_INLINE Detail(uint32_t instId, uint32_t options = 0) noexcept
: instId(instId),
options(options),
extraReg() {}
ASMJIT_INLINE Detail(uint32_t instId, uint32_t options, const RegOnly& reg) noexcept
: instId(instId),
options(options),
extraReg(reg) {}
ASMJIT_INLINE Detail(uint32_t instId, uint32_t options, const Reg& reg) noexcept
: instId(instId),
options(options) { extraReg.init(reg); }
// ------------------------------------------------------------------------
// [Accessors]
// ------------------------------------------------------------------------
ASMJIT_INLINE bool hasExtraReg() const noexcept { return extraReg.isValid(); }
// ------------------------------------------------------------------------
// [Members]
// ------------------------------------------------------------------------
uint32_t instId;
uint32_t options;
RegOnly extraReg;
};
// --------------------------------------------------------------------------
// [API]
// --------------------------------------------------------------------------
#if !defined(ASMJIT_DISABLE_VALIDATION)
//! Validate the given instruction.
ASMJIT_API static Error validate(uint32_t archType, const Detail& detail, const Operand_* operands, uint32_t count) noexcept;
#endif // !ASMJIT_DISABLE_VALIDATION
#if !defined(ASMJIT_DISABLE_EXTENSIONS)
//! Check CPU features required to execute the given instruction.
ASMJIT_API static Error checkFeatures(uint32_t archType, const Detail& detail, const Operand_* operands, uint32_t count, CpuFeatures& out) noexcept;
#endif // !defined(ASMJIT_DISABLE_EXTENSIONS)
};
//! \}
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"
// [Guard]
#endif // _ASMJIT_BASE_INST_H

View file

@ -0,0 +1,497 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Export]
#define ASMJIT_EXPORTS
// [Guard]
#include "../asmjit_build.h"
#if !defined(ASMJIT_DISABLE_LOGGING)
// [Dependencies]
#include "../base/codeholder.h"
#include "../base/codeemitter.h"
#include "../base/logging.h"
#include "../base/utils.h"
#if !defined(ASMJIT_DISABLE_BUILDER)
# include "../base/codebuilder.h"
#endif // !ASMJIT_DISABLE_BUILDER
#if !defined(ASMJIT_DISABLE_COMPILER)
# include "../base/codecompiler.h"
#else
namespace asmjit { class VirtReg; }
#endif // !ASMJIT_DISABLE_COMPILER
#if defined(ASMJIT_BUILD_X86)
# include "../x86/x86logging_p.h"
#endif // ASMJIT_BUILD_X86
#if defined(ASMJIT_BUILD_ARM)
# include "../arm/armlogging_p.h"
#endif // ASMJIT_BUILD_ARM
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
// ============================================================================
// [asmjit::Logger - Construction / Destruction]
// ============================================================================
Logger::Logger() noexcept {
_options = 0;
::memset(_indentation, 0, ASMJIT_ARRAY_SIZE(_indentation));
}
Logger::~Logger() noexcept {}
// ============================================================================
// [asmjit::Logger - Logging]
// ============================================================================
Error Logger::logf(const char* fmt, ...) noexcept {
Error err;
va_list ap;
va_start(ap, fmt);
err = logv(fmt, ap);
va_end(ap);
return err;
}
Error Logger::logv(const char* fmt, va_list ap) noexcept {
char buf[1024];
size_t len = vsnprintf(buf, sizeof(buf), fmt, ap);
if (len >= sizeof(buf))
len = sizeof(buf) - 1;
return log(buf, len);
}
Error Logger::logBinary(const void* data, size_t size) noexcept {
static const char prefix[] = ".data ";
static const char hex[16] = { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F' };
const uint8_t* s = static_cast<const uint8_t*>(data);
size_t i = size;
char buffer[128];
::memcpy(buffer, prefix, ASMJIT_ARRAY_SIZE(prefix) - 1);
while (i) {
uint32_t n = static_cast<uint32_t>(std::min<size_t>(i, 16));
char* p = buffer + ASMJIT_ARRAY_SIZE(prefix) - 1;
i -= n;
do {
uint32_t c = s[0];
p[0] = hex[c >> 4];
p[1] = hex[c & 15];
p += 2;
s += 1;
} while (--n);
*p++ = '\n';
ASMJIT_PROPAGATE(log(buffer, (size_t)(p - buffer)));
}
return kErrorOk;
}
// ============================================================================
// [asmjit::Logger - Indentation]
// ============================================================================
void Logger::setIndentation(const char* indentation) noexcept {
::memset(_indentation, 0, ASMJIT_ARRAY_SIZE(_indentation));
if (!indentation)
return;
size_t length = Utils::strLen(indentation, ASMJIT_ARRAY_SIZE(_indentation) - 1);
::memcpy(_indentation, indentation, length);
}
// ============================================================================
// [asmjit::FileLogger - Construction / Destruction]
// ============================================================================
FileLogger::FileLogger(FILE* stream) noexcept : _stream(nullptr) { setStream(stream); }
FileLogger::~FileLogger() noexcept {}
// ============================================================================
// [asmjit::FileLogger - Logging]
// ============================================================================
Error FileLogger::_log(const char* buf, size_t len) noexcept {
if (!_stream)
return kErrorOk;
if (len == Globals::kInvalidIndex)
len = strlen(buf);
fwrite(buf, 1, len, _stream);
return kErrorOk;
}
// ============================================================================
// [asmjit::StringLogger - Construction / Destruction]
// ============================================================================
StringLogger::StringLogger() noexcept {}
StringLogger::~StringLogger() noexcept {}
// ============================================================================
// [asmjit::StringLogger - Logging]
// ============================================================================
Error StringLogger::_log(const char* buf, size_t len) noexcept {
return _stringBuilder.appendString(buf, len);
}
// ============================================================================
// [asmjit::Logging]
// ============================================================================
Error Logging::formatLabel(
StringBuilder& sb,
uint32_t logOptions,
const CodeEmitter* emitter,
uint32_t labelId) noexcept {
const LabelEntry* le = emitter->getCode()->getLabelEntry(labelId);
if (ASMJIT_UNLIKELY(!le))
return sb.appendFormat("InvalidLabel[Id=%u]", static_cast<unsigned int>(labelId));
if (le->hasName()) {
if (le->hasParent()) {
uint32_t parentId = le->getParentId();
const LabelEntry* pe = emitter->getCode()->getLabelEntry(parentId);
if (ASMJIT_UNLIKELY(!pe))
ASMJIT_PROPAGATE(sb.appendFormat("InvalidLabel[Id=%u]", static_cast<unsigned int>(labelId)));
else if (ASMJIT_UNLIKELY(!pe->hasName()))
ASMJIT_PROPAGATE(sb.appendFormat("L%u", Operand::unpackId(parentId)));
else
ASMJIT_PROPAGATE(sb.appendString(pe->getName()));
ASMJIT_PROPAGATE(sb.appendChar('.'));
}
return sb.appendString(le->getName());
}
else {
return sb.appendFormat("L%u", Operand::unpackId(labelId));
}
}
Error Logging::formatRegister(
StringBuilder& sb,
uint32_t logOptions,
const CodeEmitter* emitter,
uint32_t archType,
uint32_t regType,
uint32_t regId) noexcept {
#if defined(ASMJIT_BUILD_X86)
return X86Logging::formatRegister(sb, logOptions, emitter, archType, regType, regId);
#endif // ASMJIT_BUILD_X86
#if defined(ASMJIT_BUILD_ARM)
return ArmLogging::formatRegister(sb, logOptions, emitter, archType, regType, regId);
#endif // ASMJIT_BUILD_ARM
return kErrorInvalidArch;
}
Error Logging::formatOperand(
StringBuilder& sb,
uint32_t logOptions,
const CodeEmitter* emitter,
uint32_t archType,
const Operand_& op) noexcept {
#if defined(ASMJIT_BUILD_X86)
return X86Logging::formatOperand(sb, logOptions, emitter, archType, op);
#endif // ASMJIT_BUILD_X86
#if defined(ASMJIT_BUILD_ARM)
return ArmLogging::formatOperand(sb, logOptions, emitter, archType, op);
#endif // ASMJIT_BUILD_ARM
return kErrorInvalidArch;
}
Error Logging::formatInstruction(
StringBuilder& sb,
uint32_t logOptions,
const CodeEmitter* emitter,
uint32_t archType,
const Inst::Detail& detail, const Operand_* opArray, uint32_t opCount) noexcept {
#if defined(ASMJIT_BUILD_X86)
return X86Logging::formatInstruction(sb, logOptions, emitter, archType, detail, opArray, opCount);
#endif // ASMJIT_BUILD_X86
#if defined(ASMJIT_BUILD_ARM)
return ArmLogging::formatInstruction(sb, logOptions, emitter, archType, detail, opArray, opCount);
#endif // ASMJIT_BUILD_ARM
return kErrorInvalidArch;
}
#if !defined(ASMJIT_DISABLE_BUILDER)
static Error formatTypeId(StringBuilder& sb, uint32_t typeId) noexcept {
if (typeId == TypeId::kVoid)
return sb.appendString("void");
if (!TypeId::isValid(typeId))
return sb.appendString("unknown");
const char* typeName = "unknown";
uint32_t typeSize = TypeId::sizeOf(typeId);
uint32_t elementId = TypeId::elementOf(typeId);
switch (elementId) {
case TypeId::kIntPtr : typeName = "intptr" ; break;
case TypeId::kUIntPtr: typeName = "uintptr"; break;
case TypeId::kI8 : typeName = "i8" ; break;
case TypeId::kU8 : typeName = "u8" ; break;
case TypeId::kI16 : typeName = "i16" ; break;
case TypeId::kU16 : typeName = "u16" ; break;
case TypeId::kI32 : typeName = "i32" ; break;
case TypeId::kU32 : typeName = "u32" ; break;
case TypeId::kI64 : typeName = "i64" ; break;
case TypeId::kU64 : typeName = "u64" ; break;
case TypeId::kF32 : typeName = "f32" ; break;
case TypeId::kF64 : typeName = "f64" ; break;
case TypeId::kF80 : typeName = "f80" ; break;
case TypeId::kMask8 : typeName = "mask8" ; break;
case TypeId::kMask16 : typeName = "mask16" ; break;
case TypeId::kMask32 : typeName = "mask32" ; break;
case TypeId::kMask64 : typeName = "mask64" ; break;
case TypeId::kMmx32 : typeName = "mmx32" ; break;
case TypeId::kMmx64 : typeName = "mmx64" ; break;
}
uint32_t elementSize = TypeId::sizeOf(elementId);
if (typeSize > elementSize) {
unsigned int numElements = typeSize / elementSize;
return sb.appendFormat("%sx%u", typeName, numElements);
}
else {
return sb.appendString(typeName);
}
}
static Error formatFuncDetailValue(
StringBuilder& sb,
uint32_t logOptions,
const CodeEmitter* emitter,
FuncDetail::Value value) noexcept {
uint32_t typeId = value.getTypeId();
ASMJIT_PROPAGATE(formatTypeId(sb, typeId));
if (value.byReg()) {
ASMJIT_PROPAGATE(sb.appendChar(':'));
ASMJIT_PROPAGATE(Logging::formatRegister(sb, logOptions, emitter, emitter->getArchType(), value.getRegType(), value.getRegId()));
}
if (value.byStack()) {
ASMJIT_PROPAGATE(sb.appendFormat(":[%d]", static_cast<int>(value.getStackOffset())));
}
return kErrorOk;
}
static Error formatFuncRets(
StringBuilder& sb,
uint32_t logOptions,
const CodeEmitter* emitter,
const FuncDetail& fd,
VirtReg* const* vRegs) noexcept {
if (!fd.hasRet())
return sb.appendString("void");
for (uint32_t i = 0; i < fd.getRetCount(); i++) {
if (i) ASMJIT_PROPAGATE(sb.appendString(", "));
ASMJIT_PROPAGATE(formatFuncDetailValue(sb, logOptions, emitter, fd.getRet(i)));
#if !defined(ASMJIT_DISABLE_COMPILER)
if (vRegs)
ASMJIT_PROPAGATE(sb.appendFormat(" {%s}", vRegs[i]->getName()));
#endif // !ASMJIT_DISABLE_COMPILER
}
return kErrorOk;
}
static Error formatFuncArgs(
StringBuilder& sb,
uint32_t logOptions,
const CodeEmitter* emitter,
const FuncDetail& fd,
VirtReg* const* vRegs) noexcept {
for (uint32_t i = 0; i < fd.getArgCount(); i++) {
if (i) ASMJIT_PROPAGATE(sb.appendString(", "));
ASMJIT_PROPAGATE(formatFuncDetailValue(sb, logOptions, emitter, fd.getArg(i)));
#if !defined(ASMJIT_DISABLE_COMPILER)
if (vRegs)
ASMJIT_PROPAGATE(sb.appendFormat(" {%s}", vRegs[i]->getName()));
#endif // !ASMJIT_DISABLE_COMPILER
}
return kErrorOk;
}
Error Logging::formatNode(
StringBuilder& sb,
uint32_t logOptions,
const CodeBuilder* cb,
const CBNode* node_) noexcept {
if (node_->hasPosition())
ASMJIT_PROPAGATE(sb.appendFormat("<%04u> ", node_->getPosition()));
switch (node_->getType()) {
case CBNode::kNodeInst: {
const CBInst* node = node_->as<CBInst>();
ASMJIT_PROPAGATE(
Logging::formatInstruction(sb, logOptions, cb,
cb->getArchType(),
node->getInstDetail(), node->getOpArray(), node->getOpCount()));
break;
}
case CBNode::kNodeLabel: {
const CBLabel* node = node_->as<CBLabel>();
ASMJIT_PROPAGATE(sb.appendFormat("L%u:", Operand::unpackId(node->getId())));
break;
}
case CBNode::kNodeData: {
const CBData* node = node_->as<CBData>();
ASMJIT_PROPAGATE(sb.appendFormat(".embed (%u bytes)", node->getSize()));
break;
}
case CBNode::kNodeAlign: {
const CBAlign* node = node_->as<CBAlign>();
ASMJIT_PROPAGATE(
sb.appendFormat(".align %u (%s)",
node->getAlignment(),
node->getMode() == kAlignCode ? "code" : "data"));
break;
}
case CBNode::kNodeComment: {
const CBComment* node = node_->as<CBComment>();
ASMJIT_PROPAGATE(sb.appendFormat("; %s", node->getInlineComment()));
break;
}
case CBNode::kNodeSentinel: {
ASMJIT_PROPAGATE(sb.appendString("[sentinel]"));
break;
}
#if !defined(ASMJIT_DISABLE_COMPILER)
case CBNode::kNodeFunc: {
const CCFunc* node = node_->as<CCFunc>();
ASMJIT_PROPAGATE(formatLabel(sb, logOptions, cb, node->getId()));
ASMJIT_PROPAGATE(sb.appendString(": ["));
ASMJIT_PROPAGATE(formatFuncRets(sb, logOptions, cb, node->getDetail(), nullptr));
ASMJIT_PROPAGATE(sb.appendString("]"));
ASMJIT_PROPAGATE(sb.appendString("("));
ASMJIT_PROPAGATE(formatFuncArgs(sb, logOptions, cb, node->getDetail(), node->getArgs()));
ASMJIT_PROPAGATE(sb.appendString(")"));
break;
}
case CBNode::kNodeFuncExit: {
ASMJIT_PROPAGATE(sb.appendString("[ret]"));
break;
}
case CBNode::kNodeFuncCall: {
const CCFuncCall* node = node_->as<CCFuncCall>();
ASMJIT_PROPAGATE(
Logging::formatInstruction(sb, logOptions, cb,
cb->getArchType(),
node->getInstDetail(), node->getOpArray(), node->getOpCount()));
break;
}
#endif // !ASMJIT_DISABLE_COMPILER
default: {
ASMJIT_PROPAGATE(sb.appendFormat("[unknown (type=%u)]", node_->getType()));
break;
}
}
return kErrorOk;
}
#endif // !ASMJIT_DISABLE_BUILDER
Error Logging::formatLine(StringBuilder& sb, const uint8_t* binData, size_t binLen, size_t dispLen, size_t imLen, const char* comment) noexcept {
size_t currentLen = sb.getLength();
size_t commentLen = comment ? Utils::strLen(comment, kMaxCommentLength) : 0;
ASMJIT_ASSERT(binLen >= dispLen);
if ((binLen != 0 && binLen != Globals::kInvalidIndex) || commentLen) {
size_t align = kMaxInstLength;
char sep = ';';
for (size_t i = (binLen == Globals::kInvalidIndex); i < 2; i++) {
size_t begin = sb.getLength();
// Append align.
if (currentLen < align)
ASMJIT_PROPAGATE(sb.appendChars(' ', align - currentLen));
// Append separator.
if (sep) {
ASMJIT_PROPAGATE(sb.appendChar(sep));
ASMJIT_PROPAGATE(sb.appendChar(' '));
}
// Append binary data or comment.
if (i == 0) {
ASMJIT_PROPAGATE(sb.appendHex(binData, binLen - dispLen - imLen));
ASMJIT_PROPAGATE(sb.appendChars('.', dispLen * 2));
ASMJIT_PROPAGATE(sb.appendHex(binData + binLen - imLen, imLen));
if (commentLen == 0) break;
}
else {
ASMJIT_PROPAGATE(sb.appendString(comment, commentLen));
}
currentLen += sb.getLength() - begin;
align += kMaxBinaryLength;
sep = '|';
}
}
return sb.appendChar('\n');
}
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"
// [Guard]
#endif // !ASMJIT_DISABLE_LOGGING

View file

@ -0,0 +1,288 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_BASE_LOGGING_H
#define _ASMJIT_BASE_LOGGING_H
// [Dependencies]
#include "../base/inst.h"
#include "../base/string.h"
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
//! \addtogroup asmjit_base
//! \{
#if !defined(ASMJIT_DISABLE_LOGGING)
// ============================================================================
// [Forward Declarations]
// ============================================================================
class CodeEmitter;
class Reg;
struct Operand_;
#if !defined(ASMJIT_DISABLE_BUILDER)
class CodeBuilder;
class CBNode;
#endif // !ASMJIT_DISABLE_BUILDER
// ============================================================================
// [asmjit::Logger]
// ============================================================================
//! Abstract logging interface and helpers.
//!
//! This class can be inherited and reimplemented to fit into your logging
//! subsystem. When reimplementing use `Logger::_log()` method to log into
//! a custom stream.
//!
//! There are two \ref Logger implementations offered by AsmJit:
//! - \ref FileLogger - allows to log into a `FILE*` stream.
//! - \ref StringLogger - logs into a \ref StringBuilder.
class ASMJIT_VIRTAPI Logger {
public:
ASMJIT_NONCOPYABLE(Logger)
// --------------------------------------------------------------------------
// [Options]
// --------------------------------------------------------------------------
//! Logger options.
ASMJIT_ENUM(Options) {
kOptionBinaryForm = 0x00000001, //! Output instructions also in binary form.
kOptionImmExtended = 0x00000002, //! Output a meaning of some immediates.
kOptionHexImmediate = 0x00000004, //! Output constants in hexadecimal form.
kOptionHexDisplacement = 0x00000008 //! Output displacements in hexadecimal form.
};
// --------------------------------------------------------------------------
// [Construction / Destruction]
// --------------------------------------------------------------------------
//! Create a `Logger` instance.
ASMJIT_API Logger() noexcept;
//! Destroy the `Logger` instance.
ASMJIT_API virtual ~Logger() noexcept;
// --------------------------------------------------------------------------
// [Logging]
// --------------------------------------------------------------------------
//! Log `str` - must be reimplemented.
virtual Error _log(const char* str, size_t len) noexcept = 0;
//! Log a string `str`, which is either null terminated or having `len` length.
ASMJIT_INLINE Error log(const char* str, size_t len = Globals::kInvalidIndex) noexcept { return _log(str, len); }
//! Log a content of a `StringBuilder` `str`.
ASMJIT_INLINE Error log(const StringBuilder& str) noexcept { return _log(str.getData(), str.getLength()); }
//! Format the message by using `sprintf()` and then send to `log()`.
ASMJIT_API Error logf(const char* fmt, ...) noexcept;
//! Format the message by using `vsprintf()` and then send to `log()`.
ASMJIT_API Error logv(const char* fmt, va_list ap) noexcept;
//! Log binary data.
ASMJIT_API Error logBinary(const void* data, size_t size) noexcept;
// --------------------------------------------------------------------------
// [Options]
// --------------------------------------------------------------------------
//! Get all logger options as a single integer.
ASMJIT_INLINE uint32_t getOptions() const noexcept { return _options; }
//! Get the given logger option.
ASMJIT_INLINE bool hasOption(uint32_t option) const noexcept { return (_options & option) != 0; }
ASMJIT_INLINE void addOptions(uint32_t options) noexcept { _options |= options; }
ASMJIT_INLINE void clearOptions(uint32_t options) noexcept { _options &= ~options; }
// --------------------------------------------------------------------------
// [Indentation]
// --------------------------------------------------------------------------
//! Get indentation.
ASMJIT_INLINE const char* getIndentation() const noexcept { return _indentation; }
//! Set indentation.
ASMJIT_API void setIndentation(const char* indentation) noexcept;
//! Reset indentation.
ASMJIT_INLINE void resetIndentation() noexcept { setIndentation(nullptr); }
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
//! Options, see \ref LoggerOption.
uint32_t _options;
//! Indentation.
char _indentation[12];
};
// ============================================================================
// [asmjit::FileLogger]
// ============================================================================
//! Logger that can log to a `FILE*` stream.
class ASMJIT_VIRTAPI FileLogger : public Logger {
public:
ASMJIT_NONCOPYABLE(FileLogger)
// --------------------------------------------------------------------------
// [Construction / Destruction]
// --------------------------------------------------------------------------
//! Create a new `FileLogger` that logs to a `FILE` stream.
ASMJIT_API FileLogger(FILE* stream = nullptr) noexcept;
//! Destroy the `FileLogger`.
ASMJIT_API virtual ~FileLogger() noexcept;
// --------------------------------------------------------------------------
// [Accessors]
// --------------------------------------------------------------------------
//! Get the logging out put stream or null.
ASMJIT_INLINE FILE* getStream() const noexcept { return _stream; }
//! Set the logging output stream to `stream` or null.
//!
//! NOTE: If the `stream` is null it will disable logging, but it won't
//! stop calling `log()` unless the logger is detached from the
//! \ref Assembler.
ASMJIT_INLINE void setStream(FILE* stream) noexcept { _stream = stream; }
// --------------------------------------------------------------------------
// [Logging]
// --------------------------------------------------------------------------
ASMJIT_API Error _log(const char* buf, size_t len = Globals::kInvalidIndex) noexcept override;
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
//! C file stream.
FILE* _stream;
};
// ============================================================================
// [asmjit::StringLogger]
// ============================================================================

//! Logger that stores everything in an internal string buffer.
class ASMJIT_VIRTAPI StringLogger : public Logger {
public:
  ASMJIT_NONCOPYABLE(StringLogger)

  // --------------------------------------------------------------------------
  // [Construction / Destruction]
  // --------------------------------------------------------------------------

  //! Create new `StringLogger`.
  ASMJIT_API StringLogger() noexcept;
  //! Destroy the `StringLogger`.
  ASMJIT_API virtual ~StringLogger() noexcept;

  // --------------------------------------------------------------------------
  // [Accessors]
  // --------------------------------------------------------------------------

  //! Get `char*` pointer which represents the resulting string.
  //!
  //! The pointer is owned by `StringLogger`, it can't be modified or freed.
  ASMJIT_INLINE const char* getString() const noexcept { return _stringBuilder.getData(); }
  //! Clear the resulting string (the logger itself remains usable).
  ASMJIT_INLINE void clearString() noexcept { _stringBuilder.clear(); }
  //! Get the length of the string returned by `getString()`.
  ASMJIT_INLINE size_t getLength() const noexcept { return _stringBuilder.getLength(); }

  // --------------------------------------------------------------------------
  // [Logging]
  // --------------------------------------------------------------------------

  //! Append `len` bytes of `buf` to the internal buffer (`Logger` interface).
  ASMJIT_API Error _log(const char* buf, size_t len = Globals::kInvalidIndex) noexcept override;

  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------

  //! Output string.
  StringBuilder _stringBuilder;
};
// ============================================================================
// [asmjit::Logging]
// ============================================================================

//! Static helpers that format registers, labels, operands, instructions and
//! (when the builder is compiled in) builder nodes into a `StringBuilder`.
struct Logging {
  //! Format a register of the given `archType`, `regType` and `regId` into `sb`.
  ASMJIT_API static Error formatRegister(
    StringBuilder& sb,
    uint32_t logOptions,
    const CodeEmitter* emitter,
    uint32_t archType,
    uint32_t regType,
    uint32_t regId) noexcept;

  //! Format the label identified by `labelId` into `sb`.
  ASMJIT_API static Error formatLabel(
    StringBuilder& sb,
    uint32_t logOptions,
    const CodeEmitter* emitter,
    uint32_t labelId) noexcept;

  //! Format any operand `op` (register, memory, immediate or label) into `sb`.
  ASMJIT_API static Error formatOperand(
    StringBuilder& sb,
    uint32_t logOptions,
    const CodeEmitter* emitter,
    uint32_t archType,
    const Operand_& op) noexcept;

  //! Format a complete instruction (`detail` + `opCount` operands) into `sb`.
  ASMJIT_API static Error formatInstruction(
    StringBuilder& sb,
    uint32_t logOptions,
    const CodeEmitter* emitter,
    uint32_t archType,
    const Inst::Detail& detail, const Operand_* opArray, uint32_t opCount) noexcept;

#if !defined(ASMJIT_DISABLE_BUILDER)
  //! Format a single `CodeBuilder` node `node_` into `sb`.
  ASMJIT_API static Error formatNode(
    StringBuilder& sb,
    uint32_t logOptions,
    const CodeBuilder* cb,
    const CBNode* node_) noexcept;
#endif // !ASMJIT_DISABLE_BUILDER

// Only used by AsmJit internals, not available to users.
#if defined(ASMJIT_EXPORTS)
  enum {
    // Has to be big to be able to hold all metadata compiler can assign to a
    // single instruction.
    kMaxCommentLength = 512,
    kMaxInstLength = 40,
    kMaxBinaryLength = 26
  };

  //! Format one listing line (binary data plus an optional comment) into `sb`.
  static Error formatLine(
    StringBuilder& sb,
    const uint8_t* binData, size_t binLen, size_t dispLen, size_t imLen, const char* comment) noexcept;
#endif // ASMJIT_EXPORTS
};
#else
class Logger;
#endif // !ASMJIT_DISABLE_LOGGING
//! \}
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"
// [Guard]
#endif // _ASMJIT_BASE_LOGGER_H

View file

@ -0,0 +1,74 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_BASE_MISC_P_H
#define _ASMJIT_BASE_MISC_P_H
// [Dependencies]
#include "../asmjit_build.h"
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
//! \addtogroup asmjit_base
//! \{
//! \internal
//!
//! Macro used to populate a table with 16 elements starting at `I`.
#define ASMJIT_TABLE_16(DEF, I) DEF(I + 0), DEF(I + 1), DEF(I + 2), DEF(I + 3), \
                                DEF(I + 4), DEF(I + 5), DEF(I + 6), DEF(I + 7), \
                                DEF(I + 8), DEF(I + 9), DEF(I + 10), DEF(I + 11), \
                                DEF(I + 12), DEF(I + 13), DEF(I + 14), DEF(I + 15)

//! \internal
//!
//! Expand `TABLE<I + n>::VALUE` for 8 consecutive indexes starting at `I`.
//! The `ASMJIT_TABLE_T_*` family below doubles the element count at each step
//! (8, 16, 32 ... 1024) by chaining two expansions of the previous macro.
#define ASMJIT_TABLE_T_8(TABLE, VALUE, I) \
  TABLE< I + 0 >::VALUE, TABLE< I + 1 >::VALUE, \
  TABLE< I + 2 >::VALUE, TABLE< I + 3 >::VALUE, \
  TABLE< I + 4 >::VALUE, TABLE< I + 5 >::VALUE, \
  TABLE< I + 6 >::VALUE, TABLE< I + 7 >::VALUE

#define ASMJIT_TABLE_T_16(TABLE, VALUE, I) \
  ASMJIT_TABLE_T_8(TABLE, VALUE, I), \
  ASMJIT_TABLE_T_8(TABLE, VALUE, I + 8)

#define ASMJIT_TABLE_T_32(TABLE, VALUE, I) \
  ASMJIT_TABLE_T_16(TABLE, VALUE, I), \
  ASMJIT_TABLE_T_16(TABLE, VALUE, I + 16)

#define ASMJIT_TABLE_T_64(TABLE, VALUE, I) \
  ASMJIT_TABLE_T_32(TABLE, VALUE, I), \
  ASMJIT_TABLE_T_32(TABLE, VALUE, I + 32)

#define ASMJIT_TABLE_T_128(TABLE, VALUE, I) \
  ASMJIT_TABLE_T_64(TABLE, VALUE, I), \
  ASMJIT_TABLE_T_64(TABLE, VALUE, I + 64)

#define ASMJIT_TABLE_T_256(TABLE, VALUE, I) \
  ASMJIT_TABLE_T_128(TABLE, VALUE, I), \
  ASMJIT_TABLE_T_128(TABLE, VALUE, I + 128)

#define ASMJIT_TABLE_T_512(TABLE, VALUE, I) \
  ASMJIT_TABLE_T_256(TABLE, VALUE, I), \
  ASMJIT_TABLE_T_256(TABLE, VALUE, I + 256)

#define ASMJIT_TABLE_T_1024(TABLE, VALUE, I) \
  ASMJIT_TABLE_T_512(TABLE, VALUE, I), \
  ASMJIT_TABLE_T_512(TABLE, VALUE, I + 512)
//! \}
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"
// [Guard]
#endif // _ASMJIT_BASE_MISC_P_H

View file

@ -0,0 +1,209 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Export]
#define ASMJIT_EXPORTS
// [Dependencies]
#include "../base/operand.h"
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
// ============================================================================
// [asmjit::TypeId]
// ============================================================================
//! \internal
//!
//! Compile-time map of a type-id `ID` to its size in bytes (0 if invalid).
//! Instantiated for all 128 ids to build the `SizeOf` table of `TypeId::_info`.
template<int ID>
struct TypeIdSizeOf_T {
  enum {
    kValue = (ID == TypeId::kI8    ) ?  1 :
             (ID == TypeId::kU8    ) ?  1 :
             (ID == TypeId::kI16   ) ?  2 :
             (ID == TypeId::kU16   ) ?  2 :
             (ID == TypeId::kI32   ) ?  4 :
             (ID == TypeId::kU32   ) ?  4 :
             (ID == TypeId::kI64   ) ?  8 :
             (ID == TypeId::kU64   ) ?  8 :
             (ID == TypeId::kF32   ) ?  4 :
             (ID == TypeId::kF64   ) ?  8 :
             (ID == TypeId::kF80   ) ? 10 :
             (ID == TypeId::kMask8 ) ?  1 :
             (ID == TypeId::kMask16) ?  2 :
             (ID == TypeId::kMask32) ?  4 :
             (ID == TypeId::kMask64) ?  8 :
             (ID == TypeId::kMmx32 ) ?  4 :
             (ID == TypeId::kMmx64 ) ?  8 :
             (ID >= TypeId::_kVec32Start  && ID <= TypeId::_kVec32End ) ?  4 :
             (ID >= TypeId::_kVec64Start  && ID <= TypeId::_kVec64End ) ?  8 :
             (ID >= TypeId::_kVec128Start && ID <= TypeId::_kVec128End) ? 16 :
             (ID >= TypeId::_kVec256Start && ID <= TypeId::_kVec256End) ? 32 :
             (ID >= TypeId::_kVec512Start && ID <= TypeId::_kVec512End) ? 64 : 0
  };
};
//! \internal
//!
//! Compile-time map of a type-id `ID` to the type-id of its element (0 if
//! invalid). Scalars map to themselves, masks/MMX map to a matching integer,
//! and vector ids map back to the scalar id they are built from.
template<int ID>
struct TypeIdElementOf_T {
  enum {
    kValue = (ID == TypeId::kMask8 ) ? TypeId::kU8  :
             (ID == TypeId::kMask16) ? TypeId::kU16 :
             (ID == TypeId::kMask32) ? TypeId::kU32 :
             (ID == TypeId::kMask64) ? TypeId::kU64 :
             (ID == TypeId::kMmx32 ) ? TypeId::kI32 :
             (ID == TypeId::kMmx64 ) ? TypeId::kI64 :
             (ID >= TypeId::kI8 && ID <= TypeId::kF80          ) ? ID :
             (ID >= TypeId::_kVec32Start  && ID <= TypeId::_kVec32End ) ? ID - TypeId::_kVec32Start  + TypeId::kI8 :
             (ID >= TypeId::_kVec64Start  && ID <= TypeId::_kVec64End ) ? ID - TypeId::_kVec64Start  + TypeId::kI8 :
             (ID >= TypeId::_kVec128Start && ID <= TypeId::_kVec128End) ? ID - TypeId::_kVec128Start + TypeId::kI8 :
             (ID >= TypeId::_kVec256Start && ID <= TypeId::_kVec256End) ? ID - TypeId::_kVec256Start + TypeId::kI8 :
             (ID >= TypeId::_kVec512Start && ID <= TypeId::_kVec512End) ? ID - TypeId::_kVec512Start + TypeId::kI8 : 0
  };
};
// Expand `TMPL<I + n>::kValue` for 16 consecutive ids starting at `I`.
#define R(TMPL, I) TMPL<I + 0>::kValue, TMPL<I + 1>::kValue, \
                   TMPL<I + 2>::kValue, TMPL<I + 3>::kValue, \
                   TMPL<I + 4>::kValue, TMPL<I + 5>::kValue, \
                   TMPL<I + 6>::kValue, TMPL<I + 7>::kValue, \
                   TMPL<I + 8>::kValue, TMPL<I + 9>::kValue, \
                   TMPL<I + 10>::kValue, TMPL<I + 11>::kValue, \
                   TMPL<I + 12>::kValue, TMPL<I + 13>::kValue, \
                   TMPL<I + 14>::kValue, TMPL<I + 15>::kValue

// Static lookup tables with `TypeId` metadata, indexed by type-id (0..127)
// and computed at compile time from the templates above.
ASMJIT_API const TypeId::Info TypeId::_info = {
  // SizeOf[128]
  {
    R(TypeIdSizeOf_T,  0), R(TypeIdSizeOf_T,  16),
    R(TypeIdSizeOf_T, 32), R(TypeIdSizeOf_T,  48),
    R(TypeIdSizeOf_T, 64), R(TypeIdSizeOf_T,  80),
    R(TypeIdSizeOf_T, 96), R(TypeIdSizeOf_T, 112)
  },

  // ElementOf[128]
  {
    R(TypeIdElementOf_T,  0), R(TypeIdElementOf_T,  16),
    R(TypeIdElementOf_T, 32), R(TypeIdElementOf_T,  48),
    R(TypeIdElementOf_T, 64), R(TypeIdElementOf_T,  80),
    R(TypeIdElementOf_T, 96), R(TypeIdElementOf_T, 112)
  }
};
#undef R
// ============================================================================
// [asmjit::Operand - Test]
// ============================================================================

// Self-test of basic Operand functionality, compiled only with ASMJIT_TEST.
#if defined(ASMJIT_TEST)
UNIT(base_operand) {
  INFO("Checking operand sizes");
  EXPECT(sizeof(Operand) == 16);
  EXPECT(sizeof(Reg) == 16);
  EXPECT(sizeof(Mem) == 16);
  EXPECT(sizeof(Imm) == 16);
  EXPECT(sizeof(Label) == 16);

  INFO("Checking basic functionality of Operand");
  Operand a, b;
  Operand dummy;

  EXPECT(a.isNone() == true);
  EXPECT(a.isReg() == false);
  EXPECT(a.isMem() == false);
  EXPECT(a.isImm() == false);
  EXPECT(a.isLabel() == false);
  EXPECT(a == b);

  EXPECT(a._any.reserved8_4 == 0, "Default constructed Operand should zero its 'reserved8_4' field");
  EXPECT(a._any.reserved12_4 == 0, "Default constructed Operand should zero its 'reserved12_4' field");

  INFO("Checking basic functionality of Label");
  Label label;
  EXPECT(label.isValid() == false);
  EXPECT(label.getId() == 0);

  INFO("Checking basic functionality of Reg");
  EXPECT(Reg().isValid() == false,
    "Default constructed Reg() should not be valid");
  EXPECT(Reg()._any.reserved8_4 == 0,
    "A default constructed Reg() should zero its 'reserved8_4' field");
  EXPECT(Reg()._any.reserved12_4 == 0,
    "A default constructed Reg() should zero its 'reserved12_4' field");

  EXPECT(Reg().isReg() == false,
    "Default constructed register should not isReg()");
  EXPECT(dummy.as<Reg>().isValid() == false,
    "Default constructed Operand casted to Reg should not be valid");

  // Create some register (not specific to any architecture).
  uint32_t rSig = Operand::kOpReg | (1 << Operand::kSignatureRegTypeShift) |
                                    (2 << Operand::kSignatureRegKindShift) |
                                    (8 << Operand::kSignatureSizeShift   ) ;
  Reg r1(Reg::fromSignature(rSig, 5));

  EXPECT(r1.isValid() == true);
  EXPECT(r1.isReg() == true);
  EXPECT(r1.isReg(1) == true);
  EXPECT(r1.isPhysReg() == true);
  EXPECT(r1.isVirtReg() == false);
  EXPECT(r1.getSignature() == rSig);
  EXPECT(r1.getType() == 1);
  EXPECT(r1.getKind() == 2);
  EXPECT(r1.getSize() == 8);
  EXPECT(r1.getId() == 5);
  EXPECT(r1.isReg(1, 5) == true); // RegType and Id.

  EXPECT(r1._any.reserved8_4 == 0, "Reg should have 'reserved8_4' zero");
  EXPECT(r1._any.reserved12_4 == 0, "Reg should have 'reserved12_4' zero");

  // The same type of register having different id.
  Reg r2(r1, 6);
  EXPECT(r2.isValid() == true);
  EXPECT(r2.isReg() == true);
  EXPECT(r2.isReg(1) == true);
  EXPECT(r2.isPhysReg() == true);
  EXPECT(r2.isVirtReg() == false);
  EXPECT(r2.getSignature() == rSig);
  EXPECT(r2.getType() == r1.getType());
  EXPECT(r2.getKind() == r1.getKind());
  EXPECT(r2.getSize() == r1.getSize());
  EXPECT(r2.getId() == 6);
  EXPECT(r2.isReg(1, 6) == true);

  // Resetting a register must invalidate it.
  r1.reset();
  EXPECT(!r1.isValid(),
    "Reset register should not be valid");
  EXPECT(!r1.isReg(),
    "Reset register should not isReg()");

  INFO("Checking basic functionality of Mem");
  Mem m;
  EXPECT(m.isMem()                  , "Default constructed Mem() should isMem()");
  EXPECT(m == Mem()                 , "Two default constructed Mem() operands should be equal");
  EXPECT(m.hasBase() == false       , "Default constructed Mem() should not have base specified");
  EXPECT(m.hasIndex() == false      , "Default constructed Mem() should not have index specified");
  EXPECT(m.has64BitOffset() == true , "Default constructed Mem() should report 64-bit offset");
  EXPECT(m.getOffset() == 0         , "Default constructed Mem() should have be zero offset / address");

  m.setOffset(-1);
  EXPECT(m.getOffsetLo32() == -1    , "Memory operand must hold a 32-bit offset");
  EXPECT(m.getOffset() == -1        , "32-bit offset must be sign extended to 64 bits");

  int64_t x = int64_t(ASMJIT_UINT64_C(0xFF00FF0000000001));
  m.setOffset(x);
  EXPECT(m.getOffset() == x         , "Memory operand must hold a 64-bit offset");
  EXPECT(m.getOffsetLo32() == 1     , "Memory operand must return correct low offset DWORD");
  EXPECT(m.getOffsetHi32() == 0xFF00FF00, "Memory operand must return correct high offset DWORD");

  INFO("Checking basic functionality of Imm");
  EXPECT(Imm(-1).getInt64() == int64_t(-1),
    "Immediate values should by default sign-extend to 64-bits");
}
#endif // ASMJIT_TEST
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,228 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Export]
#define ASMJIT_EXPORTS
// [Dependencies]
#include "../base/osutils.h"
#include "../base/utils.h"
#if ASMJIT_OS_POSIX
# include <sys/types.h>
# include <sys/mman.h>
# include <time.h>
# include <unistd.h>
#endif // ASMJIT_OS_POSIX
#if ASMJIT_OS_MAC
# include <mach/mach_time.h>
#endif // ASMJIT_OS_MAC
#if ASMJIT_OS_WINDOWS
# if defined(_MSC_VER) && _MSC_VER >= 1400
# include <intrin.h>
# else
# define _InterlockedCompareExchange InterlockedCompareExchange
# endif // _MSC_VER
#endif // ASMJIT_OS_WINDOWS
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
// ============================================================================
// [asmjit::OSUtils - Virtual Memory]
// ============================================================================
// Windows specific implementation using `VirtualAllocEx` and `VirtualFree`.
#if ASMJIT_OS_WINDOWS
// Get cached virtual-memory information - page size, allocation granularity
// and the current process handle. Initialized lazily on the first call.
//
// NOTE(review): the lazy initialization is unsynchronized; concurrent first
// calls could race. The writes are idempotent so this looks benign - confirm.
//
// FIX: removed a stray ';' after the function body (-Wextra-semi warning).
static ASMJIT_NOINLINE const VMemInfo& OSUtils_GetVMemInfo() noexcept {
  static VMemInfo vmi;
  if (ASMJIT_UNLIKELY(!vmi.hCurrentProcess)) {
    SYSTEM_INFO info;
    ::GetSystemInfo(&info);

    vmi.pageSize = Utils::alignToPowerOf2<uint32_t>(info.dwPageSize);
    vmi.pageGranularity = info.dwAllocationGranularity;
    vmi.hCurrentProcess = ::GetCurrentProcess();
  }
  return vmi;
}
// Get virtual-memory information (cached, returned by value).
VMemInfo OSUtils::getVirtualMemoryInfo() noexcept { return OSUtils_GetVMemInfo(); }

// Allocate virtual memory in the current process; thin wrapper over
// `allocProcessMemory()` with a null process handle.
void* OSUtils::allocVirtualMemory(size_t size, size_t* allocated, uint32_t flags) noexcept {
  return allocProcessMemory(static_cast<HANDLE>(0), size, allocated, flags);
}

// Release virtual memory in the current process; thin wrapper over
// `releaseProcessMemory()` with a null process handle.
Error OSUtils::releaseVirtualMemory(void* p, size_t size) noexcept {
  return releaseProcessMemory(static_cast<HANDLE>(0), p, size);
}
// Allocate `size` bytes of virtual memory in process `hProcess` (null means
// the current process). `flags` (kVMWritable / kVMExecutable) select the page
// protection. Returns null on failure; on success stores the page-rounded
// size to `allocated` (if non-null).
void* OSUtils::allocProcessMemory(HANDLE hProcess, size_t size, size_t* allocated, uint32_t flags) noexcept {
  if (size == 0)
    return nullptr;

  const VMemInfo& vmi = OSUtils_GetVMemInfo();
  if (!hProcess) hProcess = vmi.hCurrentProcess;

  // VirtualAllocEx rounds the allocated size to a page size automatically,
  // but we need the `alignedSize` so we can store the real allocated size
  // into `allocated` output.
  size_t alignedSize = Utils::alignTo(size, vmi.pageSize);

  // Windows XP SP2 / Vista+ allow data-execution-prevention (DEP).
  DWORD protectFlags = 0;
  if (flags & kVMExecutable)
    protectFlags |= (flags & kVMWritable) ? PAGE_EXECUTE_READWRITE : PAGE_EXECUTE_READ;
  else
    protectFlags |= (flags & kVMWritable) ? PAGE_READWRITE : PAGE_READONLY;

  LPVOID mBase = ::VirtualAllocEx(hProcess, nullptr, alignedSize, MEM_COMMIT | MEM_RESERVE, protectFlags);
  if (ASMJIT_UNLIKELY(!mBase)) return nullptr;

  ASMJIT_ASSERT(Utils::isAligned<size_t>(reinterpret_cast<size_t>(mBase), vmi.pageSize));
  if (allocated) *allocated = alignedSize;
  return mBase;
}
// Release memory previously allocated by `allocProcessMemory()`. Returns
// `kErrorInvalidState` if `VirtualFreeEx` rejects the pointer. The `size`
// argument is unused - MEM_RELEASE requires a zero size.
Error OSUtils::releaseProcessMemory(HANDLE hProcess, void* p, size_t size) noexcept {
  const VMemInfo& vmi = OSUtils_GetVMemInfo();
  if (!hProcess) hProcess = vmi.hCurrentProcess;

  if (ASMJIT_UNLIKELY(!::VirtualFreeEx(hProcess, p, 0, MEM_RELEASE)))
    return DebugUtils::errored(kErrorInvalidState);

  return kErrorOk;
}
#endif // ASMJIT_OS_WINDOWS
// Posix specific implementation using `mmap()` and `munmap()`.
#if ASMJIT_OS_POSIX
// Mac uses MAP_ANON instead of MAP_ANONYMOUS.
#if !defined(MAP_ANONYMOUS)
# define MAP_ANONYMOUS MAP_ANON
#endif // MAP_ANONYMOUS
// Get cached virtual-memory information (POSIX). The page size comes from
// `getpagesize()`; the allocation granularity is at least 64kB to mirror the
// Windows behavior. Initialized lazily on the first call.
//
// FIX: removed a stray ';' after the function body (-Wextra-semi warning).
static const VMemInfo& OSUtils_GetVMemInfo() noexcept {
  static VMemInfo vmi;
  if (ASMJIT_UNLIKELY(!vmi.pageSize)) {
    size_t pageSize = ::getpagesize();
    vmi.pageSize = pageSize;
    vmi.pageGranularity = std::max<size_t>(pageSize, 65536);
  }
  return vmi;
}
// Get virtual-memory information (cached, returned by value).
VMemInfo OSUtils::getVirtualMemoryInfo() noexcept { return OSUtils_GetVMemInfo(); }

// Allocate page-aligned virtual memory via mmap(); `flags` (kVMWritable /
// kVMExecutable) map to PROT_* protection bits; PROT_READ is always set.
// On success stores the page-rounded size to `allocated` (if non-null).
void* OSUtils::allocVirtualMemory(size_t size, size_t* allocated, uint32_t flags) noexcept {
  const VMemInfo& vmi = OSUtils_GetVMemInfo();
  size_t alignedSize = Utils::alignTo<size_t>(size, vmi.pageSize);

  int protection = PROT_READ;
  if (flags & kVMWritable  ) protection |= PROT_WRITE;
  if (flags & kVMExecutable) protection |= PROT_EXEC;

  void* mbase = ::mmap(nullptr, alignedSize, protection, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (ASMJIT_UNLIKELY(mbase == MAP_FAILED)) return nullptr;

  if (allocated) *allocated = alignedSize;
  return mbase;
}

// Unmap memory previously allocated by `allocVirtualMemory()`. Unlike the
// Windows implementation the exact `size` is required by munmap().
Error OSUtils::releaseVirtualMemory(void* p, size_t size) noexcept {
  if (ASMJIT_UNLIKELY(::munmap(p, size) != 0))
    return DebugUtils::errored(kErrorInvalidState);

  return kErrorOk;
}
#endif // ASMJIT_OS_POSIX
// ============================================================================
// [asmjit::OSUtils - GetTickCount]
// ============================================================================
#if ASMJIT_OS_WINDOWS
// Convert a QueryPerformanceCounter sample `now` to milliseconds using the
// precomputed frequency `freq` (ticks per millisecond), truncated to 32 bits.
static ASMJIT_INLINE uint32_t OSUtils_calcHiRes(const LARGE_INTEGER& now, double freq) noexcept {
  int64_t msec = static_cast<int64_t>(double(now.QuadPart) / freq);
  return static_cast<uint32_t>(msec & 0xFFFFFFFF);
}
// Get the current tick count in milliseconds, preferring the Hi-Res
// performance counter and falling back to `GetTickCount()`.
//
// `_hiResTicks` is a tri-state: 0 = not probed yet, 1 = QPC usable (and
// `_hiResFreq` valid), 0xFFFFFFFF = QPC unavailable.
//
// NOTE(review): `_hiResFreq` is written before the CAS publishes `_hiResTicks`,
// but both are plain `volatile` (non-atomic) variables, so cross-thread
// visibility ordering is not strictly guaranteed - looks benign, confirm.
uint32_t OSUtils::getTickCount() noexcept {
  static volatile uint32_t _hiResTicks;
  static volatile double _hiResFreq;

  do {
    uint32_t hiResOk = _hiResTicks;
    LARGE_INTEGER qpf, now;

    // If for whatever reason this fails, bail to `GetTickCount()`.
    if (!::QueryPerformanceCounter(&now)) break;

    // Expected - if we ran through this at least once `hiResTicks` will be
    // either 1 or 0xFFFFFFFF. If it's '1' then the Hi-Res counter is available
    // and `QueryPerformanceCounter()` can be used.
    if (hiResOk == 1) return OSUtils_calcHiRes(now, _hiResFreq);

    // Hi-Res counter is not available, bail to `GetTickCount()`.
    if (hiResOk != 0) break;

    // Detect availability of Hi-Res counter, if not available, bail to `GetTickCount()`.
    if (!::QueryPerformanceFrequency(&qpf)) {
      _InterlockedCompareExchange((LONG*)&_hiResTicks, 0xFFFFFFFF, 0);
      break;
    }

    double freq = double(qpf.QuadPart) / 1000.0;
    _hiResFreq = freq;

    _InterlockedCompareExchange((LONG*)&_hiResTicks, 1, 0);
    return OSUtils_calcHiRes(now, freq);
  } while (0);

  return ::GetTickCount();
}
#elif ASMJIT_OS_MAC
// Get the current tick count in milliseconds (Mac) using mach absolute time.
uint32_t OSUtils::getTickCount() noexcept {
  static mach_timebase_info_data_t _machTime;

  // See Apple's QA1398. The timebase has to be queried once; `denom == 0`
  // means it has not been initialized yet.
  //
  // BUG FIX: the condition previously used `||`, so the very first call
  // (denom == 0) short-circuited and returned 0 without ever calling
  // mach_timebase_info() - the function permanently returned 0 on Mac.
  if (ASMJIT_UNLIKELY(_machTime.denom == 0)) {
    if (mach_timebase_info(&_machTime) != KERN_SUCCESS)
      return 0;
  }

  // `mach_absolute_time()` returns nanoseconds, we want milliseconds.
  uint64_t t = mach_absolute_time() / 1000000;

  t = t * _machTime.numer / _machTime.denom;
  return static_cast<uint32_t>(t & 0xFFFFFFFFU);
}
#elif defined(_POSIX_MONOTONIC_CLOCK) && _POSIX_MONOTONIC_CLOCK >= 0
// Get the current tick count in milliseconds (POSIX) using the monotonic
// clock; returns 0 if `clock_gettime()` fails.
uint32_t OSUtils::getTickCount() noexcept {
  struct timespec now;
  if (ASMJIT_UNLIKELY(clock_gettime(CLOCK_MONOTONIC, &now) != 0))
    return 0;

  uint64_t msec = uint64_t(now.tv_sec) * 1000;
  msec += uint64_t(now.tv_nsec) / 1000000;
  return static_cast<uint32_t>(msec & 0xFFFFFFFFU);
}
#else
#error "[asmjit] OSUtils::getTickCount() is not implemented for your target OS."
uint32_t OSUtils::getTickCount() noexcept { return 0; }
#endif
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"

View file

@ -0,0 +1,178 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_BASE_OSUTILS_H
#define _ASMJIT_BASE_OSUTILS_H
// [Dependencies]
#include "../base/globals.h"
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
//! \addtogroup asmjit_base
//! \{
// ============================================================================
// [asmjit::VMemInfo]
// ============================================================================

//! Information about OS virtual memory, as returned by
//! `OSUtils::getVirtualMemoryInfo()`.
struct VMemInfo {
#if ASMJIT_OS_WINDOWS
  HANDLE hCurrentProcess;              //!< Handle of the current process (Windows).
#endif // ASMJIT_OS_WINDOWS
  size_t pageSize;                     //!< Virtual memory page size.
  size_t pageGranularity;              //!< Virtual memory page granularity.
};
// ============================================================================
// [asmjit::OSUtils]
// ============================================================================

//! OS utilities.
//!
//! Virtual Memory
//! --------------
//!
//! Provides functions to allocate and release virtual memory that is required
//! to execute dynamically generated code. If both processor and host OS support
//! data-execution-prevention (DEP) then the only way to run machine code is to
//! allocate virtual memory that has the `OSUtils::kVMExecutable` flag enabled.
//! All functions provided by OSUtils internally use platform specific APIs.
//!
//! Benchmarking
//! ------------
//!
//! OSUtils also provides a function `getTickCount()` that can be used for
//! benchmarking purposes. It's similar to Windows-only `GetTickCount()`, but
//! it's cross-platform and tries to use the most reliable platform specific
//! calls to make the result usable.
struct OSUtils {
  // --------------------------------------------------------------------------
  // [Virtual Memory]
  // --------------------------------------------------------------------------

  //! Virtual memory flags.
  ASMJIT_ENUM(VMFlags) {
    kVMWritable   = 0x00000001U,       //!< Virtual memory is writable.
    kVMExecutable = 0x00000002U        //!< Virtual memory is executable.
  };

  //! Get virtual-memory information (page size, granularity, ...).
  ASMJIT_API static VMemInfo getVirtualMemoryInfo() noexcept;

  //! Allocate virtual memory.
  //!
  //! The size actually allocated is page-rounded and stored into `allocated`
  //! if it's non-null.
  ASMJIT_API static void* allocVirtualMemory(size_t size, size_t* allocated, uint32_t flags) noexcept;
  //! Release virtual memory previously allocated by \ref allocVirtualMemory().
  ASMJIT_API static Error releaseVirtualMemory(void* p, size_t size) noexcept;

#if ASMJIT_OS_WINDOWS
  //! Allocate virtual memory of `hProcess` (Windows).
  ASMJIT_API static void* allocProcessMemory(HANDLE hProcess, size_t size, size_t* allocated, uint32_t flags) noexcept;
  //! Release virtual memory of `hProcess` (Windows).
  ASMJIT_API static Error releaseProcessMemory(HANDLE hProcess, void* p, size_t size) noexcept;
#endif // ASMJIT_OS_WINDOWS

  // --------------------------------------------------------------------------
  // [GetTickCount]
  // --------------------------------------------------------------------------

  //! Get the current CPU tick count, used for benchmarking (1ms resolution).
  ASMJIT_API static uint32_t getTickCount() noexcept;
};
// ============================================================================
// [asmjit::Lock]
// ============================================================================

//! \internal
//!
//! Lock - a thin wrapper over the native OS mutex (CRITICAL_SECTION on
//! Windows, `pthread_mutex_t` on POSIX). Non-copyable; pair every `lock()`
//! with a matching `unlock()` - or use `AutoLock` for scoped locking.
struct Lock {
  ASMJIT_NONCOPYABLE(Lock)

  // --------------------------------------------------------------------------
  // [Windows]
  // --------------------------------------------------------------------------

#if ASMJIT_OS_WINDOWS
  typedef CRITICAL_SECTION Handle;

  //! Create a new `Lock` instance.
  ASMJIT_INLINE Lock() noexcept { InitializeCriticalSection(&_handle); }
  //! Destroy the `Lock` instance.
  ASMJIT_INLINE ~Lock() noexcept { DeleteCriticalSection(&_handle); }

  //! Lock.
  ASMJIT_INLINE void lock() noexcept { EnterCriticalSection(&_handle); }
  //! Unlock.
  ASMJIT_INLINE void unlock() noexcept { LeaveCriticalSection(&_handle); }
#endif // ASMJIT_OS_WINDOWS

  // --------------------------------------------------------------------------
  // [Posix]
  // --------------------------------------------------------------------------

#if ASMJIT_OS_POSIX
  typedef pthread_mutex_t Handle;

  //! Create a new `Lock` instance.
  ASMJIT_INLINE Lock() noexcept { pthread_mutex_init(&_handle, nullptr); }
  //! Destroy the `Lock` instance.
  ASMJIT_INLINE ~Lock() noexcept { pthread_mutex_destroy(&_handle); }

  //! Lock.
  ASMJIT_INLINE void lock() noexcept { pthread_mutex_lock(&_handle); }
  //! Unlock.
  ASMJIT_INLINE void unlock() noexcept { pthread_mutex_unlock(&_handle); }
#endif // ASMJIT_OS_POSIX

  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------

  //! Native handle.
  Handle _handle;
};
// ============================================================================
// [asmjit::AutoLock]
// ============================================================================

//! \internal
//!
//! Scoped lock (RAII) - acquires `target` in the constructor and releases it
//! in the destructor, so the lock is released on every exit path.
struct AutoLock {
  ASMJIT_NONCOPYABLE(AutoLock)

  // --------------------------------------------------------------------------
  // [Construction / Destruction]
  // --------------------------------------------------------------------------

  //! Lock `target` for the lifetime of this object.
  ASMJIT_INLINE AutoLock(Lock& target) noexcept : _target(target) { _target.lock(); }
  //! Unlock the guarded `Lock`.
  ASMJIT_INLINE ~AutoLock() noexcept { _target.unlock(); }

  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------

  //! Reference to the `Lock`.
  Lock& _target;
};
//! \}
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"
// [Guard]
#endif // _ASMJIT_BASE_OSUTILS_H

View file

@ -0,0 +1,594 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Export]
#define ASMJIT_EXPORTS
// [Guard]
#include "../asmjit_build.h"
#if !defined(ASMJIT_DISABLE_COMPILER)
// [Dependencies]
#include "../base/regalloc_p.h"
#include "../base/utils.h"
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
// ============================================================================
// [asmjit::RAPass - Construction / Destruction]
// ============================================================================

// Create the register-allocation pass (registered as "RA" in the pipeline).
RAPass::RAPass() noexcept :
  CBPass("RA"),
  _varMapToVaListOffset(0) {}
RAPass::~RAPass() noexcept {}
// ============================================================================
// [asmjit::RAPass - Interface]
// ============================================================================

// Run register allocation on every function found in the code, one at a time.
// `zone` provides temporary memory valid only for the duration of the pass;
// it's detached again before returning. Stops at the first error.
Error RAPass::process(Zone* zone) noexcept {
  _zone = zone;
  _heap.reset(zone);
  _emitComments = (cb()->getGlobalOptions() & CodeEmitter::kOptionLoggingEnabled) != 0;

  Error err = kErrorOk;
  CBNode* node = cc()->getFirstNode();
  if (!node) return err;

  do {
    if (node->getType() == CBNode::kNodeFunc) {
      CCFunc* func = static_cast<CCFunc*>(node);
      node = func->getEnd();

      err = compile(func);
      if (err) break;
    }

    // Find a function by skipping all nodes that are not `kNodeFunc`.
    do {
      node = node->getNext();
    } while (node && node->getType() != CBNode::kNodeFunc);
  } while (node);

  _heap.reset(nullptr);
  _zone = nullptr;
  return err;
}
// Compile (register-allocate and translate) a single function `func`.
//
// Pipeline: prepare -> fetch -> removeUnreachableCode -> livenessAnalysis ->
// annotate (only when logging is enabled) -> translate. The first error
// short-circuits the remaining steps; `cleanup()` always runs.
Error RAPass::compile(CCFunc* func) noexcept {
  ASMJIT_PROPAGATE(prepare(func));

  Error err;
  do {
    err = fetch();
    if (err) break;

    err = removeUnreachableCode();
    if (err) break;

    err = livenessAnalysis();
    if (err) break;

#if !defined(ASMJIT_DISABLE_LOGGING)
    if (cc()->getGlobalOptions() & CodeEmitter::kOptionLoggingEnabled) {
      err = annotate();
      if (err) break;
    }
#endif // !ASMJIT_DISABLE_LOGGING

    err = translate();
  } while (false);

  cleanup();

  // We alter the compiler cursor, because it doesn't make sense to reference
  // it after compilation - some nodes may disappear and it's forbidden to add
  // new code after the compilation is done.
  cc()->_setCursor(nullptr);
  return err;
}
// Reset all per-function state before compiling `func` - the pass object is
// reused across functions, so every member must be reinitialized here.
Error RAPass::prepare(CCFunc* func) noexcept {
  CBNode* end = func->getEnd();

  // The node range to process is [func, _stop).
  _func = func;
  _stop = end->getNext();

  // Node lists and virtual-register context used by the later steps.
  _unreachableList.reset();
  _returningList.reset();
  _jccList.reset();
  _contextVd.reset();

  // Spill / stack cell bookkeeping, resolved by resolveCellOffsets().
  _memVarCells = nullptr;
  _memStackCells = nullptr;

  _mem1ByteVarsUsed = 0;
  _mem2ByteVarsUsed = 0;
  _mem4ByteVarsUsed = 0;
  _mem8ByteVarsUsed = 0;
  _mem16ByteVarsUsed = 0;
  _mem32ByteVarsUsed = 0;
  _mem64ByteVarsUsed = 0;
  _memStackCellsUsed = 0;

  _memMaxAlign = 0;
  _memVarTotal = 0;
  _memStackTotal = 0;
  _memAllTotal = 0;

  _annotationLength = 12;
  return kErrorOk;
}
void RAPass::cleanup() noexcept {
VirtReg** virtArray = _contextVd.getData();
size_t virtCount = _contextVd.getLength();
for (size_t i = 0; i < virtCount; i++) {
VirtReg* vreg = virtArray[i];
vreg->_raId = kInvalidValue;
vreg->resetPhysId();
}
_contextVd.reset();
}
// ============================================================================
// [asmjit::RAPass - Mem]
// ============================================================================
// Default alignment for a spill/stack area of `size` bytes: the smallest
// power of two greater than or equal to `size`, clamped to the 1..64 range.
static ASMJIT_INLINE uint32_t RAGetDefaultAlignment(uint32_t size) {
  uint32_t alignment = 1;
  while (alignment < size && alignment < 64)
    alignment <<= 1;
  return alignment;
}
// Allocate a memory cell (home/spill slot) for `vreg` and attach it.
//
// Stack variables get a stack cell inserted into the sorted stack-cell list;
// other variables get a var cell tracked in the per-size counters so that
// `resolveCellOffsets()` can assign offsets later. On allocation failure the
// function reports `kErrorNoHeapMemory` to the compiler and returns null.
//
// FIX: previously a `_newStackCell()` failure returned null silently while
// the non-stack path reported `kErrorNoHeapMemory`; both paths now report.
RACell* RAPass::_newVarCell(VirtReg* vreg) {
  ASMJIT_ASSERT(vreg->_memCell == nullptr);

  RACell* cell;
  uint32_t size = vreg->getSize();

  if (vreg->isStack()) {
    cell = _newStackCell(size, vreg->getAlignment());
    if (ASMJIT_UNLIKELY(!cell)) goto _NoMemory;
  }
  else {
    cell = static_cast<RACell*>(_zone->alloc(sizeof(RACell)));
    if (ASMJIT_UNLIKELY(!cell)) goto _NoMemory;

    cell->next = _memVarCells;
    cell->offset = 0;
    cell->size = size;
    cell->alignment = size;

    _memVarCells = cell;
    _memMaxAlign = std::max<uint32_t>(_memMaxAlign, size);
    _memVarTotal += size;

    // Track how many variables of each (power-of-two) size exist; used by
    // resolveCellOffsets() to lay out the home area bucketed by size.
    switch (size) {
      case  1: _mem1ByteVarsUsed++ ; break;
      case  2: _mem2ByteVarsUsed++ ; break;
      case  4: _mem4ByteVarsUsed++ ; break;
      case  8: _mem8ByteVarsUsed++ ; break;
      case 16: _mem16ByteVarsUsed++; break;
      case 32: _mem32ByteVarsUsed++; break;
      case 64: _mem64ByteVarsUsed++; break;

      default:
        ASMJIT_NOT_REACHED();
    }
  }

  vreg->_memCell = cell;
  return cell;

_NoMemory:
  cc()->setLastError(DebugUtils::errored(kErrorNoHeapMemory));
  return nullptr;
}
// Allocate a new stack cell of `size` bytes with the given `alignment`
// (0 means "derive a default alignment from the size"; capped at 64).
// Cells are kept sorted by decreasing alignment then decreasing size so
// `resolveCellOffsets()` can lay them out with minimal padding.
// Returns null (without reporting an error) if the zone allocation fails.
RACell* RAPass::_newStackCell(uint32_t size, uint32_t alignment) {
  RACell* cell = static_cast<RACell*>(_zone->alloc(sizeof(RACell)));
  if (ASMJIT_UNLIKELY(!cell)) return nullptr;

  if (alignment == 0)
    alignment = RAGetDefaultAlignment(size);

  if (alignment > 64)
    alignment = 64;
  ASMJIT_ASSERT(Utils::isPowerOf2(alignment));

  // Round the size up so consecutive cells keep the alignment.
  size = Utils::alignTo<uint32_t>(size, alignment);

  // Insert it sorted according to the alignment and size.
  {
    RACell** pPrev = &_memStackCells;
    RACell* cur = *pPrev;

    while (cur && ((cur->alignment > alignment) || (cur->alignment == alignment && cur->size > size))) {
      pPrev = &cur->next;
      cur = *pPrev;
    }

    cell->next = cur;
    cell->offset = 0;
    cell->size = size;
    cell->alignment = alignment;

    *pPrev = cell;
    _memStackCellsUsed++;

    _memMaxAlign = std::max<uint32_t>(_memMaxAlign, alignment);
    _memStackTotal += size;
  }

  return cell;
}
// Assign final offsets to all variable (home) cells and stack cells.
//
// Home slots are laid out bucketed by size, largest first (64-byte vars at
// offset 0, then 32, 16, ...), which keeps each bucket naturally aligned as
// long as the base of the area is sufficiently aligned. Stack cells follow,
// each aligned to its own requirement. The total size of the whole area is
// stored into `_memAllTotal`.
Error RAPass::resolveCellOffsets() {
  RACell* varCell = _memVarCells;
  RACell* stackCell = _memStackCells;

  // Starting offset of each size bucket.
  uint32_t pos64 = 0;
  uint32_t pos32 = pos64 + _mem64ByteVarsUsed * 64;
  uint32_t pos16 = pos32 + _mem32ByteVarsUsed * 32;
  uint32_t pos8  = pos16 + _mem16ByteVarsUsed * 16;
  uint32_t pos4  = pos8  + _mem8ByteVarsUsed  * 8 ;
  uint32_t pos2  = pos4  + _mem4ByteVarsUsed  * 4 ;
  uint32_t pos1  = pos2  + _mem2ByteVarsUsed  * 2 ;

  // Assign home slots.
  while (varCell) {
    uint32_t size = varCell->size;
    uint32_t offset = 0;

    switch (size) {
      case  1: offset = pos1 ; pos1  += 1 ; break;
      case  2: offset = pos2 ; pos2  += 2 ; break;
      case  4: offset = pos4 ; pos4  += 4 ; break;
      case  8: offset = pos8 ; pos8  += 8 ; break;
      case 16: offset = pos16; pos16 += 16; break;
      case 32: offset = pos32; pos32 += 32; break;
      case 64: offset = pos64; pos64 += 64; break;

      default:
        ASMJIT_NOT_REACHED();
    }

    varCell->offset = static_cast<int32_t>(offset);
    varCell = varCell->next;
  }

  // Assign stack slots (the list is pre-sorted by alignment, then size).
  uint32_t stackPos = pos1 + _mem1ByteVarsUsed;
  while (stackCell) {
    uint32_t size = stackCell->size;
    uint32_t alignment = stackCell->alignment;
    ASMJIT_ASSERT(alignment != 0 && Utils::isPowerOf2(alignment));

    stackPos = Utils::alignTo(stackPos, alignment);
    stackCell->offset = stackPos;
    stackCell = stackCell->next;

    stackPos += size;
  }

  _memAllTotal = stackPos;
  return kErrorOk;
}
// ============================================================================
// [asmjit::RAPass - RemoveUnreachableCode]
// ============================================================================
// Remove nodes that were never reached (nodes with no pass data attached).
// Within an unreachable region everything before the first label is removed;
// from the first label on only nodes marked removable are deleted, so jump
// targets and directives survive.
Error RAPass::removeUnreachableCode() {
  ZoneList<CBNode*>::Link* link = _unreachableList.getFirst();
  CBNode* stop = getStop();

  while (link) {
    CBNode* node = link->getValue();
    if (node && node->getPrev() && node != stop) {
      // Locate all unreachable nodes.
      CBNode* first = node;
      do {
        if (node->hasPassData()) break;
        node = node->getNext();
      } while (node != stop);

      // Remove unreachable nodes that are neither informative nor directives.
      if (node != first) {
        CBNode* end = node;
        node = first;

        // NOTE: The strategy is as follows:
        // 1. The algorithm removes everything until it finds a first label.
        // 2. After the first label is found it removes only removable nodes.
        bool removeEverything = true;
        do {
          CBNode* next = node->getNext();
          bool remove = node->isRemovable();

          if (!remove) {
            if (node->isLabel())
              removeEverything = false;
            remove = removeEverything;
          }

          if (remove)
            cc()->removeNode(node);

          node = next;
        } while (node != end);
      }
    }

    link = link->getNext();
  }

  return kErrorOk;
}
// ============================================================================
// [asmjit::RAPass - Liveness Analysis]
// ============================================================================
//! \internal
//!
//! Liveness-analysis work item: links a label (jump target) with the jump
//! that reached it - presumably stacked while `livenessAnalysis()` walks the
//! control flow backwards (TODO confirm; the traversal continues below).
struct LivenessTarget {
  LivenessTarget* prev;                //!< Previous target.
  CBLabel* node;                       //!< Target node.
  CBJump* from;                        //!< Jumped from.
};
//! Performs backward liveness analysis over the function's node list.
//!
//! Walks from every returning node (collected in `_returningList`) towards
//! the function entry, maintaining a working bit-vector `bCur` of virtual
//! registers live at the current position. Labels with inbound jumps are
//! handled through an explicit `LivenessTarget` stack so every control-flow
//! edge is followed; nodes already holding liveness bits are only re-patched
//! when new live bits appear (see the `Patch` loop).
Error RAPass::livenessAnalysis() {
  // One bit per virtual register, rounded up to whole machine words.
  uint32_t bLen = static_cast<uint32_t>(
    ((_contextVd.getLength() + RABits::kEntityBits - 1) / RABits::kEntityBits));

  // No variables.
  if (bLen == 0)
    return kErrorOk;

  CCFunc* func = getFunc();
  CBJump* from = nullptr;

  LivenessTarget* ltCur = nullptr;      // Stack of labels being processed.
  LivenessTarget* ltUnused = nullptr;   // Free-list of recycled targets.

  ZoneList<CBNode*>::Link* retPtr = _returningList.getFirst();
  ASMJIT_ASSERT(retPtr != nullptr);

  CBNode* node = retPtr->getValue();
  RAData* wd;

  size_t varMapToVaListOffset = _varMapToVaListOffset;
  RABits* bCur = newBits(bLen);
  if (ASMJIT_UNLIKELY(!bCur)) goto NoMem;

  // Allocate bits for code visited first time.
Visit:
  for (;;) {
    wd = node->getPassData<RAData>();
    if (wd->liveness) {
      // Node visited before - merge `bCur` into its liveness bits. If any
      // bit became newly live the upstream bits must be patched, otherwise
      // this path converged and we are done with it.
      if (bCur->_addBitsDelSource(wd->liveness, bCur, bLen))
        goto Patch;
      else
        goto Done;
    }

    // First visit - snapshot the current live-set for this node.
    RABits* bTmp = copyBits(bCur, bLen);
    if (!bTmp) goto NoMem;

    wd = node->getPassData<RAData>();
    wd->liveness = bTmp;

    uint32_t tiedTotal = wd->tiedTotal;
    // The TiedReg array is stored right after the (backend-specific) RAData.
    TiedReg* tiedArray = reinterpret_cast<TiedReg*>(((uint8_t*)wd) + varMapToVaListOffset);

    for (uint32_t i = 0; i < tiedTotal; i++) {
      TiedReg* tied = &tiedArray[i];
      VirtReg* vreg = tied->vreg;

      uint32_t flags = tied->flags;
      uint32_t raId = vreg->_raId;

      if ((flags & TiedReg::kWAll) && !(flags & TiedReg::kRAll)) {
        // Write-Only: live at this node, dead before it (walking backwards).
        bTmp->setBit(raId);
        bCur->delBit(raId);
      }
      else {
        // Read-Only or Read/Write: live at and before this node.
        bTmp->setBit(raId);
        bCur->setBit(raId);
      }
    }

    if (node->getType() == CBNode::kNodeLabel)
      goto Target;

    if (node == func)
      goto Done;

    ASMJIT_ASSERT(node->getPrev());
    node = node->getPrev();
  }

  // Patch already generated liveness bits.
Patch:
  for (;;) {
    ASMJIT_ASSERT(node->hasPassData());
    ASMJIT_ASSERT(node->getPassData<RAData>()->liveness != nullptr);

    RABits* bNode = node->getPassData<RAData>()->liveness;
    // Stop patching as soon as no new bits are contributed.
    if (!bNode->_addBitsDelSource(bCur, bLen)) goto Done;

    if (node->getType() == CBNode::kNodeLabel) goto Target;
    if (node == func) goto Done;

    node = node->getPrev();
  }

  // Reached a label - follow every jump that targets it.
Target:
  if (static_cast<CBLabel*>(node)->getNumRefs() != 0) {
    // Push a new LivenessTarget onto the stack if needed.
    if (!ltCur || ltCur->node != node) {
      // Allocate a new LivenessTarget object (from pool or zone).
      LivenessTarget* ltTmp = ltUnused;

      if (ltTmp) {
        ltUnused = ltUnused->prev;
      }
      else {
        ltTmp = _zone->allocT<LivenessTarget>(
          sizeof(LivenessTarget) - sizeof(RABits) + bLen * sizeof(uintptr_t));
        if (!ltTmp) goto NoMem;
      }

      // Initialize and make current - ltTmp->from will be set later on.
      ltTmp->prev = ltCur;
      ltTmp->node = static_cast<CBLabel*>(node);
      ltCur = ltTmp;

      from = static_cast<CBLabel*>(node)->getFrom();
      ASMJIT_ASSERT(from != nullptr);
    }
    else {
      // Already processing this label - continue with its next jump.
      from = ltCur->from;
      goto JumpNext;
    }

    // Visit/Patch.
    do {
      ltCur->from = from;
      bCur->copyBits(node->getPassData<RAData>()->liveness, bLen);

      if (!from->getPassData<RAData>()->liveness) {
        node = from;
        goto Visit;
      }

      // Issue #25: Moved 'JumpNext' here since it's important to patch
      // code again if there are more live variables than before.
JumpNext:
      if (bCur->delBits(from->getPassData<RAData>()->liveness, bLen)) {
        node = from;
        goto Patch;
      }

      from = from->getJumpNext();
    } while (from);

    // Pop the current LivenessTarget from the stack (recycle it).
    {
      LivenessTarget* ltTmp = ltCur;
      ltCur = ltCur->prev;
      ltTmp->prev = ltUnused;
      ltUnused = ltTmp;
    }
  }

  // Fall-through: continue walking backwards from the label.
  bCur->copyBits(node->getPassData<RAData>()->liveness, bLen);
  node = node->getPrev();

  // An unconditional jump (or missing pass data) terminates this path.
  if (node->isJmp() || !node->hasPassData()) goto Done;

  wd = node->getPassData<RAData>();
  if (!wd->liveness) goto Visit;
  if (bCur->delBits(wd->liveness, bLen)) goto Patch;

Done:
  // Resume a pending label target, if any.
  if (ltCur) {
    node = ltCur->node;
    from = ltCur->from;
    goto JumpNext;
  }

  // Start over from the next returning node.
  retPtr = retPtr->getNext();
  if (retPtr) {
    node = retPtr->getValue();
    goto Visit;
  }

  return kErrorOk;

NoMem:
  return DebugUtils::errored(kErrorNoHeapMemory);
}
// ============================================================================
// [asmjit::RAPass - Annotate]
// ============================================================================
//! Appends a per-node annotation to `dst`: the node's inline comment (if any)
//! followed by a liveness map with one character per virtual register.
//!
//! Map legend: '.' = live, 'r' = read, 'w' = write, 'x' = read/write;
//! uppercase means the register is unused (freed) after this node.
Error RAPass::formatInlineComment(StringBuilder& dst, CBNode* node) {
#if !defined(ASMJIT_DISABLE_LOGGING)
  RAData* wd = node->getPassData<RAData>();

  if (node->hasInlineComment())
    dst.appendString(node->getInlineComment());

  if (wd && wd->liveness) {
    // Pad so all liveness maps start at the same column.
    if (dst.getLength() < _annotationLength)
      dst.appendChars(' ', _annotationLength - dst.getLength());

    uint32_t vdCount = static_cast<uint32_t>(_contextVd.getLength());
    size_t offset = dst.getLength() + 1;

    // Reserve "[<one char per vreg>]" and fill the slots in below.
    dst.appendChar('[');
    dst.appendChars(' ', vdCount);
    dst.appendChar(']');

    RABits* liveness = wd->liveness;
    uint32_t i;

    // Mark all live registers first.
    for (i = 0; i < vdCount; i++) {
      if (liveness->getBit(i))
        dst.getData()[offset + i] = '.';
    }

    // Then overwrite the slots of registers tied to this node with their
    // access kind.
    uint32_t tiedTotal = wd->tiedTotal;
    TiedReg* tiedArray = reinterpret_cast<TiedReg*>(((uint8_t*)wd) + _varMapToVaListOffset);

    for (i = 0; i < tiedTotal; i++) {
      TiedReg* tied = &tiedArray[i];
      VirtReg* vreg = tied->vreg;
      uint32_t flags = tied->flags;

      char c = 'u';
      if ( (flags & TiedReg::kRAll) && !(flags & TiedReg::kWAll)) c = 'r';
      if (!(flags & TiedReg::kRAll) &&  (flags & TiedReg::kWAll)) c = 'w';
      if ( (flags & TiedReg::kRAll) &&  (flags & TiedReg::kWAll)) c = 'x';

      // Uppercase if unused.
      if ((flags & TiedReg::kUnuse)) c -= 'a' - 'A';

      ASMJIT_ASSERT(offset + vreg->_raId < dst.getLength());
      // Use the public accessor here as well (the original mixed
      // `dst.getData()` and direct `dst._data` access for the same buffer).
      dst.getData()[offset + vreg->_raId] = c;
    }
  }
#else
  // Logging disabled - silence unused-parameter warnings.
  ASMJIT_UNUSED(dst);
  ASMJIT_UNUSED(node);
#endif // !ASMJIT_DISABLE_LOGGING
  return kErrorOk;
}
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"
// [Guard]
#endif // !ASMJIT_DISABLE_COMPILER

View file

@ -0,0 +1,568 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_BASE_REGALLOC_P_H
#define _ASMJIT_BASE_REGALLOC_P_H
#include "../asmjit_build.h"
#if !defined(ASMJIT_DISABLE_COMPILER)
// [Dependencies]
#include "../base/codecompiler.h"
#include "../base/zone.h"
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
//! \addtogroup asmjit_base
//! \{
// ============================================================================
// [asmjit::TiedReg]
// ============================================================================
//! Tied register (CodeCompiler)
//!
//! Tied register is used to describe one or more register operands that share
//! the same virtual register. Tied register contains all the data that is
//! essential for register allocation.
struct TiedReg {
  //! Flags.
  ASMJIT_ENUM(Flags) {
    kRReg = 0x00000001U,                 //!< Register read.
    kWReg = 0x00000002U,                 //!< Register write.
    kXReg = 0x00000003U,                 //!< Register read-write.

    kRMem = 0x00000004U,                 //!< Memory read.
    kWMem = 0x00000008U,                 //!< Memory write.
    kXMem = 0x0000000CU,                 //!< Memory read-write.

    kRDecide = 0x00000010U,              //!< RA can decide between reg/mem read.
    kWDecide = 0x00000020U,              //!< RA can decide between reg/mem write.
    kXDecide = 0x00000030U,              //!< RA can decide between reg/mem read-write.

    kRFunc = 0x00000100U,                //!< Function argument passed in register.
    kWFunc = 0x00000200U,                //!< Function return value passed into register.
    kXFunc = 0x00000300U,                //!< Function argument and return value.
    kRCall = 0x00000400U,                //!< Function call operand.

    kSpill = 0x00000800U,                //!< Variable should be spilled.
    kUnuse = 0x00001000U,                //!< Variable should be unused at the end of the instruction/node.

    kRAll = kRReg | kRMem | kRDecide | kRFunc | kRCall, //!< All in-flags.
    kWAll = kWReg | kWMem | kWDecide | kWFunc,          //!< All out-flags.

    kRDone = 0x00400000U,                //!< Already allocated on the input.
    kWDone = 0x00800000U,                //!< Already allocated on the output.

    kX86GpbLo = 0x10000000U,             //!< X86-specific flag (used by the X86/X64 RA backend).
    kX86GpbHi = 0x20000000U,             //!< X86-specific flag (used by the X86/X64 RA backend).
    kX86Fld4  = 0x40000000U,             //!< X86-specific flag (used by the X86/X64 RA backend).
    kX86Fld8  = 0x80000000U              //!< X86-specific flag (used by the X86/X64 RA backend).
  };

  // --------------------------------------------------------------------------
  // [Init / Reset]
  // --------------------------------------------------------------------------

  //! Reset all members and associate with `vreg`; physical register ids start
  //! unassigned (`kInvalidRegId`).
  ASMJIT_INLINE void init(VirtReg* vreg, uint32_t flags = 0, uint32_t inRegs = 0, uint32_t allocableRegs = 0) noexcept {
    this->vreg = vreg;
    this->flags = flags;
    this->refCount = 0;
    this->inPhysId = Globals::kInvalidRegId;
    this->outPhysId = Globals::kInvalidRegId;
    this->reserved = 0;
    this->inRegs = inRegs;
    this->allocableRegs = allocableRegs;
  }

  // --------------------------------------------------------------------------
  // [Accessors]
  // --------------------------------------------------------------------------

  //! Get whether the variable has to be allocated in a specific input register.
  ASMJIT_INLINE uint32_t hasInPhysId() const { return inPhysId != Globals::kInvalidRegId; }
  //! Get whether the variable has to be allocated in a specific output register.
  ASMJIT_INLINE uint32_t hasOutPhysId() const { return outPhysId != Globals::kInvalidRegId; }

  //! Set the input register index.
  ASMJIT_INLINE void setInPhysId(uint32_t index) { inPhysId = static_cast<uint8_t>(index); }
  //! Set the output register index.
  ASMJIT_INLINE void setOutPhysId(uint32_t index) { outPhysId = static_cast<uint8_t>(index); }

  // --------------------------------------------------------------------------
  // [Operator Overload]
  // --------------------------------------------------------------------------

  //! Bitwise copy; TiedReg is plain data (POD with a union), so memcpy is a
  //! valid assignment.
  ASMJIT_INLINE TiedReg& operator=(const TiedReg& other) {
    ::memcpy(this, &other, sizeof(TiedReg));
    return *this;
  }

  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------

  //! Pointer to the associated \ref VirtReg.
  VirtReg* vreg;
  //! Tied flags.
  uint32_t flags;

  union {
    struct {
      //! How many times the variable is used by the instruction/node.
      uint8_t refCount;
      //! Input register index or `kInvalidReg` if it's not given.
      //!
      //! Even if the input register index is not given (i.e. it may by any
      //! register), register allocator should assign an index that will be
      //! used to persist a variable into this specific index. It's helpful
      //! in situations where one variable has to be allocated in multiple
      //! registers to determine the register which will be persistent.
      uint8_t inPhysId;
      //! Output register index or `kInvalidReg` if it's not given.
      //!
      //! Typically `kInvalidReg` if variable is only used on input.
      uint8_t outPhysId;
      //! \internal
      uint8_t reserved;
    };

    //! \internal
    //!
    //! Packed data #0.
    uint32_t packed;
  };

  //! Mandatory input registers.
  //!
  //! Mandatory input registers are required by the instruction even if
  //! there are duplicates. This schema allows us to allocate one variable
  //! in one or more register when needed. Required mostly by instructions
  //! that have implicit register operands (imul, cpuid, ...) and function
  //! call.
  uint32_t inRegs;

  //! Allocable input registers.
  //!
  //! Optional input registers is a mask of all allocable registers for a given
  //! variable where we have to pick one of them. This mask is usually not used
  //! when _inRegs is set. If both masks are used then the register
  //! allocator tries first to find an intersection between these and allocates
  //! an extra slot if not found.
  uint32_t allocableRegs;
};
// ============================================================================
// [asmjit::RABits]
// ============================================================================
//! Fixed size bit-array.
//!
//! Used by variable liveness analysis. The trailing `data[1]` member is
//! over-allocated when an instance is created from a Zone (see
//! `RAPass::newBits()` / `RAPass::copyBits()`), so the effective length is
//! always passed in as `len` (in machine words).
struct RABits {
  // --------------------------------------------------------------------------
  // [Enums]
  // --------------------------------------------------------------------------

  enum {
    kEntitySize = static_cast<int>(sizeof(uintptr_t)), //!< Bytes per machine word.
    kEntityBits = kEntitySize * 8                      //!< Bits per machine word.
  };

  // --------------------------------------------------------------------------
  // [Accessors]
  // --------------------------------------------------------------------------

  //! Get bit at `index`.
  ASMJIT_INLINE uintptr_t getBit(uint32_t index) const noexcept {
    return (data[index / kEntityBits] >> (index % kEntityBits)) & 1;
  }

  //! Set bit at `index` to 1.
  ASMJIT_INLINE void setBit(uint32_t index) noexcept {
    data[index / kEntityBits] |= static_cast<uintptr_t>(1) << (index % kEntityBits);
  }

  //! Clear bit at `index`.
  ASMJIT_INLINE void delBit(uint32_t index) noexcept {
    data[index / kEntityBits] &= ~(static_cast<uintptr_t>(1) << (index % kEntityBits));
  }

  // --------------------------------------------------------------------------
  // [Interface]
  // --------------------------------------------------------------------------

  //! Copy bits from `s0`, returns `true` if at least one bit is set in `s0`.
  ASMJIT_INLINE bool copyBits(const RABits* s0, uint32_t len) noexcept {
    uintptr_t any = 0;
    uint32_t i = 0;
    while (i < len) {
      uintptr_t word = s0->data[i];
      data[i] = word;
      any |= word;
      i++;
    }
    return any != 0;
  }

  //! `this |= s0`, returns `true` if the result has at least one bit set.
  ASMJIT_INLINE bool addBits(const RABits* s0, uint32_t len) noexcept {
    return addBits(this, s0, len);
  }

  //! `this = s0 | s1`, returns `true` if the result has at least one bit set.
  ASMJIT_INLINE bool addBits(const RABits* s0, const RABits* s1, uint32_t len) noexcept {
    uintptr_t any = 0;
    uint32_t i = 0;
    while (i < len) {
      uintptr_t word = s0->data[i] | s1->data[i];
      data[i] = word;
      any |= word;
      i++;
    }
    return any != 0;
  }

  //! `this &= s1`, returns `true` if the result has at least one bit set.
  ASMJIT_INLINE bool andBits(const RABits* s1, uint32_t len) noexcept {
    return andBits(this, s1, len);
  }

  //! `this = s0 & s1`, returns `true` if the result has at least one bit set.
  ASMJIT_INLINE bool andBits(const RABits* s0, const RABits* s1, uint32_t len) noexcept {
    uintptr_t any = 0;
    uint32_t i = 0;
    while (i < len) {
      uintptr_t word = s0->data[i] & s1->data[i];
      data[i] = word;
      any |= word;
      i++;
    }
    return any != 0;
  }

  //! `this &= ~s1`, returns `true` if the result has at least one bit set.
  ASMJIT_INLINE bool delBits(const RABits* s1, uint32_t len) noexcept {
    return delBits(this, s1, len);
  }

  //! `this = s0 & ~s1`, returns `true` if the result has at least one bit set.
  ASMJIT_INLINE bool delBits(const RABits* s0, const RABits* s1, uint32_t len) noexcept {
    uintptr_t any = 0;
    uint32_t i = 0;
    while (i < len) {
      uintptr_t word = s0->data[i] & ~s1->data[i];
      data[i] = word;
      any |= word;
      i++;
    }
    return any != 0;
  }

  //! `this |= s1; s1 &= ~this(old)` - see the three-operand overload.
  ASMJIT_INLINE bool _addBitsDelSource(RABits* s1, uint32_t len) noexcept {
    return _addBitsDelSource(this, s1, len);
  }

  //! `this = s0 | s1` while clearing from `s1` every bit already present in
  //! `s0`; returns `true` if `s1` still has at least one bit set (i.e. `s1`
  //! contributed something new).
  ASMJIT_INLINE bool _addBitsDelSource(const RABits* s0, RABits* s1, uint32_t len) noexcept {
    uintptr_t leftover = 0;
    uint32_t i = 0;
    while (i < len) {
      uintptr_t aWord = s0->data[i];
      uintptr_t bWord = s1->data[i];

      this->data[i] = aWord | bWord;
      bWord &= ~aWord;

      s1->data[i] = bWord;
      leftover |= bWord;
      i++;
    }
    return leftover != 0;
  }

  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------

  uintptr_t data[1];                     //!< Variable-length word array (over-allocated).
};
// ============================================================================
// [asmjit::RACell]
// ============================================================================
//! Register allocator's (RA) memory cell.
//!
//! Represents one slot of stack memory - either a spill slot for a virtual
//! register or explicitly requested stack memory. Cells form a singly-linked
//! list and their final offsets are assigned by `RAPass::resolveCellOffsets()`.
struct RACell {
  RACell* next;                          //!< Next active cell (linked list).
  int32_t offset;                        //!< Cell offset, relative to base-offset.
  uint32_t size;                         //!< Cell size (bytes).
  uint32_t alignment;                    //!< Cell alignment (bytes, power of 2).
};
// ============================================================================
// [asmjit::RAData]
// ============================================================================
//! Register allocator's (RA) data associated with each \ref CBNode.
//!
//! Backend-specific subclasses append a `TiedReg` array after this header;
//! the array is reached via the `RAPass::_varMapToVaListOffset` byte offset.
struct RAData {
  //! Create RAData for `tiedTotal` tied registers; `liveness` and `state`
  //! start null and are filled in by later passes.
  ASMJIT_INLINE RAData(uint32_t tiedTotal) noexcept
    : liveness(nullptr),
      state(nullptr),
      tiedTotal(tiedTotal) {}

  RABits* liveness;                      //!< Liveness bits (populated by liveness-analysis).
  RAState* state;                        //!< Optional saved \ref RAState.
  uint32_t tiedTotal;                    //!< Total count of \ref TiedReg regs.
};
// ============================================================================
// [asmjit::RAState]
// ============================================================================
//! Variables' state.
//!
//! Empty base type; each backend defines its own concrete state layout
//! (see `RAPass::saveState()` / `RAPass::loadState()`).
struct RAState {};
// ============================================================================
// [asmjit::RAPass]
// ============================================================================
//! \internal
//!
//! Register allocator pipeline used by \ref CodeCompiler.
struct RAPass : public CBPass {
public:
  ASMJIT_NONCOPYABLE(RAPass)

  //! Callback type used to trace a node (debugging aid).
  typedef void (ASMJIT_CDECL* TraceNodeFunc)(RAPass* self, CBNode* node_, const char* prefix);

  // --------------------------------------------------------------------------
  // [Construction / Destruction]
  // --------------------------------------------------------------------------

  RAPass() noexcept;
  virtual ~RAPass() noexcept;

  // --------------------------------------------------------------------------
  // [Interface]
  // --------------------------------------------------------------------------

  //! Implementation of \ref CBPass::process().
  virtual Error process(Zone* zone) noexcept override;

  //! Run the register allocator for a given function `func`.
  virtual Error compile(CCFunc* func) noexcept;

  //! Called by `compile()` to prepare the register allocator to process the
  //! given function. It should reset and set-up everything (i.e. no states
  //! from a previous compilation should prevail).
  virtual Error prepare(CCFunc* func) noexcept;

  //! Called after `compile()` to clean everything up, no matter if it
  //! succeeded or failed.
  virtual void cleanup() noexcept;

  // --------------------------------------------------------------------------
  // [Accessors]
  // --------------------------------------------------------------------------

  //! Get the associated `CodeCompiler`.
  ASMJIT_INLINE CodeCompiler* cc() const noexcept { return static_cast<CodeCompiler*>(_cb); }

  //! Get function.
  ASMJIT_INLINE CCFunc* getFunc() const noexcept { return _func; }
  //! Get stop node.
  ASMJIT_INLINE CBNode* getStop() const noexcept { return _stop; }

  // --------------------------------------------------------------------------
  // [State]
  // --------------------------------------------------------------------------

  //! Get current state.
  ASMJIT_INLINE RAState* getState() const { return _state; }

  //! Load current state from `src` state.
  virtual void loadState(RAState* src) = 0;
  //! Save current state, returning new `RAState` instance.
  virtual RAState* saveState() = 0;

  //! Change the current state to `src` state.
  virtual void switchState(RAState* src) = 0;
  //! Change the current state to the intersection of two states `a` and `b`.
  virtual void intersectStates(RAState* a, RAState* b) = 0;

  // --------------------------------------------------------------------------
  // [Context]
  // --------------------------------------------------------------------------

  //! Assign a register-allocator id to `vreg` and register it in `_contextVd`
  //! (no-op if it already has one).
  ASMJIT_INLINE Error assignRAId(VirtReg* vreg) noexcept {
    // Likely as a single virtual register would be mostly used more than once,
    // this means that each virtual register will hit one bad case (doesn't
    // have id) and then all likely cases.
    if (ASMJIT_LIKELY(vreg->_raId != kInvalidValue)) return kErrorOk;

    uint32_t raId = static_cast<uint32_t>(_contextVd.getLength());
    ASMJIT_PROPAGATE(_contextVd.append(&_heap, vreg));

    vreg->_raId = raId;
    return kErrorOk;
  }

  // --------------------------------------------------------------------------
  // [Mem]
  // --------------------------------------------------------------------------

  //! Allocate a new spill cell for `vreg`.
  RACell* _newVarCell(VirtReg* vreg);
  //! Allocate a new stack memory cell of `size` / `alignment`.
  RACell* _newStackCell(uint32_t size, uint32_t alignment);

  //! Get (or lazily create) the spill cell associated with `vreg`.
  ASMJIT_INLINE RACell* getVarCell(VirtReg* vreg) {
    RACell* cell = vreg->getMemCell();
    return cell ? cell : _newVarCell(vreg);
  }

  //! Assign final offsets to all variable and stack cells.
  virtual Error resolveCellOffsets();

  // --------------------------------------------------------------------------
  // [Bits]
  // --------------------------------------------------------------------------

  //! Allocate a zeroed bit-array of `len` machine words from the zone.
  ASMJIT_INLINE RABits* newBits(uint32_t len) {
    return static_cast<RABits*>(
      _zone->allocZeroed(static_cast<size_t>(len) * RABits::kEntitySize));
  }

  //! Duplicate the bit-array `src` of `len` machine words in the zone.
  ASMJIT_INLINE RABits* copyBits(const RABits* src, uint32_t len) {
    return static_cast<RABits*>(
      _zone->dup(src, static_cast<size_t>(len) * RABits::kEntitySize));
  }

  // --------------------------------------------------------------------------
  // [Fetch]
  // --------------------------------------------------------------------------

  //! Fetch.
  //!
  //! Fetch iterates over all nodes and gathers information about all variables
  //! used. The process generates information required by register allocator,
  //! variable liveness analysis and translator.
  virtual Error fetch() = 0;

  // --------------------------------------------------------------------------
  // [Unreachable Code]
  // --------------------------------------------------------------------------

  //! Add unreachable-flow data to the unreachable flow list.
  ASMJIT_INLINE Error addUnreachableNode(CBNode* node) {
    ZoneList<CBNode*>::Link* link = _zone->allocT<ZoneList<CBNode*>::Link>();
    if (!link) return DebugUtils::errored(kErrorNoHeapMemory);

    link->setValue(node);
    _unreachableList.append(link);

    return kErrorOk;
  }

  //! Remove unreachable code.
  virtual Error removeUnreachableCode();

  // --------------------------------------------------------------------------
  // [Code-Flow]
  // --------------------------------------------------------------------------

  //! Add returning node (i.e. node that returns and where liveness analysis
  //! should start).
  ASMJIT_INLINE Error addReturningNode(CBNode* node) {
    ZoneList<CBNode*>::Link* link = _zone->allocT<ZoneList<CBNode*>::Link>();
    if (!link) return DebugUtils::errored(kErrorNoHeapMemory);

    link->setValue(node);
    _returningList.append(link);

    return kErrorOk;
  }

  //! Add jump-flow data to the jcc flow list.
  ASMJIT_INLINE Error addJccNode(CBNode* node) {
    ZoneList<CBNode*>::Link* link = _zone->allocT<ZoneList<CBNode*>::Link>();
    if (!link) return DebugUtils::errored(kErrorNoHeapMemory);

    link->setValue(node);
    _jccList.append(link);

    return kErrorOk;
  }

  // --------------------------------------------------------------------------
  // [Analyze]
  // --------------------------------------------------------------------------

  //! Perform variable liveness analysis.
  //!
  //! Analysis phase iterates over nodes in reverse order and generates a bit
  //! array describing variables that are alive at every node in the function.
  //! When the analysis start all variables are assumed dead. When a read or
  //! read/write operations of a variable is detected the variable becomes
  //! alive; when only write operation is detected the variable becomes dead.
  //!
  //! When a label is found all jumps to that label are followed and analysis
  //! repeats until all variables are resolved.
  virtual Error livenessAnalysis();

  // --------------------------------------------------------------------------
  // [Annotate]
  // --------------------------------------------------------------------------

  virtual Error annotate() = 0;
  //! Append an inline comment plus liveness map for `node` to `dst`.
  virtual Error formatInlineComment(StringBuilder& dst, CBNode* node);

  // --------------------------------------------------------------------------
  // [Translate]
  // --------------------------------------------------------------------------

  //! Translate code by allocating registers and handling state changes.
  virtual Error translate() = 0;

  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------

  Zone* _zone;                           //!< Zone passed to `process()`.
  ZoneHeap _heap;                        //!< ZoneHeap that uses `_zone`.

  CCFunc* _func;                         //!< Function being processed.
  CBNode* _stop;                         //!< Stop node.

  //! \internal
  //!
  //! Offset (how many bytes to add) to `VarMap` to get `TiedReg` array. Used
  //! by liveness analysis shared across all backends. This is needed because
  //! `VarMap` is a base class for a specialized version that liveness analysis
  //! doesn't use, it just needs `TiedReg` array.
  uint32_t _varMapToVaListOffset;

  uint8_t _emitComments;                 //!< Whether to emit comments.

  ZoneList<CBNode*> _unreachableList;    //!< Unreachable nodes.
  ZoneList<CBNode*> _returningList;      //!< Returning nodes.
  ZoneList<CBNode*> _jccList;            //!< Jump nodes.

  ZoneVector<VirtReg*> _contextVd;       //!< All variables used by the current function.

  RACell* _memVarCells;                  //!< Memory used to spill variables.
  RACell* _memStackCells;                //!< Memory used to allocate memory on the stack.

  uint32_t _mem1ByteVarsUsed;            //!< Count of 1-byte cells.
  uint32_t _mem2ByteVarsUsed;            //!< Count of 2-byte cells.
  uint32_t _mem4ByteVarsUsed;            //!< Count of 4-byte cells.
  uint32_t _mem8ByteVarsUsed;            //!< Count of 8-byte cells.
  uint32_t _mem16ByteVarsUsed;           //!< Count of 16-byte cells.
  uint32_t _mem32ByteVarsUsed;           //!< Count of 32-byte cells.
  uint32_t _mem64ByteVarsUsed;           //!< Count of 64-byte cells.
  uint32_t _memStackCellsUsed;           //!< Count of stack memory cells.

  uint32_t _memMaxAlign;                 //!< Maximum memory alignment used by the function.
  uint32_t _memVarTotal;                 //!< Count of bytes used by variables.
  uint32_t _memStackTotal;               //!< Count of bytes used by stack.
  uint32_t _memAllTotal;                 //!< Count of bytes used by variables and stack after alignment.

  uint32_t _annotationLength;            //!< Default length of an annotated instruction.
  RAState* _state;                       //!< Current RA state.
};
//! \}
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"
// [Guard]
#endif // !ASMJIT_DISABLE_COMPILER
#endif // _ASMJIT_BASE_REGALLOC_P_H

View file

@ -0,0 +1,147 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Export]
#define ASMJIT_EXPORTS
// [Dependencies]
#include "../base/assembler.h"
#include "../base/cpuinfo.h"
#include "../base/runtime.h"
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
//! Flush the CPU instruction cache for the range `[p, p + size)`.
//!
//! Required on architectures whose instruction caches are not coherent with
//! data writes; compiled to a no-op on X86/X64.
static ASMJIT_INLINE void hostFlushInstructionCache(const void* p, size_t size) noexcept {
  // Only useful on non-x86 architectures.
#if !ASMJIT_ARCH_X86 && !ASMJIT_ARCH_X64
# if ASMJIT_OS_WINDOWS
  // Windows has a built-in support in kernel32.dll.
  //
  // NOTE: This is a static free function with no access to any `VMemMgr`
  // instance, so the current process handle is used directly. The previous
  // code referenced `_memMgr.getProcessHandle()`, a member that doesn't
  // exist in this scope and would fail to compile on Windows/ARM.
  ::FlushInstructionCache(::GetCurrentProcess(), p, size);
# endif // ASMJIT_OS_WINDOWS
#else
  ASMJIT_UNUSED(p);
  ASMJIT_UNUSED(size);
#endif // !ASMJIT_ARCH_X86 && !ASMJIT_ARCH_X64
}
//! Detect the natural stack alignment guaranteed by the host OS/ABI.
static ASMJIT_INLINE uint32_t hostDetectNaturalStackAlignment() noexcept {
  // Alignment is assumed to match the pointer-size by default.
  uint32_t alignment = sizeof(intptr_t);

  // X86 & X64
  // ---------
  //
  //   - 32-bit X86 requires stack to be aligned to 4 bytes. Modern Linux, Mac
  //     and UNIX guarantees 16-byte stack alignment even on 32-bit. I'm not
  //     sure about all other UNIX operating systems, because 16-byte alignment
  //     is an addition to an older specification.
  //   - 64-bit X86 requires stack to be aligned to at least 16 bytes.
#if ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64
  int kIsModernOS = ASMJIT_OS_LINUX  || // Linux & ANDROID.
                    ASMJIT_OS_MAC    || // OSX and iOS.
                    ASMJIT_OS_BSD    ;  // BSD variants.
  alignment = ASMJIT_ARCH_X64 || kIsModernOS ? 16 : 4;
#endif

  // ARM32 & ARM64
  // -------------
  //
  //   - 32-bit ARM requires stack to be aligned to 8 bytes.
  //   - 64-bit ARM requires stack to be aligned to 16 bytes.
#if ASMJIT_ARCH_ARM32 || ASMJIT_ARCH_ARM64
  alignment = ASMJIT_ARCH_ARM32 ? 8 : 16;
#endif

  return alignment;
}
// ============================================================================
// [asmjit::Runtime - Construction / Destruction]
// ============================================================================
// Initializes an empty runtime: default code-info, no runtime type yet,
// freeable allocations by default.
Runtime::Runtime() noexcept
  : _codeInfo(),
    _runtimeType(kRuntimeNone),
    _allocType(VMemMgr::kAllocFreeable) {}
Runtime::~Runtime() noexcept {}
// ============================================================================
// [asmjit::HostRuntime - Construction / Destruction]
// ============================================================================
// A host runtime targets the machine it runs on, so CodeInfo is populated
// from the host CPU and the host calling conventions.
HostRuntime::HostRuntime() noexcept {
  _runtimeType = kRuntimeJit;

  // Setup the CodeInfo of this Runtime.
  _codeInfo._archInfo       = CpuInfo::getHost().getArchInfo();
  _codeInfo._stackAlignment = static_cast<uint8_t>(hostDetectNaturalStackAlignment());
  _codeInfo._cdeclCallConv  = CallConv::kIdHostCDecl;
  _codeInfo._stdCallConv    = CallConv::kIdHostStdCall;
  _codeInfo._fastCallConv   = CallConv::kIdHostFastCall;
}
HostRuntime::~HostRuntime() noexcept {}
// ============================================================================
// [asmjit::HostRuntime - Interface]
// ============================================================================
// Flush the instruction cache for `[p, p + size)` by delegating to the
// platform helper (no-op on X86/X64).
void HostRuntime::flush(const void* p, size_t size) noexcept {
  hostFlushInstructionCache(p, size);
}
// ============================================================================
// [asmjit::JitRuntime - Construction / Destruction]
// ============================================================================
// Nothing to do - HostRuntime's constructor already sets kRuntimeJit and
// fills in the host CodeInfo.
JitRuntime::JitRuntime() noexcept {}
JitRuntime::~JitRuntime() noexcept {}
// ============================================================================
// [asmjit::JitRuntime - Interface]
// ============================================================================
//! Allocates executable memory for `code`, relocates the generated code into
//! it and stores the resulting pointer in `*dst`.
//!
//! On every failure path `*dst` ends up null and an error code is returned,
//! so callers never observe a stale pointer.
Error JitRuntime::_add(void** dst, CodeHolder* code) noexcept {
  // Assume failure until the code has been successfully placed.
  *dst = nullptr;

  size_t estimatedSize = code->getCodeSize();
  if (ASMJIT_UNLIKELY(estimatedSize == 0))
    return DebugUtils::errored(kErrorNoCodeGenerated);

  void* mem = _memMgr.alloc(estimatedSize, getAllocType());
  if (ASMJIT_UNLIKELY(!mem))
    return DebugUtils::errored(kErrorNoVirtualMemory);

  // Relocate the code and release the unused memory back to `VMemMgr`.
  size_t finalSize = code->relocate(mem);
  if (ASMJIT_UNLIKELY(finalSize == 0)) {
    _memMgr.release(mem);
    return DebugUtils::errored(kErrorInvalidState);
  }

  if (finalSize < estimatedSize)
    _memMgr.shrink(mem, finalSize);

  // Make the relocated code visible to the instruction stream.
  flush(mem, finalSize);

  *dst = mem;
  return kErrorOk;
}
// Release memory previously returned by `_add()` back to the virtual
// memory manager.
Error JitRuntime::_release(void* p) noexcept {
  return _memMgr.release(p);
}
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"

View file

@ -0,0 +1,198 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_BASE_RUNTIME_H
#define _ASMJIT_BASE_RUNTIME_H
// [Dependencies]
#include "../base/codeholder.h"
#include "../base/vmem.h"
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
// ============================================================================
// [Forward Declarations]
// ============================================================================
class CodeHolder;
//! \addtogroup asmjit_base
//! \{
// ============================================================================
// [asmjit::Runtime]
// ============================================================================
//! Base runtime.
//!
//! A Runtime abstracts where generated code lives and how it is added and
//! released; `_add()` / `_release()` are implemented by concrete runtimes
//! (see \ref JitRuntime).
class ASMJIT_VIRTAPI Runtime {
public:
  ASMJIT_NONCOPYABLE(Runtime)

  //! Runtime type, returned by \ref getRuntimeType().
  ASMJIT_ENUM(RuntimeType) {
    kRuntimeNone = 0,                    //!< Uninitialized / unknown runtime.
    kRuntimeJit = 1,                     //!< JIT runtime (same process).
    kRuntimeRemote = 2                   //!< Remote runtime.
  };

  // --------------------------------------------------------------------------
  // [Construction / Destruction]
  // --------------------------------------------------------------------------

  //! Create a `Runtime` instance.
  ASMJIT_API Runtime() noexcept;
  //! Destroy the `Runtime` instance.
  ASMJIT_API virtual ~Runtime() noexcept;

  // --------------------------------------------------------------------------
  // [Accessors]
  // --------------------------------------------------------------------------

  //! Get CodeInfo of this runtime.
  //!
  //! CodeInfo can be used to setup a CodeHolder in case you plan to generate a
  //! code compatible and executable by this Runtime.
  ASMJIT_INLINE const CodeInfo& getCodeInfo() const noexcept { return _codeInfo; }

  //! Get the Runtime's architecture type, see \ref ArchInfo::Type.
  ASMJIT_INLINE uint32_t getArchType() const noexcept { return _codeInfo.getArchType(); }
  //! Get the Runtime's architecture sub-type, see \ref ArchInfo::SubType.
  ASMJIT_INLINE uint32_t getArchSubType() const noexcept { return _codeInfo.getArchSubType(); }

  //! Get the runtime type, see \ref RuntimeType.
  ASMJIT_INLINE uint32_t getRuntimeType() const noexcept { return _runtimeType; }

  // --------------------------------------------------------------------------
  // [Interface]
  // --------------------------------------------------------------------------

  // NOTE: To allow passing function pointers to `add()` and `release()` the
  // virtual methods are prefixed with `_` and called from templates.

  //! Type-safe wrapper around `_add()`.
  template<typename Func>
  ASMJIT_INLINE Error add(Func* dst, CodeHolder* code) noexcept {
    return _add(Internal::ptr_cast<void**, Func*>(dst), code);
  }

  //! Type-safe wrapper around `_release()`.
  template<typename Func>
  ASMJIT_INLINE Error release(Func dst) noexcept {
    return _release(Internal::ptr_cast<void*, Func>(dst));
  }

  //! Allocate a memory needed for a code stored in the \ref CodeHolder and
  //! relocate it to the target location.
  //!
  //! The beginning of the memory allocated for the function is returned in
  //! `dst`. If failed the \ref Error code is returned and `dst` is set to null
  //! (this means that you don't have to set it to null before calling `add()`).
  virtual Error _add(void** dst, CodeHolder* code) noexcept = 0;

  //! Release `p` allocated by `add()`.
  virtual Error _release(void* p) noexcept = 0;

  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------

  CodeInfo _codeInfo;                    //!< Basic information about the Runtime's code.
  uint8_t _runtimeType;                  //!< Type of the runtime.
  uint8_t _allocType;                    //!< Type of the allocator the Runtime uses.
  uint8_t _reserved[6];                  //!< \internal
};
// ============================================================================
// [asmjit::HostRuntime]
// ============================================================================
//! Runtime designed to be used in the same process the code is generated in.
class ASMJIT_VIRTAPI HostRuntime : public Runtime {
public:
  ASMJIT_NONCOPYABLE(HostRuntime)

  // --------------------------------------------------------------------------
  // [Construction / Destruction]
  // --------------------------------------------------------------------------

  //! Create a `HostRuntime` instance.
  ASMJIT_API HostRuntime() noexcept;
  //! Destroy the `HostRuntime` instance.
  ASMJIT_API virtual ~HostRuntime() noexcept;

  // --------------------------------------------------------------------------
  // [Interface]
  // --------------------------------------------------------------------------

  //! Flush an instruction cache.
  //!
  //! This member function is called after the code has been copied to the
  //! destination buffer. It is only useful for JIT code generation as it
  //! causes a flush of the processor's cache.
  //!
  //! Flushing is basically a NOP under X86/X64, but is needed by architectures
  //! that do not have a transparent instruction cache like ARM.
  //!
  //! This function can also be overridden to improve compatibility with tools
  //! such as Valgrind, however, it's not an official part of AsmJit.
  ASMJIT_API virtual void flush(const void* p, size_t size) noexcept;
};
// ============================================================================
// [asmjit::JitRuntime]
// ============================================================================
//! Runtime designed to store and execute code generated at runtime (JIT).
class ASMJIT_VIRTAPI JitRuntime : public HostRuntime {
public:
  ASMJIT_NONCOPYABLE(JitRuntime)

  // --------------------------------------------------------------------------
  // [Construction / Destruction]
  // --------------------------------------------------------------------------

  //! Create a `JitRuntime` instance.
  ASMJIT_API JitRuntime() noexcept;
  //! Destroy the `JitRuntime` instance.
  ASMJIT_API virtual ~JitRuntime() noexcept;

  // --------------------------------------------------------------------------
  // [Accessors]
  // --------------------------------------------------------------------------

  //! Get the type of allocation.
  ASMJIT_INLINE uint32_t getAllocType() const noexcept { return _allocType; }
  //! Set the type of allocation.
  ASMJIT_INLINE void setAllocType(uint32_t allocType) noexcept { _allocType = allocType; }

  //! Get the virtual memory manager.
  //!
  //! NOTE: `const_cast` is used here to hand out a mutable manager even from
  //! a const runtime, matching the rest of the API.
  ASMJIT_INLINE VMemMgr* getMemMgr() const noexcept { return const_cast<VMemMgr*>(&_memMgr); }

  // --------------------------------------------------------------------------
  // [Interface]
  // --------------------------------------------------------------------------

  ASMJIT_API Error _add(void** dst, CodeHolder* code) noexcept override;
  ASMJIT_API Error _release(void* p) noexcept override;

  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------

  //! Virtual memory manager.
  VMemMgr _memMgr;
};
//! \}
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"
// [Guard]
#endif // _ASMJIT_BASE_RUNTIME_H

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,353 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Export]
#define ASMJIT_EXPORTS
// [Dependencies]
#include "../base/string.h"
#include "../base/utils.h"
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
// ============================================================================
// [asmjit::StringBuilder - Construction / Destruction]
// ============================================================================
// Should be placed in read-only memory.
static const char StringBuilder_empty[4] = { 0 };

// Construct an empty builder that points at the shared zero-length buffer.
// No heap allocation happens until the first set/append operation.
StringBuilder::StringBuilder() noexcept
  : _data(const_cast<char*>(StringBuilder_empty)),
    _length(0),
    _capacity(0),
    _canFree(false) {}

// Release the heap buffer, but only if this builder owns it (`_canFree`);
// `StringBuilderTmp` points `_data` at stack storage which must not be freed.
StringBuilder::~StringBuilder() noexcept {
  if (_canFree)
    Internal::releaseMemory(_data);
}
// ============================================================================
// [asmjit::StringBuilder - Prepare / Reserve]
// ============================================================================
// Make room for `len` bytes of payload and return the write position:
//   - kStringOpSet   : the whole buffer is reused; returns `_data`.
//   - kStringOpAppend: the buffer may grow; returns `_data + old length`.
// Returns null only on allocation failure / size overflow. The terminating
// null byte is written here; the caller just fills the returned region.
ASMJIT_FAVOR_SIZE char* StringBuilder::prepare(uint32_t op, size_t len) noexcept {
  if (op == kStringOpSet) {
    // We don't care here, but we can't return a null pointer since it indicates
    // failure in memory allocation.
    if (len == 0) {
      if (_data != StringBuilder_empty)
        _data[0] = 0;

      _length = 0;
      return _data;
    }

    if (_capacity < len) {
      // Reject sizes that would overflow the padded allocation below.
      if (len >= IntTraits<size_t>::maxValue() - sizeof(intptr_t) * 2)
        return nullptr;

      // Round up to pointer size; minimum allocation is ~256 bytes.
      size_t to = Utils::alignTo<size_t>(len, sizeof(intptr_t));
      if (to < 256 - sizeof(intptr_t))
        to = 256 - sizeof(intptr_t);

      char* newData = static_cast<char*>(Internal::allocMemory(to + sizeof(intptr_t)));
      if (!newData) {
        clear();
        return nullptr;
      }

      if (_canFree)
        Internal::releaseMemory(_data);

      _data = newData;
      _capacity = to + sizeof(intptr_t) - 1;
      _canFree = true;
    }

    _data[len] = 0;
    _length = len;

    ASMJIT_ASSERT(_length <= _capacity);
    return _data;
  }
  else {
    // We don't care here, but we can't return a null pointer since it indicates
    // failure of memory allocation.
    if (len == 0)
      return _data + _length;

    // Overflow.
    if (IntTraits<size_t>::maxValue() - sizeof(intptr_t) * 2 - _length < len)
      return nullptr;

    size_t after = _length + len;
    if (_capacity < after) {
      size_t to = _capacity;

      // Growth policy: double up to 1MB, then align up to 32kB chunks.
      if (to < 256)
        to = 256;

      while (to < 1024 * 1024 && to < after)
        to *= 2;

      if (to < after) {
        to = after;
        if (to < (IntTraits<size_t>::maxValue() - 1024 * 32))
          to = Utils::alignTo<size_t>(to, 1024 * 32);
      }

      to = Utils::alignTo<size_t>(to, sizeof(intptr_t));
      char* newData = static_cast<char*>(Internal::allocMemory(to + sizeof(intptr_t)));
      if (!newData) return nullptr;

      ::memcpy(newData, _data, _length);
      if (_canFree)
        Internal::releaseMemory(_data);

      _data = newData;
      _capacity = to + sizeof(intptr_t) - 1;
      _canFree = true;
    }

    char* ret = _data + _length;
    _data[after] = 0;
    _length = after;

    ASMJIT_ASSERT(_length <= _capacity);
    return ret;
  }
}
// Ensure the builder can hold at least `to` bytes without reallocating.
// Existing content (including the null terminator) is preserved; the
// capacity never shrinks.
ASMJIT_FAVOR_SIZE Error StringBuilder::reserve(size_t to) noexcept {
  if (_capacity >= to)
    return kErrorOk;

  // Reject sizes that would overflow the padded allocation below.
  if (to >= IntTraits<size_t>::maxValue() - sizeof(intptr_t) * 2)
    return DebugUtils::errored(kErrorNoHeapMemory);

  to = Utils::alignTo<size_t>(to, sizeof(intptr_t));
  char* newData = static_cast<char*>(Internal::allocMemory(to + sizeof(intptr_t)));

  if (!newData)
    return DebugUtils::errored(kErrorNoHeapMemory);

  // `_length + 1` copies the null terminator as well.
  ::memcpy(newData, _data, _length + 1);
  if (_canFree)
    Internal::releaseMemory(_data);

  _data = newData;
  _capacity = to + sizeof(intptr_t) - 1;
  _canFree = true;
  return kErrorOk;
}
// ============================================================================
// [asmjit::StringBuilder - Clear]
// ============================================================================
// Reset the builder to an empty string.
//
// The allocated buffer (and its capacity) is intentionally kept so that
// subsequent operations can reuse it; only the logical length is reset and
// the terminator is rewritten.
void StringBuilder::clear() noexcept {
  _length = 0;
  if (_data != StringBuilder_empty)
    _data[0] = '\0';
}
// ============================================================================
// [asmjit::StringBuilder - Methods]
// ============================================================================
// Set or append the string `str` of `len` bytes; when `len` is
// `kInvalidIndex` the string is treated as null-terminated (a null `str`
// counts as an empty string). Returns `kErrorNoHeapMemory` on allocation
// failure.
Error StringBuilder::_opString(uint32_t op, const char* str, size_t len) noexcept {
  if (len == Globals::kInvalidIndex)
    len = str ? ::strlen(str) : static_cast<size_t>(0);

  char* p = prepare(op, len);
  if (!p) return DebugUtils::errored(kErrorNoHeapMemory);

  // Guard the copy: `str` may be null when `len` is zero, and calling
  // memcpy() with a null pointer is undefined behavior even for size 0.
  if (len)
    ::memcpy(p, str, len);
  return kErrorOk;
}
// Write a single character `c` using the given operation (set or append).
Error StringBuilder::_opChar(uint32_t op, char c) noexcept {
  char* dst = prepare(op, 1);
  if (dst == nullptr)
    return DebugUtils::errored(kErrorNoHeapMemory);

  dst[0] = c;
  return kErrorOk;
}
// Write the character `c` repeated `n` times using the given operation.
Error StringBuilder::_opChars(uint32_t op, char c, size_t n) noexcept {
  char* dst = prepare(op, n);
  if (dst == nullptr)
    return DebugUtils::errored(kErrorNoHeapMemory);

  ::memset(dst, c, n);
  return kErrorOk;
}
// Digits shared by the number and hex formatters; covers bases up to 36.
static const char StringBuilder_numbers[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";

// Format the integer `i` in `base` (2..36, defaulting to 10 for anything
// else), honoring sign/space/alternate-form flags and zero-padding the
// number part to `width` (clamped to 256).
Error StringBuilder::_opNumber(uint32_t op, uint64_t i, uint32_t base, size_t width, uint32_t flags) noexcept {
  if (base < 2 || base > 36)
    base = 10;

  char buf[128];
  char* p = buf + ASMJIT_ARRAY_SIZE(buf);

  uint64_t orig = i;
  char sign = '\0';

  // --------------------------------------------------------------------------
  // [Sign]
  // --------------------------------------------------------------------------

  if ((flags & kStringFormatSigned) != 0 && static_cast<int64_t>(i) < 0) {
    i = static_cast<uint64_t>(-static_cast<int64_t>(i));
    sign = '-';
  }
  else if ((flags & kStringFormatShowSign) != 0) {
    sign = '+';
  }
  else if ((flags & kStringFormatShowSpace) != 0) {
    sign = ' ';
  }

  // --------------------------------------------------------------------------
  // [Number]
  // --------------------------------------------------------------------------

  // Emit digits backwards into `buf`, least significant first.
  do {
    uint64_t d = i / base;
    uint64_t r = i % base;

    *--p = StringBuilder_numbers[r];
    i = d;
  } while (i);

  size_t numberLength = (size_t)(buf + ASMJIT_ARRAY_SIZE(buf) - p);

  // --------------------------------------------------------------------------
  // [Alternate Form]
  // --------------------------------------------------------------------------

  // Like printf's '#' flag: "0" prefix for octal (nonzero only), "0x" for hex.
  if ((flags & kStringFormatAlternate) != 0) {
    if (base == 8) {
      if (orig != 0)
        *--p = '0';
    }
    if (base == 16) {
      *--p = 'x';
      *--p = '0';
    }
  }

  // --------------------------------------------------------------------------
  // [Width]
  // --------------------------------------------------------------------------

  if (sign != 0)
    *--p = sign;

  if (width > 256)
    width = 256;

  if (width <= numberLength)
    width = 0;
  else
    width -= numberLength;

  // --------------------------------------------------------------------------
  // [Write]
  // --------------------------------------------------------------------------

  // Output layout: [sign + alternate prefix][zero padding][digits].
  size_t prefixLength = (size_t)(buf + ASMJIT_ARRAY_SIZE(buf) - p) - numberLength;
  char* data = prepare(op, prefixLength + width + numberLength);

  if (!data)
    return DebugUtils::errored(kErrorNoHeapMemory);

  ::memcpy(data, p, prefixLength);
  data += prefixLength;

  ::memset(data, '0', width);
  data += width;

  ::memcpy(data, p + prefixLength, numberLength);
  return kErrorOk;
}
// Set or append `len` bytes of `data` as an uppercase hex string
// (two characters per byte).
Error StringBuilder::_opHex(uint32_t op, const void* data, size_t len) noexcept {
  char* dst;
  if (len >= IntTraits<size_t>::maxValue() / 2 || !(dst = prepare(op, len * 2)))
    return DebugUtils::errored(kErrorNoHeapMemory);

  // Iterate as `unsigned char`: with plain (signed) `char`, bytes >= 0x80
  // would sign-extend before the shift, making the high-nibble computation
  // rely on implementation-defined right shifts of negative values.
  const unsigned char* src = static_cast<const unsigned char*>(data);
  for (size_t i = 0; i < len; i++, dst += 2, src++) {
    dst[0] = StringBuilder_numbers[(src[0] >> 4) & 0xF];
    dst[1] = StringBuilder_numbers[(src[0]     ) & 0xF];
  }
  return kErrorOk;
}
// Format `fmt` with `ap` into a fixed 1kB scratch buffer and delegate to
// `_opString()`. Output longer than the buffer is silently truncated.
Error StringBuilder::_opVFormat(uint32_t op, const char* fmt, va_list ap) noexcept {
  char tmp[1024];
  const size_t tmpSize = ASMJIT_ARRAY_SIZE(tmp);

  vsnprintf(tmp, tmpSize, fmt, ap);
  tmp[tmpSize - 1] = '\0';

  return _opString(op, tmp);
}
// Replace the current content by the printf-style formatted string `fmt`.
// Returns the error code propagated from `_opVFormat()`.
Error StringBuilder::setFormat(const char* fmt, ...) noexcept {
  // FIX: `result` was previously declared `bool`, which narrowed the `Error`
  // code returned by `_opVFormat()` — any failure collapsed to the value 1
  // instead of the real error code.
  Error result;

  va_list ap;
  va_start(ap, fmt);
  result = _opVFormat(kStringOpSet, fmt, ap);
  va_end(ap);

  return result;
}
// Append the printf-style formatted string `fmt` to the current content.
// Returns the error code propagated from `_opVFormat()`.
Error StringBuilder::appendFormat(const char* fmt, ...) noexcept {
  // FIX: `result` was previously declared `bool`, which narrowed the `Error`
  // code returned by `_opVFormat()` — any failure collapsed to the value 1
  // instead of the real error code.
  Error result;

  va_list ap;
  va_start(ap, fmt);
  result = _opVFormat(kStringOpAppend, fmt, ap);
  va_end(ap);

  return result;
}
bool StringBuilder::eq(const char* str, size_t len) const noexcept {
const char* aData = _data;
const char* bData = str;
size_t aLength = _length;
size_t bLength = len;
if (bLength == Globals::kInvalidIndex) {
size_t i;
for (i = 0; i < aLength; i++)
if (aData[i] != bData[i] || bData[i] == 0)
return false;
return bData[i] == 0;
}
else {
if (aLength != bLength)
return false;
return ::memcmp(aData, bData, aLength) == 0;
}
}
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"

View file

@ -0,0 +1,289 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_BASE_STRING_H
#define _ASMJIT_BASE_STRING_H
// [Dependencies]
#include "../base/globals.h"
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
//! \addtogroup asmjit_base
//! \{
// ============================================================================
// [asmjit::SmallString]
// ============================================================================
//! Small string is a template that helps to create strings that can be either
//! statically allocated if they are small, or externally allocated in case
//! their length exceeds the limit. The `WholeSize` represents the size of the
//! whole `SmallString` structure, based on that size the maximum size of the
//! internal buffer is determined.
template<size_t WholeSize>
class SmallString {
public:
  // One byte is reserved for the null terminator on top of the 4-byte length.
  enum { kMaxEmbeddedLength = WholeSize - 5 };

  ASMJIT_INLINE SmallString() noexcept { reset(); }
  ASMJIT_INLINE void reset() noexcept { ::memset(this, 0, sizeof(*this)); }

  ASMJIT_INLINE bool isEmpty() const noexcept { return _length == 0; }
  //! Get whether the current content lives in the embedded buffer.
  ASMJIT_INLINE bool isEmbedded() const noexcept { return _length <= kMaxEmbeddedLength; }
  //! Get whether a string of `len` bytes would be stored embedded.
  ASMJIT_INLINE bool mustEmbed(size_t len) const noexcept { return len <= kMaxEmbeddedLength; }

  ASMJIT_INLINE uint32_t getLength() const noexcept { return _length; }
  //! Get the string data regardless of where it is stored (the embedded
  //! buffer or the external pointer kept in `_external[1]`).
  ASMJIT_INLINE char* getData() const noexcept {
    return _length <= kMaxEmbeddedLength ? const_cast<char*>(_embedded) : _external[1];
  }

  //! Copy `data` into the embedded buffer and null-terminate it.
  ASMJIT_INLINE void setEmbedded(const char* data, size_t len) noexcept {
    ASMJIT_ASSERT(len <= kMaxEmbeddedLength);

    _length = static_cast<uint32_t>(len);
    ::memcpy(_embedded, data, len);
    _embedded[len] = '\0';
  }

  //! Store an external (caller-owned) pointer; the string is NOT copied and
  //! must outlive this object.
  ASMJIT_INLINE void setExternal(const char* data, size_t len) noexcept {
    ASMJIT_ASSERT(len > kMaxEmbeddedLength);
    ASMJIT_ASSERT(len <= ~static_cast<uint32_t>(0));

    _length = static_cast<uint32_t>(len);
    _external[1] = const_cast<char*>(data);
  }

  // The union deliberately overlays the layout: `_length` occupies the first
  // 4 bytes in both views, and `_external[1]` aliases part of `_embedded`.
  union {
    struct {
      uint32_t _length;
      char _embedded[WholeSize - 4];
    };
    char* _external[2];
  };
};
// ============================================================================
// [asmjit::StringBuilder]
// ============================================================================
//! String builder.
//!
//! String builder was designed to be able to build a string using append like
//! operation to append numbers, other strings, or single characters. It can
//! allocate its own buffer or use a buffer created on the stack.
//!
//! String builder contains methods specific to AsmJit functionality, used for
//! logging or HTML output.
class StringBuilder {
public:
  ASMJIT_NONCOPYABLE(StringBuilder)

  //! \internal
  //!
  //! String operation.
  ASMJIT_ENUM(OpType) {
    kStringOpSet = 0,                  //!< Replace the current string by a given content.
    kStringOpAppend = 1                //!< Append a given content to the current string.
  };

  //! \internal
  //!
  //! String format flags.
  ASMJIT_ENUM(StringFormatFlags) {
    kStringFormatShowSign = 0x00000001,  //!< Always prepend '+' or '-'.
    kStringFormatShowSpace = 0x00000002, //!< Prepend ' ' for non-negative numbers.
    kStringFormatAlternate = 0x00000004, //!< printf-like '#' flag ("0x" / "0" prefix).
    kStringFormatSigned = 0x80000000     //!< Treat the value as signed.
  };

  // --------------------------------------------------------------------------
  // [Construction / Destruction]
  // --------------------------------------------------------------------------

  //! Create an empty string builder (no allocation).
  ASMJIT_API StringBuilder() noexcept;
  //! Destroy the string builder, freeing an owned buffer.
  ASMJIT_API ~StringBuilder() noexcept;

  //! Create a string builder without initializing its members; used by
  //! `StringBuilderTmp`, which sets them up itself.
  ASMJIT_INLINE StringBuilder(const _NoInit&) noexcept {}

  // --------------------------------------------------------------------------
  // [Accessors]
  // --------------------------------------------------------------------------

  //! Get string builder capacity.
  ASMJIT_INLINE size_t getCapacity() const noexcept { return _capacity; }
  //! Get length.
  ASMJIT_INLINE size_t getLength() const noexcept { return _length; }

  //! Get null-terminated string data.
  ASMJIT_INLINE char* getData() noexcept { return _data; }
  //! Get null-terminated string data (const).
  ASMJIT_INLINE const char* getData() const noexcept { return _data; }

  // --------------------------------------------------------------------------
  // [Prepare / Reserve]
  // --------------------------------------------------------------------------

  //! Prepare to set/append.
  ASMJIT_API char* prepare(uint32_t op, size_t len) noexcept;

  //! Reserve `to` bytes in string builder.
  ASMJIT_API Error reserve(size_t to) noexcept;

  // --------------------------------------------------------------------------
  // [Clear]
  // --------------------------------------------------------------------------

  //! Clear the content in String builder.
  ASMJIT_API void clear() noexcept;

  // --------------------------------------------------------------------------
  // [Op]
  // --------------------------------------------------------------------------

  // Low-level workers behind the set*/append* convenience wrappers below;
  // `op` selects between kStringOpSet and kStringOpAppend.
  ASMJIT_API Error _opString(uint32_t op, const char* str, size_t len = Globals::kInvalidIndex) noexcept;
  ASMJIT_API Error _opVFormat(uint32_t op, const char* fmt, va_list ap) noexcept;
  ASMJIT_API Error _opChar(uint32_t op, char c) noexcept;
  ASMJIT_API Error _opChars(uint32_t op, char c, size_t n) noexcept;
  ASMJIT_API Error _opNumber(uint32_t op, uint64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0) noexcept;
  ASMJIT_API Error _opHex(uint32_t op, const void* data, size_t len) noexcept;

  // --------------------------------------------------------------------------
  // [Set]
  // --------------------------------------------------------------------------

  //! Replace the current string with `str` having `len` characters (or `kInvalidIndex` if it's null terminated).
  ASMJIT_INLINE Error setString(const char* str, size_t len = Globals::kInvalidIndex) noexcept { return _opString(kStringOpSet, str, len); }

  //! Replace the current content by a formatted string `fmt`.
  ASMJIT_API Error setFormat(const char* fmt, ...) noexcept;
  //! Replace the current content by a formatted string `fmt` (va_list version).
  ASMJIT_INLINE Error setFormatVA(const char* fmt, va_list ap) noexcept { return _opVFormat(kStringOpSet, fmt, ap); }

  //! Replace the current content by a single `c` character.
  ASMJIT_INLINE Error setChar(char c) noexcept { return _opChar(kStringOpSet, c); }
  //! Replace the current content by `c` character `n` times.
  ASMJIT_INLINE Error setChars(char c, size_t n) noexcept { return _opChars(kStringOpSet, c, n); }

  //! Replace the current content by a formatted integer `i` (signed).
  ASMJIT_INLINE Error setInt(uint64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0) noexcept {
    return _opNumber(kStringOpSet, i, base, width, flags | kStringFormatSigned);
  }

  //! Replace the current content by a formatted integer `i` (unsigned).
  ASMJIT_INLINE Error setUInt(uint64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0) noexcept {
    return _opNumber(kStringOpSet, i, base, width, flags);
  }

  //! Replace the current content by the given `data` converted to a HEX string.
  ASMJIT_INLINE Error setHex(const void* data, size_t len) noexcept {
    return _opHex(kStringOpSet, data, len);
  }

  // --------------------------------------------------------------------------
  // [Append]
  // --------------------------------------------------------------------------

  //! Append string `str` having `len` characters (or `kInvalidIndex` if it's null terminated).
  ASMJIT_INLINE Error appendString(const char* str, size_t len = Globals::kInvalidIndex) noexcept { return _opString(kStringOpAppend, str, len); }

  //! Append a formatted string `fmt`.
  ASMJIT_API Error appendFormat(const char* fmt, ...) noexcept;
  //! Append a formatted string `fmt` (va_list version).
  ASMJIT_INLINE Error appendFormatVA(const char* fmt, va_list ap) noexcept { return _opVFormat(kStringOpAppend, fmt, ap); }

  //! Append a single `c` character.
  ASMJIT_INLINE Error appendChar(char c) noexcept { return _opChar(kStringOpAppend, c); }
  //! Append `c` character `n` times.
  ASMJIT_INLINE Error appendChars(char c, size_t n) noexcept { return _opChars(kStringOpAppend, c, n); }

  //! Append `i` formatted as a signed integer.
  ASMJIT_INLINE Error appendInt(int64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0) noexcept {
    return _opNumber(kStringOpAppend, static_cast<uint64_t>(i), base, width, flags | kStringFormatSigned);
  }

  //! Append `i` formatted as an unsigned integer.
  ASMJIT_INLINE Error appendUInt(uint64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0) noexcept {
    return _opNumber(kStringOpAppend, i, base, width, flags);
  }

  //! Append the given `data` converted to a HEX string.
  ASMJIT_INLINE Error appendHex(const void* data, size_t len) noexcept {
    return _opHex(kStringOpAppend, data, len);
  }

  // --------------------------------------------------------------------------
  // [Eq]
  // --------------------------------------------------------------------------

  //! Check for equality with other `str` of length `len`.
  ASMJIT_API bool eq(const char* str, size_t len = Globals::kInvalidIndex) const noexcept;
  //! Check for equality with `other`.
  ASMJIT_INLINE bool eq(const StringBuilder& other) const noexcept { return eq(other._data, other._length); }

  // --------------------------------------------------------------------------
  // [Operator Overload]
  // --------------------------------------------------------------------------

  ASMJIT_INLINE bool operator==(const StringBuilder& other) const noexcept { return eq(other); }
  ASMJIT_INLINE bool operator!=(const StringBuilder& other) const noexcept { return !eq(other); }

  ASMJIT_INLINE bool operator==(const char* str) const noexcept { return eq(str); }
  ASMJIT_INLINE bool operator!=(const char* str) const noexcept { return !eq(str); }

  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------

  char* _data;                         //!< String data.
  size_t _length;                      //!< String length.
  size_t _capacity;                    //!< String capacity.
  size_t _canFree;                     //!< If the string data can be freed.
};
// ============================================================================
// [asmjit::StringBuilderTmp]
// ============================================================================
//! Temporary string builder, has statically allocated `N` bytes.
template<size_t N>
class StringBuilderTmp : public StringBuilder {
public:
  ASMJIT_NONCOPYABLE(StringBuilderTmp<N>)

  // --------------------------------------------------------------------------
  // [Construction / Destruction]
  // --------------------------------------------------------------------------

  //! Create the builder pointing at the embedded buffer. `_canFree` is set
  //! to false so the base destructor won't try to release stack memory; the
  //! base class transparently switches to heap memory if `N` is exceeded.
  ASMJIT_INLINE StringBuilderTmp() noexcept : StringBuilder(NoInit) {
    _data = _embeddedData;
    _data[0] = 0;

    _length = 0;
    _capacity = N;
    _canFree = false;
  }

  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------

  //! Embedded data, sized up to a multiple of `sizeof(intptr_t)` with room
  //! for the null terminator.
  char _embeddedData[static_cast<size_t>(
    N + 1 + sizeof(intptr_t)) & ~static_cast<size_t>(sizeof(intptr_t) - 1)];
};
//! \}
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"
// [Guard]
#endif // _ASMJIT_BASE_STRING_H

View file

@ -0,0 +1,176 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Export]
#define ASMJIT_EXPORTS
// [Dependencies]
#include "../base/utils.h"
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
// ============================================================================
// [asmjit::Utils - Unit]
// ============================================================================
// Unit tests for asmjit::Utils / IntTraits; compiled only when ASMJIT_TEST
// is defined.
#if defined(ASMJIT_TEST)
UNIT(base_utils) {
  uint32_t i;

  INFO("IntTraits<>");
  EXPECT(IntTraits<signed char>::kIsSigned,"IntTraits<signed char> should report signed");
  EXPECT(IntTraits<short>::kIsSigned, "IntTraits<signed short> should report signed");
  EXPECT(IntTraits<int>::kIsSigned, "IntTraits<int> should report signed");
  EXPECT(IntTraits<long>::kIsSigned, "IntTraits<long> should report signed");

  EXPECT(IntTraits<unsigned char>::kIsUnsigned, "IntTraits<unsigned char> should report unsigned");
  EXPECT(IntTraits<unsigned short>::kIsUnsigned, "IntTraits<unsigned short> should report unsigned");
  EXPECT(IntTraits<unsigned int>::kIsUnsigned, "IntTraits<unsigned int> should report unsigned");
  EXPECT(IntTraits<unsigned long>::kIsUnsigned, "IntTraits<unsigned long> should report unsigned");

  EXPECT(IntTraits<intptr_t>::kIsSigned, "IntTraits<intptr_t> should report signed");
  EXPECT(IntTraits<uintptr_t>::kIsUnsigned, "IntTraits<uintptr_t> should report unsigned");

  EXPECT(IntTraits<intptr_t>::kIsIntPtr, "IntTraits<intptr_t> should report intptr_t type");
  EXPECT(IntTraits<uintptr_t>::kIsIntPtr, "IntTraits<uintptr_t> should report intptr_t type");

  INFO("Utils::inInterval()");
  EXPECT(Utils::inInterval<int>(11 , 10, 20) == true , "Utils::inInterval<int> should return true if inside");
  EXPECT(Utils::inInterval<int>(101, 10, 20) == false, "Utils::inInterval<int> should return false if outside");

  INFO("Utils::isInt8()");
  EXPECT(Utils::isInt8(-128) == true , "Utils::isInt8<> should return true if inside");
  EXPECT(Utils::isInt8( 127) == true , "Utils::isInt8<> should return true if inside");
  EXPECT(Utils::isInt8(-129) == false, "Utils::isInt8<> should return false if outside");
  EXPECT(Utils::isInt8( 128) == false, "Utils::isInt8<> should return false if outside");

  INFO("Utils::isInt16()");
  EXPECT(Utils::isInt16(-32768) == true , "Utils::isInt16<> should return true if inside");
  EXPECT(Utils::isInt16( 32767) == true , "Utils::isInt16<> should return true if inside");
  EXPECT(Utils::isInt16(-32769) == false, "Utils::isInt16<> should return false if outside");
  EXPECT(Utils::isInt16( 32768) == false, "Utils::isInt16<> should return false if outside");

  INFO("Utils::isInt32()");
  EXPECT(Utils::isInt32( 2147483647 ) == true, "Utils::isInt32<int> should return true if inside");
  EXPECT(Utils::isInt32(-2147483647 - 1) == true, "Utils::isInt32<int> should return true if inside");
  EXPECT(Utils::isInt32(ASMJIT_UINT64_C(2147483648)) == false, "Utils::isInt32<int> should return false if outside");
  EXPECT(Utils::isInt32(ASMJIT_UINT64_C(0xFFFFFFFF)) == false, "Utils::isInt32<int> should return false if outside");
  EXPECT(Utils::isInt32(ASMJIT_UINT64_C(0xFFFFFFFF) + 1) == false, "Utils::isInt32<int> should return false if outside");

  INFO("Utils::isUInt8()");
  EXPECT(Utils::isUInt8(0) == true , "Utils::isUInt8<> should return true if inside");
  EXPECT(Utils::isUInt8(255) == true , "Utils::isUInt8<> should return true if inside");
  EXPECT(Utils::isUInt8(256) == false, "Utils::isUInt8<> should return false if outside");
  EXPECT(Utils::isUInt8(-1) == false, "Utils::isUInt8<> should return false if negative");

  INFO("Utils::isUInt12()");
  EXPECT(Utils::isUInt12(0) == true , "Utils::isUInt12<> should return true if inside");
  EXPECT(Utils::isUInt12(4095) == true , "Utils::isUInt12<> should return true if inside");
  EXPECT(Utils::isUInt12(4096) == false, "Utils::isUInt12<> should return false if outside");
  EXPECT(Utils::isUInt12(-1) == false, "Utils::isUInt12<> should return false if negative");

  INFO("Utils::isUInt16()");
  EXPECT(Utils::isUInt16(0) == true , "Utils::isUInt16<> should return true if inside");
  EXPECT(Utils::isUInt16(65535) == true , "Utils::isUInt16<> should return true if inside");
  EXPECT(Utils::isUInt16(65536) == false, "Utils::isUInt16<> should return false if outside");
  EXPECT(Utils::isUInt16(-1) == false, "Utils::isUInt16<> should return false if negative");

  INFO("Utils::isUInt32()");
  EXPECT(Utils::isUInt32(ASMJIT_UINT64_C(0xFFFFFFFF)) == true, "Utils::isUInt32<uint64_t> should return true if inside");
  EXPECT(Utils::isUInt32(ASMJIT_UINT64_C(0xFFFFFFFF) + 1) == false, "Utils::isUInt32<uint64_t> should return false if outside");
  EXPECT(Utils::isUInt32(-1) == false, "Utils::isUInt32<int> should return false if negative");

  INFO("Utils::isPower2()");
  for (i = 0; i < 64; i++) {
    EXPECT(Utils::isPowerOf2(static_cast<uint64_t>(1) << i) == true,
      "Utils::isPower2() didn't report power of 2");
    EXPECT(Utils::isPowerOf2((static_cast<uint64_t>(1) << i) ^ 0x001101) == false,
      "Utils::isPower2() didn't report not power of 2");
  }

  INFO("Utils::mask()");
  for (i = 0; i < 32; i++) {
    EXPECT(Utils::mask(i) == (1 << i),
      "Utils::mask(%u) should return %X", i, (1 << i));
  }

  INFO("Utils::bits()");
  for (i = 0; i < 32; i++) {
    // Build the expected i-bit mask bit-by-bit and compare.
    uint32_t expectedBits = 0;

    for (uint32_t b = 0; b < i; b++)
      expectedBits |= static_cast<uint32_t>(1) << b;

    EXPECT(Utils::bits(i) == expectedBits,
      "Utils::bits(%u) should return %X", i, expectedBits);
  }

  INFO("Utils::hasBit()");
  for (i = 0; i < 32; i++) {
    EXPECT(Utils::hasBit((1 << i), i) == true,
      "Utils::hasBit(%X, %u) should return true", (1 << i), i);
  }

  INFO("Utils::bitCount()");
  for (i = 0; i < 32; i++) {
    EXPECT(Utils::bitCount((1 << i)) == 1,
      "Utils::bitCount(%X) should return true", (1 << i));
  }
  EXPECT(Utils::bitCount(0x000000F0) == 4, "");
  EXPECT(Utils::bitCount(0x10101010) == 4, "");
  EXPECT(Utils::bitCount(0xFF000000) == 8, "");
  EXPECT(Utils::bitCount(0xFFFFFFF7) == 31, "");
  EXPECT(Utils::bitCount(0x7FFFFFFF) == 31, "");

  INFO("Utils::findFirstBit()");
  for (i = 0; i < 32; i++) {
    EXPECT(Utils::findFirstBit((1 << i)) == i,
      "Utils::findFirstBit(%X) should return %u", (1 << i), i);
  }

  INFO("Utils::keepNOnesFromRight()");
  EXPECT(Utils::keepNOnesFromRight(0xF, 1) == 0x1, "");
  EXPECT(Utils::keepNOnesFromRight(0xF, 2) == 0x3, "");
  EXPECT(Utils::keepNOnesFromRight(0xF, 3) == 0x7, "");
  EXPECT(Utils::keepNOnesFromRight(0x5, 2) == 0x5, "");
  EXPECT(Utils::keepNOnesFromRight(0xD, 2) == 0x5, "");

  INFO("Utils::isAligned()");
  EXPECT(Utils::isAligned<size_t>(0xFFFF, 4) == false, "");
  EXPECT(Utils::isAligned<size_t>(0xFFF4, 4) == true , "");
  EXPECT(Utils::isAligned<size_t>(0xFFF8, 8) == true , "");
  EXPECT(Utils::isAligned<size_t>(0xFFF0, 16) == true , "");

  INFO("Utils::alignTo()");
  EXPECT(Utils::alignTo<size_t>(0xFFFF, 4) == 0x10000, "");
  EXPECT(Utils::alignTo<size_t>(0xFFF4, 4) == 0x0FFF4, "");
  EXPECT(Utils::alignTo<size_t>(0xFFF8, 8) == 0x0FFF8, "");
  EXPECT(Utils::alignTo<size_t>(0xFFF0, 16) == 0x0FFF0, "");
  EXPECT(Utils::alignTo<size_t>(0xFFF0, 32) == 0x10000, "");

  INFO("Utils::alignToPowerOf2()");
  EXPECT(Utils::alignToPowerOf2<size_t>(0xFFFF) == 0x10000, "");
  EXPECT(Utils::alignToPowerOf2<size_t>(0xF123) == 0x10000, "");
  EXPECT(Utils::alignToPowerOf2<size_t>(0x0F00) == 0x01000, "");
  EXPECT(Utils::alignToPowerOf2<size_t>(0x0100) == 0x00100, "");
  EXPECT(Utils::alignToPowerOf2<size_t>(0x1001) == 0x02000, "");

  INFO("Utils::alignDiff()");
  EXPECT(Utils::alignDiff<size_t>(0xFFFF, 4) == 1, "");
  EXPECT(Utils::alignDiff<size_t>(0xFFF4, 4) == 0, "");
  EXPECT(Utils::alignDiff<size_t>(0xFFF8, 8) == 0, "");
  EXPECT(Utils::alignDiff<size_t>(0xFFF0, 16) == 0, "");
  EXPECT(Utils::alignDiff<size_t>(0xFFF0, 32) == 16, "");
}
#endif // ASMJIT_TEST
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,154 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_BASE_VMEM_H
#define _ASMJIT_BASE_VMEM_H
// [Dependencies]
#include "../base/globals.h"
#include "../base/osutils.h"
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
//! \addtogroup asmjit_base
//! \{
// ============================================================================
// [asmjit::VMemMgr]
// ============================================================================
//! Reference implementation of memory manager that uses `VMemUtil` to allocate
//! chunks of virtual memory and bit arrays to manage it.
class VMemMgr {
public:
  //! Type of virtual memory allocation, see `VMemMgr::alloc()`.
  ASMJIT_ENUM(AllocType) {
    //! Normal memory allocation, has to be freed by `VMemMgr::release()`.
    kAllocFreeable = 0,
    //! Allocate permanent memory, can't be freed.
    kAllocPermanent = 1
  };

  // --------------------------------------------------------------------------
  // [Construction / Destruction]
  // --------------------------------------------------------------------------

#if !ASMJIT_OS_WINDOWS
  //! Create a `VMemMgr` instance.
  ASMJIT_API VMemMgr() noexcept;
#else
  //! Create a `VMemMgr` instance.
  //!
  //! NOTE: When running on Windows it's possible to specify a `hProcess` to
  //! be used for memory allocation. Using `hProcess` allows to allocate
  //! memory of a remote process.
  ASMJIT_API VMemMgr(HANDLE hProcess = static_cast<HANDLE>(0)) noexcept;
#endif // ASMJIT_OS_WINDOWS

  //! Destroy the `VMemMgr` instance and free all blocks.
  ASMJIT_API ~VMemMgr() noexcept;

  // --------------------------------------------------------------------------
  // [Reset]
  // --------------------------------------------------------------------------

  //! Free all allocated memory.
  ASMJIT_API void reset() noexcept;

  // --------------------------------------------------------------------------
  // [Accessors]
  // --------------------------------------------------------------------------

#if ASMJIT_OS_WINDOWS
  //! Get the handle of the process this memory manager is bound to.
  ASMJIT_INLINE HANDLE getProcessHandle() const noexcept { return _hProcess; }
#endif // ASMJIT_OS_WINDOWS

  //! Get how many bytes are currently allocated.
  ASMJIT_INLINE size_t getAllocatedBytes() const noexcept { return _allocatedBytes; }
  //! Get how many bytes are currently used.
  ASMJIT_INLINE size_t getUsedBytes() const noexcept { return _usedBytes; }

  //! Get whether to keep allocated memory after the `VMemMgr` is destroyed.
  //!
  //! \sa \ref setKeepVirtualMemory.
  ASMJIT_INLINE bool getKeepVirtualMemory() const noexcept { return _keepVirtualMemory; }

  //! Set whether to keep allocated memory after the memory manager is destroyed.
  //!
  //! This method is usable when patching code of a remote process. You need to
  //! allocate process memory, store the generated assembly into it and patch
  //! the method you want to redirect (into your code). This setting affects
  //! only the VMemMgr destructor: after destruction all internal structures
  //! are freed and only the process virtual memory remains.
  //!
  //! NOTE: Memory allocated with kAllocPermanent is always kept.
  //!
  //! \sa \ref getKeepVirtualMemory.
  ASMJIT_INLINE void setKeepVirtualMemory(bool val) noexcept { _keepVirtualMemory = val; }

  // --------------------------------------------------------------------------
  // [Alloc / Release]
  // --------------------------------------------------------------------------

  //! Allocate `size` bytes of virtual memory.
  //!
  //! Note that if you are implementing your own virtual memory manager then
  //! you can quietly ignore the `type` of allocation. It is a hint telling
  //! the memory manager that the allocated memory will never be freed.
  ASMJIT_API void* alloc(size_t size, uint32_t type = kAllocFreeable) noexcept;
  //! Free previously allocated memory at a given `address`.
  ASMJIT_API Error release(void* p) noexcept;
  //! Free extra memory allocated with `p`.
  ASMJIT_API Error shrink(void* p, size_t used) noexcept;

  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------

#if ASMJIT_OS_WINDOWS
  HANDLE _hProcess;                      //!< Process passed to `VirtualAllocEx` and `VirtualFree`.
#endif // ASMJIT_OS_WINDOWS

  Lock _lock;                            //!< Lock to enable thread-safe functionality.
  size_t _blockSize;                     //!< Default block size.
  size_t _blockDensity;                  //!< Default block density.
  bool _keepVirtualMemory;               //!< Keep virtual memory after destroyed.
  size_t _allocatedBytes;                //!< How many bytes are currently allocated.
  size_t _usedBytes;                     //!< How many bytes are currently used.

  //! \internal
  //! \{

  struct RbNode;
  struct MemNode;
  struct PermanentNode;

  // Memory nodes root.
  MemNode* _root;
  // Memory nodes list.
  MemNode* _first;
  MemNode* _last;
  MemNode* _optimal;
  // Permanent memory.
  PermanentNode* _permanent;

  //! \}
};
//! \}
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"
// [Guard]
#endif // _ASMJIT_BASE_VMEM_H

View file

@ -0,0 +1,962 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Export]
#define ASMJIT_EXPORTS
// [Dependencies]
#include "../base/utils.h"
#include "../base/zone.h"
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
//! Zero size block used by `Zone` that doesn't have any memory allocated.
//! Shared by every `Zone` instance until its first real allocation, which
//! lets the constructor avoid touching the heap.
static const Zone::Block Zone_zeroBlock = { nullptr, nullptr, 0, { 0 } };
// Translate a power-of-two alignment (1..64) into its shift amount (log2).
// Zero and values outside the supported set map to a shift of zero.
static ASMJIT_INLINE uint32_t Zone_getAlignmentOffsetFromAlignment(uint32_t x) noexcept {
  uint32_t shift = 0;
  for (uint32_t alignment = 2; alignment <= 64; alignment <<= 1) {
    shift++;
    if (alignment == x)
      return shift;
  }
  // Covers 0, 1, and any unsupported alignment.
  return 0;
}
// ============================================================================
// [asmjit::Zone - Construction / Destruction]
// ============================================================================
// Construct a `Zone` with the given default block size and block alignment.
// No memory is allocated here - the zone points at the shared immutable
// `Zone_zeroBlock` and allocates its first real block lazily in `_alloc()`.
Zone::Zone(uint32_t blockSize, uint32_t blockAlignment) noexcept
  : _ptr(nullptr),
    _end(nullptr),
    _block(const_cast<Zone::Block*>(&Zone_zeroBlock)),
    _blockSize(blockSize),
    _blockAlignmentShift(Zone_getAlignmentOffsetFromAlignment(blockAlignment)) {}
// Destroy the `Zone` and return every allocated block to the system.
Zone::~Zone() noexcept {
  reset(true);
}
// ============================================================================
// [asmjit::Zone - Reset]
// ============================================================================
// Reset the zone. With `releaseMemory == true` all blocks are freed and the
// zone returns to its zero-block state; otherwise the blocks are kept and the
// zone rewinds to the beginning of the first block for reuse.
void Zone::reset(bool releaseMemory) noexcept {
  Block* cur = _block;

  // Can't be altered - the shared zero-block means nothing was ever allocated.
  if (cur == &Zone_zeroBlock)
    return;

  if (releaseMemory) {
    // Since cur can be in the middle of the double-linked list, we have to
    // traverse to both directions `prev` and `next` separately.
    Block* next = cur->next;
    do {
      Block* prev = cur->prev;
      Internal::releaseMemory(cur);
      cur = prev;
    } while (cur);

    cur = next;
    while (cur) {
      next = cur->next;
      Internal::releaseMemory(cur);
      cur = next;
    }

    _ptr = nullptr;
    _end = nullptr;
    _block = const_cast<Zone::Block*>(&Zone_zeroBlock);
  }
  else {
    // Rewind to the first block; the whole chain stays allocated so later
    // `_alloc()` calls can reuse the `next` blocks without hitting the heap.
    while (cur->prev)
      cur = cur->prev;

    _ptr = cur->data;
    _end = _ptr + cur->size;
    _block = cur;
  }
}
// ============================================================================
// [asmjit::Zone - Alloc]
// ============================================================================
// Slow path of `alloc()`: invoked only when the current block cannot satisfy
// `size` bytes. Either reuses an already-allocated `next` block (left over
// from a non-releasing `reset()`) or allocates a fresh block and links it
// into the double-linked block chain. Returns nullptr on overflow/OOM.
void* Zone::_alloc(size_t size) noexcept {
  Block* curBlock = _block;
  uint8_t* p;

  // A request bigger than the default block size gets a block of its own size.
  size_t blockSize = std::max<size_t>(_blockSize, size);
  size_t blockAlignment = getBlockAlignment();

  // The `_alloc()` method can only be called if there is not enough space
  // in the current block, see `alloc()` implementation for more details.
  ASMJIT_ASSERT(curBlock == &Zone_zeroBlock || getRemainingSize() < size);

  // If the `Zone` has been cleared the current block doesn't have to be the
  // last one. Check if there is a block that can be used instead of allocating
  // a new one. If there is a `next` block it's completely unused, we don't have
  // to check for remaining bytes.
  Block* next = curBlock->next;
  if (next && next->size >= size) {
    p = Utils::alignTo(next->data, blockAlignment);
    _block = next;
    _ptr = p + size;
    _end = next->data + next->size;
    return static_cast<void*>(p);
  }

  // Prevent arithmetic overflow.
  if (ASMJIT_UNLIKELY(blockSize > (~static_cast<size_t>(0) - sizeof(Block) - blockAlignment)))
    return nullptr;

  // Reserve extra space so the data pointer can be aligned afterwards.
  blockSize += blockAlignment;
  Block* newBlock = static_cast<Block*>(Internal::allocMemory(sizeof(Block) + blockSize));

  if (ASMJIT_UNLIKELY(!newBlock))
    return nullptr;

  // Align the pointer to `blockAlignment` and adjust the size of this block
  // accordingly. It's the same as using `blockAlignment - Utils::alignDiff()`,
  // just written differently.
  p = Utils::alignTo(newBlock->data, blockAlignment);
  newBlock->prev = nullptr;
  newBlock->next = nullptr;
  newBlock->size = blockSize;

  if (curBlock != &Zone_zeroBlock) {
    newBlock->prev = curBlock;
    curBlock->next = newBlock;

    // Does only happen if there is a next block, but the requested memory
    // can't fit into it. In this case a new buffer is allocated and inserted
    // between the current block and the next one.
    if (next) {
      newBlock->next = next;
      next->prev = newBlock;
    }
  }

  _block = newBlock;
  _ptr = p + size;
  _end = newBlock->data + blockSize;

  return static_cast<void*>(p);
}
void* Zone::allocZeroed(size_t size) noexcept {
void* p = alloc(size);
if (ASMJIT_UNLIKELY(!p)) return p;
return ::memset(p, 0, size);
}
// Duplicate `size` bytes of `data` into the zone, optionally appending a
// terminating NUL byte. Returns nullptr for empty input or on allocation
// failure.
void* Zone::dup(const void* data, size_t size, bool nullTerminate) noexcept {
  if (ASMJIT_UNLIKELY(data == nullptr || size == 0))
    return nullptr;

  // `size + 1` below must not wrap around when a terminator is requested.
  ASMJIT_ASSERT(size != IntTraits<size_t>::maxValue());

  uint8_t* copy = allocT<uint8_t>(size + (nullTerminate ? 1 : 0));
  if (ASMJIT_UNLIKELY(copy == nullptr))
    return nullptr;

  ::memcpy(copy, data, size);
  if (nullTerminate)
    copy[size] = '\0';

  return static_cast<void*>(copy);
}
// Format a string into a temporary stack buffer and duplicate it into the
// zone.
//
// Returns a zone-allocated, NUL-terminated copy of the formatted string, or
// nullptr if `fmt` is null or the zone allocation fails. Output longer than
// the internal 512-byte buffer is truncated.
char* Zone::sformat(const char* fmt, ...) noexcept {
  if (ASMJIT_UNLIKELY(!fmt)) return nullptr;

  char buf[512];
  va_list ap;

  va_start(ap, fmt);
  int rv = vsnprintf(buf, ASMJIT_ARRAY_SIZE(buf) - 1, fmt, ap);
  va_end(ap);

  // BUGFIX: `vsnprintf` returns the length the output WOULD have had, which
  // exceeds the buffer size when truncated, and a negative value on encoding
  // error. The previous code used the raw return value as an index, so
  // `buf[len++] = 0` could write far past the end of `buf` (stack smash) or
  // index with a huge size_t after a negative return. Clamp it instead:
  // with a size of 511 at most 510 characters are written (NUL at 510).
  size_t len = 0;
  if (rv > 0)
    len = std::min<size_t>(static_cast<size_t>(rv), ASMJIT_ARRAY_SIZE(buf) - 2);
  buf[len++] = 0;

  return static_cast<char*>(dup(buf, len));
}
// ============================================================================
// [asmjit::ZoneHeap - Helpers]
// ============================================================================
// Check whether `block` is currently linked in `self`'s dynamic-block list.
// Only used by assertions in `ZoneHeap::_releaseDynamic()`.
static bool ZoneHeap_hasDynamicBlock(ZoneHeap* self, ZoneHeap::DynamicBlock* block) noexcept {
  for (ZoneHeap::DynamicBlock* node = self->_dynamicBlocks; node != nullptr; node = node->next) {
    if (node == block)
      return true;
  }
  return false;
}
// ============================================================================
// [asmjit::ZoneHeap - Init / Reset]
// ============================================================================
// Release every dynamic (oversized) block and reinitialize the heap to serve
// allocations from `zone`. Memory handed out from the zone itself is NOT
// released here - it belongs to the zone. Passing a null `zone` leaves the
// heap in an uninitialized state.
void ZoneHeap::reset(Zone* zone) noexcept {
  // Free dynamic blocks.
  DynamicBlock* block = _dynamicBlocks;
  while (block) {
    DynamicBlock* next = block->next;
    Internal::releaseMemory(block);
    block = next;
  }
  // Zero the entire class and initialize to the given `zone`.
  // NOTE(review): assumes ZoneHeap is trivially copyable so `memset` is a
  // valid way to clear it (slots, counters, pointers) - confirm against the
  // class definition in the header.
  ::memset(this, 0, sizeof(*this));
  _zone = zone;
}
// ============================================================================
// [asmjit::ZoneHeap - Alloc / Release]
// ============================================================================
// Core allocation routine. Small requests are served from per-size free lists
// (`_slots`) carved out of the backing `Zone`; requests too large for a slot
// get a standalone "dynamic" block from the system allocator. On success
// `allocatedSize` receives the usable size, which may exceed `size` for slot
// allocations; on failure it is set to 0 and nullptr is returned.
void* ZoneHeap::_alloc(size_t size, size_t& allocatedSize) noexcept {
  ASMJIT_ASSERT(isInitialized());

  // We use our memory pool only if the requested block is of a reasonable size.
  uint32_t slot;
  if (_getSlotIndex(size, slot, allocatedSize)) {
    // Slot reuse - pop the head of this slot's free list if it's non-empty.
    uint8_t* p = reinterpret_cast<uint8_t*>(_slots[slot]);
    size = allocatedSize;

    if (p) {
      _slots[slot] = reinterpret_cast<Slot*>(p)->next;
      //printf("ALLOCATED %p of size %d (SLOT %d)\n", p, int(size), slot);
      return p;
    }

    // So use Zone to allocate a new chunk for us. But before we use it, we
    // check if there is enough room for the new chunk in zone, and if not,
    // we redistribute the remaining memory in Zone's current block into slots.
    Zone* zone = _zone;
    p = Utils::alignTo(zone->getCursor(), kBlockAlignment);
    size_t remain = (p <= zone->getEnd()) ? (size_t)(zone->getEnd() - p) : size_t(0);

    if (ASMJIT_LIKELY(remain >= size)) {
      // Fast path - the zone's current block has room; bump its cursor.
      zone->setCursor(p + size);
      //printf("ALLOCATED %p of size %d (SLOT %d)\n", p, int(size), slot);
      return p;
    }
    else {
      // Distribute the remaining memory to suitable slots so the tail of the
      // current zone block is not wasted.
      if (remain >= kLoGranularity) {
        do {
          size_t distSize = std::min<size_t>(remain, kLoMaxSize);
          uint32_t distSlot = static_cast<uint32_t>((distSize - kLoGranularity) / kLoGranularity);
          ASMJIT_ASSERT(distSlot < kLoCount);

          reinterpret_cast<Slot*>(p)->next = _slots[distSlot];
          _slots[distSlot] = reinterpret_cast<Slot*>(p);

          p += distSize;
          remain -= distSize;
        } while (remain >= kLoGranularity);
        zone->setCursor(p);
      }

      // Force the zone onto a fresh block and carve the chunk out of it.
      p = static_cast<uint8_t*>(zone->_alloc(size));
      if (ASMJIT_UNLIKELY(!p)) {
        allocatedSize = 0;
        return nullptr;
      }

      //printf("ALLOCATED %p of size %d (SLOT %d)\n", p, int(size), slot);
      return p;
    }
  }
  else {
    // Allocate a dynamic block.
    size_t overhead = sizeof(DynamicBlock) + sizeof(DynamicBlock*) + kBlockAlignment;

    // Handle a possible overflow.
    if (ASMJIT_UNLIKELY(overhead >= ~static_cast<size_t>(0) - size))
      return nullptr;

    void* p = Internal::allocMemory(size + overhead);
    if (ASMJIT_UNLIKELY(!p)) {
      allocatedSize = 0;
      return nullptr;
    }

    // Link as first in `_dynamicBlocks` double-linked list.
    DynamicBlock* block = static_cast<DynamicBlock*>(p);
    DynamicBlock* next = _dynamicBlocks;

    if (next)
      next->prev = block;

    block->prev = nullptr;
    block->next = next;
    _dynamicBlocks = block;

    // Align the pointer to the guaranteed alignment and store `DynamicBlock`
    // just below the returned pointer, so `_releaseDynamic()` can find it.
    p = Utils::alignTo(static_cast<uint8_t*>(p) + sizeof(DynamicBlock) + sizeof(DynamicBlock*), kBlockAlignment);
    reinterpret_cast<DynamicBlock**>(p)[-1] = block;

    allocatedSize = size;
    //printf("ALLOCATED DYNAMIC %p of size %d\n", p, int(size));
    return p;
  }
}
// Same as `_alloc()`, but the returned memory is cleared to zero over the
// whole `allocatedSize`, not just the requested `size`.
void* ZoneHeap::_allocZeroed(size_t size, size_t& allocatedSize) noexcept {
  ASMJIT_ASSERT(isInitialized());

  void* mem = _alloc(size, allocatedSize);
  if (ASMJIT_UNLIKELY(mem == nullptr))
    return nullptr;

  return ::memset(mem, 0, allocatedSize);
}
// Return a dynamic block (allocated by `_alloc()` for oversized requests)
// back to the system allocator. `p` is the aligned user pointer; the owning
// `DynamicBlock*` is stashed immediately before it.
void ZoneHeap::_releaseDynamic(void* p, size_t size) noexcept {
  ASMJIT_ASSERT(isInitialized());

  // Pointer to `DynamicBlock` is stored at [-1].
  DynamicBlock* block = reinterpret_cast<DynamicBlock**>(p)[-1];
  ASMJIT_ASSERT(ZoneHeap_hasDynamicBlock(this, block));

  // Unlink from the double-linked list, then free.
  DynamicBlock* prev = block->prev;
  DynamicBlock* next = block->next;

  if (prev != nullptr)
    prev->next = next;
  else
    _dynamicBlocks = next;

  if (next != nullptr)
    next->prev = prev;

  Internal::releaseMemory(block);
}
// ============================================================================
// [asmjit::ZoneVectorBase - Helpers]
// ============================================================================
// Grow the vector so that at least `n` more elements of `sizeOfT` bytes fit.
// Tiny capacities are bumped through a fixed ladder (good for ZoneHeap's slot
// allocator), then the capacity doubles until `Globals::kAllocThreshold` and
// grows linearly afterwards to avoid successive reallocations.
Error ZoneVectorBase::_grow(ZoneHeap* heap, size_t sizeOfT, size_t n) noexcept {
  size_t threshold = Globals::kAllocThreshold / sizeOfT;
  size_t capacity = _capacity;
  size_t after = _length;

  // Guard `_length + n` against size_t overflow.
  if (ASMJIT_UNLIKELY(IntTraits<size_t>::maxValue() - n < after))
    return DebugUtils::errored(kErrorNoHeapMemory);

  after += n;
  if (capacity >= after)
    return kErrorOk;

  // Bump small capacities to the next step of the ladder.
  static const size_t steps[] = { 4, 8, 16, 64, 256 };
  for (size_t step : steps) {
    if (capacity < step) {
      capacity = step;
      break;
    }
  }

  // Double below the threshold, then grow linearly.
  while (capacity < after)
    capacity += (capacity < threshold) ? capacity : threshold;

  return _reserve(heap, sizeOfT, capacity);
}
// Reallocate storage to hold at least `n` elements of `sizeOfT` bytes each.
// Existing elements are copied into the new buffer and the old buffer is
// returned to `heap`. A no-op when the current capacity already suffices.
Error ZoneVectorBase::_reserve(ZoneHeap* heap, size_t sizeOfT, size_t n) noexcept {
  size_t oldCapacity = _capacity;
  if (oldCapacity >= n)
    return kErrorOk;

  // Reject a byte count that wrapped around size_t.
  size_t nBytes = n * sizeOfT;
  if (ASMJIT_UNLIKELY(nBytes < n))
    return DebugUtils::errored(kErrorNoHeapMemory);

  size_t allocatedBytes;
  uint8_t* newData = static_cast<uint8_t*>(heap->alloc(nBytes, allocatedBytes));
  if (ASMJIT_UNLIKELY(newData == nullptr))
    return DebugUtils::errored(kErrorNoHeapMemory);

  void* oldData = _data;
  if (_length != 0)
    ::memcpy(newData, oldData, _length * sizeOfT);
  if (oldData != nullptr)
    heap->release(oldData, oldCapacity * sizeOfT);

  // The heap may return more than requested - use all of it.
  _capacity = allocatedBytes / sizeOfT;
  ASMJIT_ASSERT(_capacity >= n);

  _data = newData;
  return kErrorOk;
}
// Resize to exactly `n` elements, growing if needed and zero-filling any
// newly added elements. Shrinking just lowers the length.
Error ZoneVectorBase::_resize(ZoneHeap* heap, size_t sizeOfT, size_t n) noexcept {
  size_t oldLength = _length;

  if (_capacity < n) {
    ASMJIT_PROPAGATE(_grow(heap, sizeOfT, n - oldLength));
    ASMJIT_ASSERT(_capacity >= n);
  }

  if (oldLength < n) {
    uint8_t* tail = static_cast<uint8_t*>(_data) + oldLength * sizeOfT;
    ::memset(tail, 0, (n - oldLength) * sizeOfT);
  }

  _length = n;
  return kErrorOk;
}
// ============================================================================
// [asmjit::ZoneBitVector - Ops]
// ============================================================================
// Resize the bit vector to `newLength` bits, allocating at least
// `idealCapacity` bits when the buffer must grow, and initializing any newly
// exposed bits to `newBitsValue`. Shrinking clears the now-unused tail bits
// of the last word so later growth sees them as zero.
Error ZoneBitVector::_resize(ZoneHeap* heap, size_t newLength, size_t idealCapacity, bool newBitsValue) noexcept {
  ASMJIT_ASSERT(idealCapacity >= newLength);

  if (newLength <= _length) {
    // The size after the resize is lesser than or equal to the current length.
    size_t idx = newLength / kBitsPerWord;
    size_t bit = newLength % kBitsPerWord;

    // Just set all bits outside of the new length in the last word to zero.
    // There is a case that there are no bits to clear if `bit` is zero. This
    // happens when `newLength` is a multiple of `kBitsPerWord` like 64, 128,
    // and so on. In that case don't change anything as that would mean
    // clearing bits outside of the `_length`.
    if (bit)
      _data[idx] &= (static_cast<uintptr_t>(1) << bit) - 1U;

    _length = newLength;
    return kErrorOk;
  }

  size_t oldLength = _length;
  BitWord* data = _data;

  if (newLength > _capacity) {
    // Realloc needed... Calculate the minimum capacity (in bits) required.
    size_t minimumCapacityInBits = Utils::alignTo<size_t>(idealCapacity, kBitsPerWord);
    size_t allocatedCapacity;

    if (ASMJIT_UNLIKELY(minimumCapacityInBits < newLength))
      return DebugUtils::errored(kErrorNoHeapMemory);

    // Normalize to bytes.
    size_t minimumCapacity = minimumCapacityInBits / 8;
    BitWord* newData = static_cast<BitWord*>(heap->alloc(minimumCapacity, allocatedCapacity));

    if (ASMJIT_UNLIKELY(!newData))
      return DebugUtils::errored(kErrorNoHeapMemory);

    // `allocatedCapacity` now contains number in bytes, we need bits.
    size_t allocatedCapacityInBits = allocatedCapacity * 8;

    // Arithmetic overflow should normally not happen. If it happens we just
    // change the `allocatedCapacityInBits` to the `minimumCapacityInBits` as
    // this value is still safe to be used to call `_heap->release(...)`.
    if (ASMJIT_UNLIKELY(allocatedCapacityInBits < allocatedCapacity))
      allocatedCapacityInBits = minimumCapacityInBits;

    // NOTE(review): the copy size here is `_wordsPerBits(oldLength)`, which
    // reads like a WORD count being passed where memcpy expects BYTES -
    // verify against `_wordsPerBits()`; one would expect a multiplication by
    // `sizeof(BitWord)` here. TODO confirm.
    if (oldLength)
      ::memcpy(newData, data, _wordsPerBits(oldLength));

    if (data)
      heap->release(data, _capacity / 8);
    data = newData;

    _data = data;
    _capacity = allocatedCapacityInBits;
  }

  // Start (of the old length) and end (of the new length) bits
  size_t idx = oldLength / kBitsPerWord;
  size_t startBit = oldLength % kBitsPerWord;
  size_t endBit = newLength % kBitsPerWord;

  // Set new bits to either 0 or 1. The `pattern` is used to set multiple
  // bits per bit-word and contains either all zeros or all ones.
  BitWord pattern = _patternFromBit(newBitsValue);

  // First initialize the last bit-word of the old length.
  if (startBit) {
    size_t nBits = 0;

    if (idx == (newLength / kBitsPerWord)) {
      // The number of bit-words is the same after the resize. In that case
      // we need to set only bits necessary in the current last bit-word.
      ASMJIT_ASSERT(startBit < endBit);
      nBits = endBit - startBit;
    }
    else {
      // There will be more bit-words after the resize. In that case we don't
      // have to be extra careful about the last bit-word of the old length.
      nBits = kBitsPerWord - startBit;
    }

    // NOTE(review): `pattern << nBits` looks suspicious - to fill the range
    // [startBit, startBit + nBits) one would expect the mask
    // `(((BitWord)1 << nBits) - 1) << startBit` (compare `fill()` below,
    // which builds its masks that way). Verify against upstream asmjit
    // before changing - TODO confirm.
    data[idx++] |= pattern << nBits;
  }

  // Initialize all bit-words after the last bit-word of the old length.
  size_t endIdx = _wordsPerBits(newLength);
  endIdx -= static_cast<size_t>(endIdx * kBitsPerWord == newLength);

  while (idx <= endIdx)
    data[idx++] = pattern;

  // Clear unused bits of the last bit-word.
  if (endBit)
    data[endIdx] &= (static_cast<BitWord>(1) << endBit) - 1;

  _length = newLength;
  return kErrorOk;
}
// Append a single bit, growing capacity aggressively (doubling up to a
// threshold, then linearly) so repeated appends remain amortized O(1).
Error ZoneBitVector::_append(ZoneHeap* heap, bool value) noexcept {
  size_t kThreshold = Globals::kAllocThreshold * 8;
  size_t newLength = _length + 1;

  size_t idealCapacity;
  if (_capacity < 128)
    idealCapacity = 128;
  else if (_capacity <= kThreshold)
    idealCapacity = _capacity * 2;
  else
    idealCapacity = _capacity + kThreshold;

  // The capacity computation above wrapped around; fall back to the minimum.
  if (ASMJIT_UNLIKELY(idealCapacity < _capacity)) {
    // It's technically impossible that `_length + 1` overflows.
    idealCapacity = newLength;
    ASMJIT_ASSERT(idealCapacity > _capacity);
  }

  return _resize(heap, newLength, idealCapacity, value);
}
// Set every bit in the half-open range [from, to) to `value`. Both indexes
// must lie within the current length. `from > to` is an error; `from == to`
// is a no-op.
Error ZoneBitVector::fill(size_t from, size_t to, bool value) noexcept {
  if (ASMJIT_UNLIKELY(from >= to)) {
    if (from > to)
      return DebugUtils::errored(kErrorInvalidArgument);
    else
      return kErrorOk;
  }

  ASMJIT_ASSERT(from <= _length);
  ASMJIT_ASSERT(to <= _length);

  // This is very similar to `ZoneBitVector::_fill()`, however, since we
  // actually set bits that are already part of the container we need to
  // special case filling to zeros and ones.
  size_t idx = from / kBitsPerWord;
  size_t startBit = from % kBitsPerWord;

  size_t endIdx = to / kBitsPerWord;
  size_t endBit = to % kBitsPerWord;

  BitWord* data = _data;
  ASMJIT_ASSERT(data != nullptr);

  // Special case for non-zero `startBit` - the first word is only partially
  // covered, so mask off the bits below `startBit`.
  if (startBit) {
    if (idx == endIdx) {
      // The entire range lives inside a single word.
      ASMJIT_ASSERT(startBit < endBit);

      size_t nBits = endBit - startBit;
      BitWord mask = ((static_cast<BitWord>(1) << nBits) - 1) << startBit;

      if (value)
        data[idx] |= mask;
      else
        data[idx] &= ~mask;
      return kErrorOk;
    }
    else {
      // Fill the partial first word from `startBit` to its top bit.
      BitWord mask = (static_cast<BitWord>(0) - 1) << startBit;

      if (value)
        data[idx++] |= mask;
      else
        data[idx++] &= ~mask;
    }
  }

  // Fill all bits in case there is a gap between the current `idx` and `endIdx`.
  if (idx < endIdx) {
    BitWord pattern = _patternFromBit(value);
    do {
      data[idx++] = pattern;
    } while (idx < endIdx);
  }

  // Special case for non-zero `endBit` - the last word is only partially
  // covered, so mask off the bits at and above `endBit`.
  if (endBit) {
    BitWord mask = ((static_cast<BitWord>(1) << endBit) - 1);
    if (value)
      data[endIdx] |= mask;
    else
      data[endIdx] &= ~mask;
  }

  return kErrorOk;
}
// ============================================================================
// [asmjit::ZoneStackBase - Init / Reset]
// ============================================================================
// (Re)initialize the stack to allocate from `heap`. Any blocks owned through
// a previously attached heap are released first; passing a null `heap` just
// detaches. The initial block is "centered" at `middleIndex` so items can be
// appended to either side.
Error ZoneStackBase::_init(ZoneHeap* heap, size_t middleIndex) noexcept {
  // Release the existing chain, walked left-to-right.
  if (_heap != nullptr) {
    Block* cur = _block[kSideLeft];
    while (cur != nullptr) {
      Block* following = cur->getNext();
      _heap->release(cur, kBlockSize);
      cur = following;
    }

    _heap = nullptr;
    _block[kSideLeft] = nullptr;
    _block[kSideRight] = nullptr;
  }

  if (heap != nullptr) {
    Block* initial = static_cast<Block*>(heap->alloc(kBlockSize));
    if (ASMJIT_UNLIKELY(initial == nullptr))
      return DebugUtils::errored(kErrorNoHeapMemory);

    initial->_link[kSideLeft] = nullptr;
    initial->_link[kSideRight] = nullptr;
    initial->_start = (uint8_t*)initial + middleIndex;
    initial->_end = (uint8_t*)initial + middleIndex;

    _heap = heap;
    _block[kSideLeft] = initial;
    _block[kSideRight] = initial;
  }

  return kErrorOk;
}
// ============================================================================
// [asmjit::ZoneStackBase - Ops]
// ============================================================================
// Allocate a fresh block and link it at `side`; items in the new block start
// at `initialIndex`. Only called when the current block on that side is full
// (hence non-empty).
Error ZoneStackBase::_prepareBlock(uint32_t side, size_t initialIndex) noexcept {
  ASMJIT_ASSERT(isInitialized());

  Block* current = _block[side];
  ASMJIT_ASSERT(!current->isEmpty());

  Block* fresh = _heap->allocT<Block>(kBlockSize);
  if (ASMJIT_UNLIKELY(fresh == nullptr))
    return DebugUtils::errored(kErrorNoHeapMemory);

  fresh->_link[ side] = nullptr;
  fresh->_link[!side] = current;
  fresh->_start = (uint8_t*)fresh + initialIndex;
  fresh->_end = (uint8_t*)fresh + initialIndex;

  current->_link[side] = fresh;
  _block[side] = fresh;

  return kErrorOk;
}
// Called when the block at `side` becomes empty: either unlink and free it
// (when an inner neighbor exists), or - if it is the only remaining block -
// recenter its start/end pointers at `middleIndex` so both sides can grow
// into it again.
void ZoneStackBase::_cleanupBlock(uint32_t side, size_t middleIndex) noexcept {
  Block* block = _block[side];
  ASMJIT_ASSERT(block->isEmpty());

  Block* prev = block->_link[!side];
  if (prev) {
    ASMJIT_ASSERT(prev->_link[side] == block);
    _heap->release(block, kBlockSize);

    prev->_link[side] = nullptr;
    _block[side] = prev;
  }
  else if (_block[!side] == block && block->isEmpty()) {
    // BUGFIX: this branch previously tested `prev`, which is always null
    // here, so the condition could never hold (and would have dereferenced
    // null via `prev->isEmpty()` if it did). It must inspect `block` - the
    // sole remaining block: when the container becomes empty, center both
    // pointers in it.
    block->_start = (uint8_t*)block + middleIndex;
    block->_end = (uint8_t*)block + middleIndex;
  }
}
// ============================================================================
// [asmjit::ZoneHashBase - Utilities]
// ============================================================================
// Return the smallest prime in a fixed table that is greater than `x`, or
// the largest table entry (24593) when no entry exceeds `x`. Used to pick
// the next hash-table bucket count when rehashing.
static uint32_t ZoneHash_getClosestPrime(uint32_t x) noexcept {
  static const uint32_t primeTable[] = {
    23, 53, 193, 389, 769, 1543, 3079, 6151, 12289, 24593
  };

  uint32_t prime = 0;
  for (uint32_t candidate : primeTable) {
    prime = candidate;
    if (prime > x)
      break;
  }
  return prime;
}
// ============================================================================
// [asmjit::ZoneHashBase - Reset]
// ============================================================================
// Reset the hash table to its embedded single-bucket state and attach `heap`.
// A heap-allocated bucket array (anything other than `_embedded`) is returned
// to the previously attached heap first.
void ZoneHashBase::reset(ZoneHeap* heap) noexcept {
  ZoneHashNode** buckets = _data;
  if (buckets != _embedded)
    _heap->release(buckets, _bucketsCount * sizeof(ZoneHashNode*));

  _heap = heap;
  _size = 0;
  _bucketsCount = 1;
  _bucketsGrow = 1;

  _embedded[0] = nullptr;
  _data = _embedded;
}
// ============================================================================
// [asmjit::ZoneHashBase - Rehash]
// ============================================================================
// Rebuild the table with `newCount` buckets, redistributing every node under
// the new modulus. Allocation failure is tolerated: the table keeps working
// with the old bucket array, just with longer chains.
void ZoneHashBase::_rehash(uint32_t newCount) noexcept {
  ASMJIT_ASSERT(isInitialized());

  ZoneHashNode** oldData = _data;
  ZoneHashNode** newData = reinterpret_cast<ZoneHashNode**>(
    _heap->allocZeroed(static_cast<size_t>(newCount) * sizeof(ZoneHashNode*)));

  // We can still store nodes into the table, but it will degrade.
  if (ASMJIT_UNLIKELY(newData == nullptr))
    return;

  uint32_t i;
  uint32_t oldCount = _bucketsCount;

  // Relink every node to the front of its new bucket chain.
  for (i = 0; i < oldCount; i++) {
    ZoneHashNode* node = oldData[i];
    while (node) {
      ZoneHashNode* next = node->_hashNext;
      uint32_t hMod = node->_hVal % newCount;

      node->_hashNext = newData[hMod];
      newData[hMod] = node;

      node = next;
    }
  }

  // The embedded single-bucket array is not heap-owned - don't release it.
  if (oldData != _embedded)
    _heap->release(oldData, _bucketsCount * sizeof(ZoneHashNode*));

  // 90% is the maximum occupancy, can't overflow since the maximum capacity
  // is limited to the last prime number stored in the prime table.
  _bucketsCount = newCount;
  _bucketsGrow = newCount * 9 / 10;

  _data = newData;
}
// ============================================================================
// [asmjit::ZoneHashBase - Ops]
// ============================================================================
// Insert `node` into its bucket (no duplicate check) and grow the table once
// the occupancy threshold is crossed and the bucket already had an entry.
ZoneHashNode* ZoneHashBase::_put(ZoneHashNode* node) noexcept {
  uint32_t bucket = node->_hVal % _bucketsCount;

  // Push to the front of the bucket's chain.
  ZoneHashNode* chain = _data[bucket];
  node->_hashNext = chain;
  _data[bucket] = node;

  if (++_size >= _bucketsGrow && chain) {
    uint32_t grownCount = ZoneHash_getClosestPrime(_bucketsCount);
    if (grownCount != _bucketsCount)
      _rehash(grownCount);
  }

  return node;
}
// Unlink `node` from its bucket's chain. Returns `node` on success, or null
// when the node is not present in the table.
ZoneHashNode* ZoneHashBase::_del(ZoneHashNode* node) noexcept {
  uint32_t bucket = node->_hVal % _bucketsCount;

  ZoneHashNode** link = &_data[bucket];
  for (ZoneHashNode* cur = *link; cur != nullptr; cur = *link) {
    if (cur == node) {
      *link = cur->_hashNext;
      return node;
    }
    link = &cur->_hashNext;
  }

  return nullptr;
}
// ============================================================================
// [asmjit::Zone - Test]
// ============================================================================
#if defined(ASMJIT_TEST)
// Exercises ZoneVector<int>: append, clear, indexOf, and bulk growth through
// many reallocations.
UNIT(base_zonevector) {
  Zone zone(8096 - Zone::kZoneOverhead);
  ZoneHeap heap(&zone);

  int i;
  int kMax = 100000;
  ZoneVector<int> vec;

  INFO("ZoneVector<int> basic tests");
  EXPECT(vec.append(&heap, 0) == kErrorOk);
  EXPECT(vec.isEmpty() == false);
  EXPECT(vec.getLength() == 1);
  EXPECT(vec.getCapacity() >= 1);
  EXPECT(vec.indexOf(0) == 0);
  EXPECT(vec.indexOf(-11) == Globals::kInvalidIndex);

  vec.clear();
  EXPECT(vec.isEmpty());
  EXPECT(vec.getLength() == 0);
  EXPECT(vec.indexOf(0) == Globals::kInvalidIndex);

  // Append enough items to force the growth ladder and repeated reallocation.
  for (i = 0; i < kMax; i++) {
    EXPECT(vec.append(&heap, i) == kErrorOk);
  }
  EXPECT(vec.isEmpty() == false);
  EXPECT(vec.getLength() == static_cast<size_t>(kMax));
  EXPECT(vec.indexOf(kMax - 1) == static_cast<size_t>(kMax - 1));
}
// Exercises ZoneBitVector::resize() and fill() across many lengths so that
// all word-boundary edge cases (partial first/last words) are hit.
UNIT(base_ZoneBitVector) {
  Zone zone(8096 - Zone::kZoneOverhead);
  ZoneHeap heap(&zone);

  size_t i, count;
  size_t kMaxCount = 100;

  ZoneBitVector vec;
  EXPECT(vec.isEmpty());
  EXPECT(vec.getLength() == 0);

  INFO("ZoneBitVector::resize()");
  for (count = 1; count < kMaxCount; count++) {
    vec.clear();
    EXPECT(vec.resize(&heap, count, false) == kErrorOk);
    EXPECT(vec.getLength() == count);

    // Newly exposed bits must match the requested fill value.
    for (i = 0; i < count; i++)
      EXPECT(vec.getAt(i) == false);

    vec.clear();
    EXPECT(vec.resize(&heap, count, true) == kErrorOk);
    EXPECT(vec.getLength() == count);

    for (i = 0; i < count; i++)
      EXPECT(vec.getAt(i) == true);
  }

  INFO("ZoneBitVector::fill()");
  for (count = 1; count < kMaxCount; count += 2) {
    vec.clear();
    EXPECT(vec.resize(&heap, count) == kErrorOk);
    EXPECT(vec.getLength() == count);

    // Fill shrinking nested ranges with alternating values; the final state
    // should leave bit `i` equal to `i & 1`.
    for (i = 0; i < (count + 1) / 2; i++) {
      bool value = static_cast<bool>(i & 1);
      EXPECT(vec.fill(i, count - i, value) == kErrorOk);
    }

    for (i = 0; i < count; i++) {
      EXPECT(vec.getAt(i) == static_cast<bool>(i & 1));
    }
  }
}
// Exercises ZoneStack<int>: append/prepend/pop/popFirst, including enough
// items to span many blocks on both sides of the initial block.
UNIT(base_zonestack) {
  Zone zone(8096 - Zone::kZoneOverhead);
  ZoneHeap heap(&zone);
  ZoneStack<int> stack;

  INFO("ZoneStack<int> contains %d elements per one Block", ZoneStack<int>::kNumBlockItems);

  EXPECT(stack.init(&heap) == kErrorOk);
  EXPECT(stack.isEmpty(), "Stack must be empty after `init()`");

  EXPECT(stack.append(42) == kErrorOk);
  EXPECT(!stack.isEmpty() , "Stack must not be empty after an item has been appended");
  EXPECT(stack.pop() == 42, "Stack.pop() must return the item that has been appended last");
  EXPECT(stack.isEmpty() , "Stack must be empty after the last element has been removed");

  EXPECT(stack.prepend(43) == kErrorOk);
  EXPECT(!stack.isEmpty() , "Stack must not be empty after an item has been prepended");
  EXPECT(stack.popFirst() == 43, "Stack.popFirst() must return the item that has been prepended last");
  EXPECT(stack.isEmpty() , "Stack must be empty after the last element has been removed");

  int i;
  int iMin =-100;
  int iMax = 100000;

  // Grow the stack in both directions to force multi-block operation.
  INFO("Adding items from %d to %d to the stack", iMin, iMax);
  for (i = 1; i <= iMax; i++) stack.append(i);
  for (i = 0; i >= iMin; i--) stack.prepend(i);

  INFO("Validating popFirst()");
  for (i = iMin; i <= iMax; i++) {
    int item = stack.popFirst();
    EXPECT(i == item, "Item '%d' didn't match the item '%d' popped", i, item);
  }
  EXPECT(stack.isEmpty());

  INFO("Adding items from %d to %d to the stack", iMin, iMax);
  for (i = 0; i >= iMin; i--) stack.prepend(i);
  for (i = 1; i <= iMax; i++) stack.append(i);

  INFO("Validating pop()");
  for (i = iMax; i >= iMin; i--) {
    int item = stack.pop();
    EXPECT(i == item, "Item '%d' didn't match the item '%d' popped", i, item);
  }
  EXPECT(stack.isEmpty());
}
#endif // ASMJIT_TEST
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,23 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_X86_H
#define _ASMJIT_X86_H
// [Dependencies]
#include "./base.h"
#include "./x86/x86assembler.h"
#include "./x86/x86builder.h"
#include "./x86/x86compiler.h"
#include "./x86/x86emitter.h"
#include "./x86/x86inst.h"
#include "./x86/x86misc.h"
#include "./x86/x86operand.h"
// [Guard]
#endif // _ASMJIT_X86_H

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,96 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_X86_X86ASSEMBLER_H
#define _ASMJIT_X86_X86ASSEMBLER_H
// [Dependencies]
#include "../base/assembler.h"
#include "../base/utils.h"
#include "../x86/x86emitter.h"
#include "../x86/x86operand.h"
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
//! \addtogroup asmjit_x86
//! \{
// ============================================================================
// [asmjit::X86Assembler]
// ============================================================================
//! X86/X64 assembler.
//!
//! X86/X64 assembler emits machine-code into buffers managed by \ref CodeHolder.
//! This is the terminal emitter: unlike \ref X86Builder / \ref X86Compiler it
//! encodes each instruction immediately instead of storing nodes.
class ASMJIT_VIRTAPI X86Assembler
  : public Assembler,
    public X86EmitterImplicitT<X86Assembler> {
public:
  typedef Assembler Base;

  // --------------------------------------------------------------------------
  // [Construction / Destruction]
  // --------------------------------------------------------------------------

  //! Create an `X86Assembler`, optionally attaching it to `code`.
  ASMJIT_API X86Assembler(CodeHolder* code = nullptr) noexcept;
  //! Destroy the `X86Assembler` instance.
  ASMJIT_API virtual ~X86Assembler() noexcept;

  // --------------------------------------------------------------------------
  // [Compatibility]
  // --------------------------------------------------------------------------

  // NOTE(review): these casts rely on X86Emitter sharing the emitter vtable
  // layout with this class (it is a function-only facade); presumably safe by
  // design of X86EmitterImplicitT - confirm against x86emitter.h.

  //! Explicit cast to `X86Emitter`.
  ASMJIT_INLINE X86Emitter* asEmitter() noexcept { return reinterpret_cast<X86Emitter*>(this); }
  //! Explicit cast to `X86Emitter` (const).
  ASMJIT_INLINE const X86Emitter* asEmitter() const noexcept { return reinterpret_cast<const X86Emitter*>(this); }
  //! Implicit cast to `X86Emitter`.
  ASMJIT_INLINE operator X86Emitter&() noexcept { return *asEmitter(); }
  //! Implicit cast to `X86Emitter` (const).
  ASMJIT_INLINE operator const X86Emitter&() const noexcept { return *asEmitter(); }

  // --------------------------------------------------------------------------
  // [Accessors]
  // --------------------------------------------------------------------------

  // NOTE: X86Assembler uses _privateData to store 'address-override' bit that
  // is used to decide whether to emit address-override (67H) prefix based on
  // the memory BASE+INDEX registers. It's either `kX86MemInfo_67H_X86` or
  // `kX86MemInfo_67H_X64`.

  //! Get the mask used to detect a required address-override (67H) prefix.
  ASMJIT_INLINE uint32_t _getAddressOverrideMask() const noexcept { return _privateData; }
  //! Set the mask used to detect a required address-override (67H) prefix.
  ASMJIT_INLINE void _setAddressOverrideMask(uint32_t m) noexcept { _privateData = m; }

  // --------------------------------------------------------------------------
  // [Events]
  // --------------------------------------------------------------------------

  ASMJIT_API Error onAttach(CodeHolder* code) noexcept override;
  ASMJIT_API Error onDetach(CodeHolder* code) noexcept override;

  // --------------------------------------------------------------------------
  // [Code-Generation]
  // --------------------------------------------------------------------------

  using CodeEmitter::_emit;

  //! Encode a single instruction (up to 4 operands) into the code buffer.
  ASMJIT_API Error _emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3) override;
  //! Align the current cursor according to `mode` and `alignment`.
  ASMJIT_API Error align(uint32_t mode, uint32_t alignment) override;
};
//! \}
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"
// [Guard]
#endif // _ASMJIT_X86_X86ASSEMBLER_H

View file

@ -0,0 +1,66 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Export]
#define ASMJIT_EXPORTS
// [Guard]
#include "../asmjit_build.h"
#if defined(ASMJIT_BUILD_X86) && !defined(ASMJIT_DISABLE_COMPILER)
// [Dependencies]
#include "../x86/x86builder.h"
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
// ============================================================================
// [asmjit::X86Builder - Construction / Destruction]
// ============================================================================
//! Create an `X86Builder` and, when `code` is provided, attach it immediately.
X86Builder::X86Builder(CodeHolder* code) noexcept : CodeBuilder() {
  if (code != nullptr) {
    code->attach(this);
  }
}

//! Destroy the `X86Builder` instance.
X86Builder::~X86Builder() noexcept {}
// ============================================================================
// [asmjit::X86Builder - Events]
// ============================================================================
//! Called when the builder is attached to `code`.
//!
//! Rejects non-X86 architectures and caches the native GP register table
//! (32-bit GPD for X86, 64-bit GPQ otherwise).
Error X86Builder::onAttach(CodeHolder* code) noexcept {
  const uint32_t arch = code->getArchType();
  if (!ArchInfo::isX86Family(arch))
    return DebugUtils::errored(kErrorInvalidArch);

  ASMJIT_PROPAGATE(Base::onAttach(code));

  _nativeGpArray = (arch == ArchInfo::kTypeX86) ? x86OpData.gpd : x86OpData.gpq;
  _nativeGpReg = _nativeGpArray[0];
  return kErrorOk;
}
// ============================================================================
// [asmjit::X86Builder - Inst]
// ============================================================================
//! \internal
//!
//! Record a single instruction into the builder's node list.
//!
//! NOTE(review): this is an unimplemented stub - it accepts the instruction
//! and reports success without creating any node (see TODO below). Callers
//! currently get no code recorded from X86Builder.
Error X86Builder::_emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3) {
  // TODO:
  return kErrorOk;
}
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"
// [Guard]
#endif // ASMJIT_BUILD_X86 && !ASMJIT_DISABLE_COMPILER

View file

@ -0,0 +1,86 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_X86_X86BUILDER_H
#define _ASMJIT_X86_X86BUILDER_H
#include "../asmjit_build.h"
#if !defined(ASMJIT_DISABLE_BUILDER)
// [Dependencies]
#include "../base/codebuilder.h"
#include "../base/simdtypes.h"
#include "../x86/x86emitter.h"
#include "../x86/x86misc.h"
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
//! \addtogroup asmjit_x86
//! \{
// ============================================================================
// [asmjit::CodeBuilder]
// ============================================================================
//! Architecture-dependent \ref CodeBuilder targeting X86 and X64.
//!
//! Records emitted instructions as nodes instead of encoding them directly,
//! allowing post-processing before final serialization.
class ASMJIT_VIRTAPI X86Builder
  : public CodeBuilder,
    public X86EmitterImplicitT<X86Builder> {
public:
  ASMJIT_NONCOPYABLE(X86Builder)
  typedef CodeBuilder Base;

  // --------------------------------------------------------------------------
  // [Construction / Destruction]
  // --------------------------------------------------------------------------

  //! Create a `X86Builder` instance.
  ASMJIT_API X86Builder(CodeHolder* code = nullptr) noexcept;
  //! Destroy the `X86Builder` instance.
  ASMJIT_API ~X86Builder() noexcept;

  // --------------------------------------------------------------------------
  // [Compatibility]
  // --------------------------------------------------------------------------

  // NOTE(review): these casts rely on X86Emitter being a function-only facade
  // over the emitter interface - confirm against x86emitter.h.

  //! Explicit cast to `X86Emitter`.
  ASMJIT_INLINE X86Emitter* asEmitter() noexcept { return reinterpret_cast<X86Emitter*>(this); }
  //! Explicit cast to `X86Emitter` (const).
  ASMJIT_INLINE const X86Emitter* asEmitter() const noexcept { return reinterpret_cast<const X86Emitter*>(this); }
  //! Implicit cast to `X86Emitter`.
  ASMJIT_INLINE operator X86Emitter&() noexcept { return *asEmitter(); }
  //! Implicit cast to `X86Emitter` (const).
  ASMJIT_INLINE operator const X86Emitter&() const noexcept { return *asEmitter(); }

  // --------------------------------------------------------------------------
  // [Events]
  // --------------------------------------------------------------------------

  //! Validates the architecture and caches the native GP register table.
  ASMJIT_API virtual Error onAttach(CodeHolder* code) noexcept override;

  // --------------------------------------------------------------------------
  // [Code-Generation]
  // --------------------------------------------------------------------------

  //! Record a single instruction (up to 4 operands) as a builder node.
  ASMJIT_API virtual Error _emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3) override;
};
//! \}
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"
// [Guard]
#endif // !ASMJIT_DISABLE_BUILDER
#endif // _ASMJIT_X86_X86BUILDER_H

View file

@ -0,0 +1,376 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Export]
#define ASMJIT_EXPORTS
// [Guard]
#include "../asmjit_build.h"
#if defined(ASMJIT_BUILD_X86) && !defined(ASMJIT_DISABLE_COMPILER)
// [Dependencies]
#include "../base/utils.h"
#include "../x86/x86compiler.h"
#include "../x86/x86regalloc_p.h"
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
// ============================================================================
// [asmjit::X86Compiler - Construction / Destruction]
// ============================================================================
//! Create an `X86Compiler` and, when `code` is provided, attach it immediately.
X86Compiler::X86Compiler(CodeHolder* code) noexcept : CodeCompiler() {
  if (code != nullptr) {
    code->attach(this);
  }
}

//! Destroy the `X86Compiler` instance.
X86Compiler::~X86Compiler() noexcept {}
// ============================================================================
// [asmjit::X86Compiler - Events]
// ============================================================================
//! Called when the compiler is attached to `code`.
//!
//! Rejects non-X86 architectures, reserves room for the register-allocation
//! pass before attaching (so the later `addPassT` cannot fail on growth),
//! caches the native GP register table, and registers X86RAPass.
Error X86Compiler::onAttach(CodeHolder* code) noexcept {
  const uint32_t arch = code->getArchType();
  if (!ArchInfo::isX86Family(arch))
    return DebugUtils::errored(kErrorInvalidArch);

  // Reserve pass storage first; keeps attach atomic w.r.t. heap failures.
  ASMJIT_PROPAGATE(_cbPasses.willGrow(&_cbHeap, 1));
  ASMJIT_PROPAGATE(Base::onAttach(code));

  _nativeGpArray = (arch == ArchInfo::kTypeX86) ? x86OpData.gpd : x86OpData.gpq;
  _nativeGpReg = _nativeGpArray[0];
  return addPassT<X86RAPass>();
}
// ============================================================================
// [asmjit::X86Compiler - Finalize]
// ============================================================================
//! Finalize the recorded code.
//!
//! Flushes the global constant pool, runs every registered pass (e.g.
//! register allocation) over the node list, and serializes the result into
//! an assembler. Returns immediately if the compiler is already in an error
//! state.
Error X86Compiler::finalize() {
  if (_lastError) return _lastError;
  // Flush the global constant pool.
  if (_globalConstPool) {
    addNode(_globalConstPool);
    _globalConstPool = nullptr;
  }
  Error err = kErrorOk;
  ZoneVector<CBPass*>& passes = _cbPasses;
  for (size_t i = 0, len = passes.getLength(); i < len; i++) {
    CBPass* pass = passes[i];
    err = pass->process(&_cbPassZone);
    // The pass zone is scratch memory for a single pass only; recycle it
    // after each pass (and once more below in case the loop broke early).
    _cbPassZone.reset();
    if (err) break;
  }
  _cbPassZone.reset();
  if (ASMJIT_UNLIKELY(err)) return setLastError(err);
  // TODO: There must be possibility to attach more assemblers, this is not so nice.
  if (_code->_cgAsm) {
    // An assembler is already attached to the CodeHolder - reuse it.
    return serialize(_code->_cgAsm);
  }
  else {
    // Otherwise serialize through a temporary stack-local assembler.
    X86Assembler a(_code);
    return serialize(&a);
  }
}
// ============================================================================
// [asmjit::X86Compiler - Inst]
// ============================================================================
//! \internal
//!
//! True when `instId` is a jump-like instruction (Jcc/JMP family or LOOP
//! family) that must be recorded as a `CBJump` node.
static ASMJIT_INLINE bool isJumpInst(uint32_t instId) noexcept {
  const bool isJccOrJmp = instId >= X86Inst::kIdJa   && instId <= X86Inst::kIdJz;
  const bool isLoop     = instId >= X86Inst::kIdLoop && instId <= X86Inst::kIdLoopne;
  return isJccOrJmp || isLoop;
}
//! \internal
//!
//! Record a single instruction (up to 4 operands) into the compiler's node
//! list. Jump-like instructions become `CBJump` nodes (so passes can follow
//! control flow and track label back-references); everything else becomes a
//! plain `CBInst` node.
//!
//! Returns the last error without doing anything if the compiler is already
//! in an error state.
Error X86Compiler::_emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3) {
  uint32_t options = getOptions() | getGlobalOptions();
  const char* inlineComment = getInlineComment();
  // Operands are packed from the left, so the count is the number of
  // non-null operands.
  uint32_t opCount = static_cast<uint32_t>(!o0.isNone()) +
                     static_cast<uint32_t>(!o1.isNone()) +
                     static_cast<uint32_t>(!o2.isNone()) +
                     static_cast<uint32_t>(!o3.isNone()) ;
  // Handle failure and rare cases first.
  const uint32_t kErrorsAndSpecialCases = kOptionMaybeFailureCase | // CodeEmitter is in error state.
                                          kOptionStrictValidation ; // Strict validation.
  if (ASMJIT_UNLIKELY(options & kErrorsAndSpecialCases)) {
    // Don't do anything if we are in error state.
    if (_lastError) return _lastError;
#if !defined(ASMJIT_DISABLE_VALIDATION)
    // Strict validation.
    if (options & kOptionStrictValidation) {
      Operand opArray[] = {
        Operand(o0),
        Operand(o1),
        Operand(o2),
        Operand(o3)
      };
      Inst::Detail instDetail(instId, options, _extraReg);
      Error err = Inst::validate(getArchType(), instDetail, opArray, opCount);
      if (err) {
#if !defined(ASMJIT_DISABLE_LOGGING)
        StringBuilderTmp<256> sb;
        sb.appendString(DebugUtils::errorAsString(err));
        sb.appendString(": ");
        Logging::formatInstruction(sb, 0, this, getArchType(), instDetail, opArray, opCount);
        return setLastError(err, sb.getData());
#else
        return setLastError(err);
#endif
      }
      // Clear it as it must be enabled explicitly on assembler side.
      options &= ~kOptionStrictValidation;
    }
#endif // ASMJIT_DISABLE_VALIDATION
  }
  resetOptions();
  resetInlineComment();
  // Decide between `CBInst` and `CBJump`.
  if (isJumpInst(instId)) {
    CBJump* node = _cbHeap.allocT<CBJump>(sizeof(CBJump) + opCount * sizeof(Operand));
    if (ASMJIT_UNLIKELY(!node))
      return setLastError(DebugUtils::errored(kErrorNoHeapMemory));
    // BUGFIX: the operand array is computed only after the null-check above;
    // pointer arithmetic on a null pointer is undefined behavior.
    // The operands live immediately after the node itself.
    Operand* opArray = reinterpret_cast<Operand*>(reinterpret_cast<uint8_t*>(node) + sizeof(CBJump));
    if (opCount > 0) opArray[0].copyFrom(o0);
    if (opCount > 1) opArray[1].copyFrom(o1);
    if (opCount > 2) opArray[2].copyFrom(o2);
    if (opCount > 3) opArray[3].copyFrom(o3);
    new(node) CBJump(this, instId, options, opArray, opCount);
    node->_instDetail.extraReg = _extraReg;
    _extraReg.reset();
    // Resolve the jump target, unless the caller asked not to follow it or
    // the target is not a label (indirect jump).
    CBLabel* jTarget = nullptr;
    if (!(options & kOptionUnfollow)) {
      if (opArray[0].isLabel()) {
        Error err = getCBLabel(&jTarget, static_cast<Label&>(opArray[0]));
        if (err) return setLastError(err);
      }
      else {
        options |= kOptionUnfollow;
      }
    }
    node->setOptions(options);
    node->orFlags(instId == X86Inst::kIdJmp ? CBNode::kFlagIsJmp | CBNode::kFlagIsTaken : CBNode::kFlagIsJcc);
    // Link the node into the target label's list of incoming jumps.
    node->_target = jTarget;
    node->_jumpNext = nullptr;
    if (jTarget) {
      node->_jumpNext = static_cast<CBJump*>(jTarget->_from);
      jTarget->_from = node;
      jTarget->addNumRefs();
    }
    // The 'jmp' is always taken, conditional jump can contain hint, we detect it.
    if (instId == X86Inst::kIdJmp)
      node->orFlags(CBNode::kFlagIsTaken);
    else if (options & X86Inst::kOptionTaken)
      node->orFlags(CBNode::kFlagIsTaken);
    if (inlineComment) {
      // Copy the comment into the data zone so it outlives the caller's buffer.
      inlineComment = static_cast<char*>(_cbDataZone.dup(inlineComment, ::strlen(inlineComment), true));
      node->setInlineComment(inlineComment);
    }
    addNode(node);
    return kErrorOk;
  }
  else {
    CBInst* node = _cbHeap.allocT<CBInst>(sizeof(CBInst) + opCount * sizeof(Operand));
    if (ASMJIT_UNLIKELY(!node))
      return setLastError(DebugUtils::errored(kErrorNoHeapMemory));
    // BUGFIX: same as above - only touch `node` after the null-check.
    Operand* opArray = reinterpret_cast<Operand*>(reinterpret_cast<uint8_t*>(node) + sizeof(CBInst));
    if (opCount > 0) opArray[0].copyFrom(o0);
    if (opCount > 1) opArray[1].copyFrom(o1);
    if (opCount > 2) opArray[2].copyFrom(o2);
    if (opCount > 3) opArray[3].copyFrom(o3);
    node = new(node) CBInst(this, instId, options, opArray, opCount);
    node->_instDetail.extraReg = _extraReg;
    _extraReg.reset();
    if (inlineComment) {
      // Copy the comment into the data zone so it outlives the caller's buffer.
      inlineComment = static_cast<char*>(_cbDataZone.dup(inlineComment, ::strlen(inlineComment), true));
      node->setInlineComment(inlineComment);
    }
    addNode(node);
    return kErrorOk;
  }
}
//! \internal
//!
//! Record a single instruction (up to 6 operands) into the compiler's node
//! list. Identical to the 4-operand overload except that the presence of
//! `o4`/`o5` forces the operand count to 5 or 6 respectively.
Error X86Compiler::_emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4, const Operand_& o5) {
  uint32_t options = getOptions() | getGlobalOptions();
  const char* inlineComment = getInlineComment();
  // Operands are packed from the left; count o0..o3 first.
  uint32_t opCount = static_cast<uint32_t>(!o0.isNone()) +
                     static_cast<uint32_t>(!o1.isNone()) +
                     static_cast<uint32_t>(!o2.isNone()) +
                     static_cast<uint32_t>(!o3.isNone()) ;
  // Count 5th and 6th operands.
  if (!o4.isNone()) opCount = 5;
  if (!o5.isNone()) opCount = 6;
  // Handle failure and rare cases first.
  const uint32_t kErrorsAndSpecialCases = kOptionMaybeFailureCase | // CodeEmitter in error state.
                                          kOptionStrictValidation ; // Strict validation.
  if (ASMJIT_UNLIKELY(options & kErrorsAndSpecialCases)) {
    // Don't do anything if we are in error state.
    if (_lastError) return _lastError;
#if !defined(ASMJIT_DISABLE_VALIDATION)
    // Strict validation.
    if (options & kOptionStrictValidation) {
      Operand opArray[] = {
        Operand(o0),
        Operand(o1),
        Operand(o2),
        Operand(o3),
        Operand(o4),
        Operand(o5)
      };
      Inst::Detail instDetail(instId, options, _extraReg);
      Error err = Inst::validate(getArchType(), instDetail, opArray, opCount);
      if (err) {
#if !defined(ASMJIT_DISABLE_LOGGING)
        StringBuilderTmp<256> sb;
        sb.appendString(DebugUtils::errorAsString(err));
        sb.appendString(": ");
        Logging::formatInstruction(sb, 0, this, getArchType(), instDetail, opArray, opCount);
        return setLastError(err, sb.getData());
#else
        return setLastError(err);
#endif
      }
      // Clear it as it must be enabled explicitly on assembler side.
      options &= ~kOptionStrictValidation;
    }
#endif // ASMJIT_DISABLE_VALIDATION
  }
  resetOptions();
  resetInlineComment();
  // Decide between `CBInst` and `CBJump`.
  if (isJumpInst(instId)) {
    CBJump* node = _cbHeap.allocT<CBJump>(sizeof(CBJump) + opCount * sizeof(Operand));
    if (ASMJIT_UNLIKELY(!node))
      return setLastError(DebugUtils::errored(kErrorNoHeapMemory));
    // BUGFIX: the operand array is computed only after the null-check above;
    // pointer arithmetic on a null pointer is undefined behavior.
    Operand* opArray = reinterpret_cast<Operand*>(reinterpret_cast<uint8_t*>(node) + sizeof(CBJump));
    if (opCount > 0) opArray[0].copyFrom(o0);
    if (opCount > 1) opArray[1].copyFrom(o1);
    if (opCount > 2) opArray[2].copyFrom(o2);
    if (opCount > 3) opArray[3].copyFrom(o3);
    if (opCount > 4) opArray[4].copyFrom(o4);
    if (opCount > 5) opArray[5].copyFrom(o5);
    new(node) CBJump(this, instId, options, opArray, opCount);
    node->_instDetail.extraReg = _extraReg;
    _extraReg.reset();
    // Resolve the jump target, unless the caller asked not to follow it or
    // the target is not a label (indirect jump).
    CBLabel* jTarget = nullptr;
    if (!(options & kOptionUnfollow)) {
      if (opArray[0].isLabel()) {
        Error err = getCBLabel(&jTarget, static_cast<Label&>(opArray[0]));
        if (err) return setLastError(err);
      }
      else {
        options |= kOptionUnfollow;
      }
    }
    node->setOptions(options);
    node->orFlags(instId == X86Inst::kIdJmp ? CBNode::kFlagIsJmp | CBNode::kFlagIsTaken : CBNode::kFlagIsJcc);
    // Link the node into the target label's list of incoming jumps.
    node->_target = jTarget;
    node->_jumpNext = nullptr;
    if (jTarget) {
      node->_jumpNext = static_cast<CBJump*>(jTarget->_from);
      jTarget->_from = node;
      jTarget->addNumRefs();
    }
    // The 'jmp' is always taken, conditional jump can contain hint, we detect it.
    if (instId == X86Inst::kIdJmp)
      node->orFlags(CBNode::kFlagIsTaken);
    else if (options & X86Inst::kOptionTaken)
      node->orFlags(CBNode::kFlagIsTaken);
    if (inlineComment) {
      // Copy the comment into the data zone so it outlives the caller's buffer.
      inlineComment = static_cast<char*>(_cbDataZone.dup(inlineComment, ::strlen(inlineComment), true));
      node->setInlineComment(inlineComment);
    }
    addNode(node);
    return kErrorOk;
  }
  else {
    CBInst* node = _cbHeap.allocT<CBInst>(sizeof(CBInst) + opCount * sizeof(Operand));
    if (ASMJIT_UNLIKELY(!node))
      return setLastError(DebugUtils::errored(kErrorNoHeapMemory));
    // BUGFIX: same as above - only touch `node` after the null-check.
    Operand* opArray = reinterpret_cast<Operand*>(reinterpret_cast<uint8_t*>(node) + sizeof(CBInst));
    if (opCount > 0) opArray[0].copyFrom(o0);
    if (opCount > 1) opArray[1].copyFrom(o1);
    if (opCount > 2) opArray[2].copyFrom(o2);
    if (opCount > 3) opArray[3].copyFrom(o3);
    if (opCount > 4) opArray[4].copyFrom(o4);
    if (opCount > 5) opArray[5].copyFrom(o5);
    node = new(node) CBInst(this, instId, options, opArray, opCount);
    node->_instDetail.extraReg = _extraReg;
    _extraReg.reset();
    if (inlineComment) {
      // Copy the comment into the data zone so it outlives the caller's buffer.
      inlineComment = static_cast<char*>(_cbDataZone.dup(inlineComment, ::strlen(inlineComment), true));
      node->setInlineComment(inlineComment);
    }
    addNode(node);
    return kErrorOk;
  }
}
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"
// [Guard]
#endif // ASMJIT_BUILD_X86 && !ASMJIT_DISABLE_COMPILER

View file

@ -0,0 +1,293 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_X86_X86COMPILER_H
#define _ASMJIT_X86_X86COMPILER_H
#include "../asmjit_build.h"
#if !defined(ASMJIT_DISABLE_COMPILER)
// [Dependencies]
#include "../base/codecompiler.h"
#include "../base/simdtypes.h"
#include "../x86/x86emitter.h"
#include "../x86/x86misc.h"
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
//! \addtogroup asmjit_x86
//! \{
// ============================================================================
// [asmjit::X86Compiler]
// ============================================================================
//! Architecture-dependent \ref CodeCompiler targeting X86 and X64.
//!
//! Adds X86-specific virtual-register creation, stack/constant-pool helpers,
//! and function call/return recording on top of \ref CodeCompiler.
class ASMJIT_VIRTAPI X86Compiler
  : public CodeCompiler,
    public X86EmitterExplicitT<X86Compiler> {
public:
  ASMJIT_NONCOPYABLE(X86Compiler)
  typedef CodeCompiler Base;

  // --------------------------------------------------------------------------
  // [Construction / Destruction]
  // --------------------------------------------------------------------------

  //! Create a `X86Compiler` instance.
  ASMJIT_API X86Compiler(CodeHolder* code = nullptr) noexcept;
  //! Destroy the `X86Compiler` instance.
  ASMJIT_API ~X86Compiler() noexcept;

  // --------------------------------------------------------------------------
  // [Compatibility]
  // --------------------------------------------------------------------------

  //! Explicit cast to `X86Emitter`.
  ASMJIT_INLINE X86Emitter* asEmitter() noexcept { return reinterpret_cast<X86Emitter*>(this); }
  //! Explicit cast to `X86Emitter` (const).
  ASMJIT_INLINE const X86Emitter* asEmitter() const noexcept { return reinterpret_cast<const X86Emitter*>(this); }
  //! Implicit cast to `X86Emitter`.
  ASMJIT_INLINE operator X86Emitter&() noexcept { return *asEmitter(); }
  //! Implicit cast to `X86Emitter` (const).
  ASMJIT_INLINE operator const X86Emitter&() const noexcept { return *asEmitter(); }

  // --------------------------------------------------------------------------
  // [Events]
  // --------------------------------------------------------------------------

  //! Validates the architecture, caches native GP registers, adds X86RAPass.
  ASMJIT_API virtual Error onAttach(CodeHolder* code) noexcept override;

  // --------------------------------------------------------------------------
  // [Code-Generation]
  // --------------------------------------------------------------------------

  //! Record an instruction with up to 4 operands as a node.
  ASMJIT_API virtual Error _emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3) override;
  //! Record an instruction with up to 6 operands as a node.
  ASMJIT_API virtual Error _emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4, const Operand_& o5) override;

  // -------------------------------------------------------------------------
  // [Finalize]
  // -------------------------------------------------------------------------

  //! Run all passes and serialize the node list into an assembler.
  ASMJIT_API virtual Error finalize() override;

  // --------------------------------------------------------------------------
  // [VirtReg]
  // --------------------------------------------------------------------------

// Helper macro: forwards an optional printf-like register name to `_newReg`.
// When logging is disabled the name is dropped entirely.
#if !defined(ASMJIT_DISABLE_LOGGING)
#define ASMJIT_NEW_REG(OUT, PARAM, NAME_FMT) \
  va_list ap; \
  va_start(ap, NAME_FMT); \
  _newReg(OUT, PARAM, NAME_FMT, ap); \
  va_end(ap)
#else
#define ASMJIT_NEW_REG(OUT, PARAM, NAME_FMT) \
  ASMJIT_UNUSED(NAME_FMT); \
  _newReg(OUT, PARAM, nullptr)
#endif

// Generates two overloads of a `newXxxReg(typeId, ...)` factory - one without
// and one with a formatted register name.
#define ASMJIT_NEW_REG_USER(FUNC, REG) \
  ASMJIT_INLINE REG FUNC(uint32_t typeId) { \
    REG reg(NoInit); \
    _newReg(reg, typeId, nullptr); \
    return reg; \
  } \
  \
  REG FUNC(uint32_t typeId, const char* nameFmt, ...) { \
    REG reg(NoInit); \
    ASMJIT_NEW_REG(reg, typeId, nameFmt); \
    return reg; \
  }

// Same as ASMJIT_NEW_REG_USER, but with the type-id baked into the factory.
#define ASMJIT_NEW_REG_AUTO(FUNC, REG, TYPE_ID) \
  ASMJIT_INLINE REG FUNC() { \
    REG reg(NoInit); \
    _newReg(reg, TYPE_ID, nullptr); \
    return reg; \
  } \
  \
  REG FUNC(const char* nameFmt, ...) { \
    REG reg(NoInit); \
    ASMJIT_NEW_REG(reg, TYPE_ID, nameFmt); \
    return reg; \
  }

  //! Create a new virtual register of the same kind/size as `ref`.
  template<typename RegT>
  ASMJIT_INLINE RegT newSimilarReg(const RegT& ref) {
    RegT reg(NoInit);
    _newReg(reg, ref, nullptr);
    return reg;
  }

  //! \overload (with a formatted register name, used for logging).
  template<typename RegT>
  RegT newSimilarReg(const RegT& ref, const char* nameFmt, ...) {
    RegT reg(NoInit);
    ASMJIT_NEW_REG(reg, ref, nameFmt);
    return reg;
  }

  // Factories taking an explicit type-id.
  ASMJIT_NEW_REG_USER(newReg    , X86Reg )
  ASMJIT_NEW_REG_USER(newGpReg  , X86Gp  )
  ASMJIT_NEW_REG_USER(newMmReg  , X86Mm  )
  ASMJIT_NEW_REG_USER(newKReg   , X86KReg)
  ASMJIT_NEW_REG_USER(newXmmReg , X86Xmm )
  ASMJIT_NEW_REG_USER(newYmmReg , X86Ymm )
  ASMJIT_NEW_REG_USER(newZmmReg , X86Zmm )

  // Factories with the type-id baked in.
  ASMJIT_NEW_REG_AUTO(newI8     , X86Gp  , TypeId::kI8     )
  ASMJIT_NEW_REG_AUTO(newU8     , X86Gp  , TypeId::kU8     )
  ASMJIT_NEW_REG_AUTO(newI16    , X86Gp  , TypeId::kI16    )
  ASMJIT_NEW_REG_AUTO(newU16    , X86Gp  , TypeId::kU16    )
  ASMJIT_NEW_REG_AUTO(newI32    , X86Gp  , TypeId::kI32    )
  ASMJIT_NEW_REG_AUTO(newU32    , X86Gp  , TypeId::kU32    )
  ASMJIT_NEW_REG_AUTO(newI64    , X86Gp  , TypeId::kI64    )
  ASMJIT_NEW_REG_AUTO(newU64    , X86Gp  , TypeId::kU64    )
  ASMJIT_NEW_REG_AUTO(newInt8   , X86Gp  , TypeId::kI8     )
  ASMJIT_NEW_REG_AUTO(newUInt8  , X86Gp  , TypeId::kU8     )
  ASMJIT_NEW_REG_AUTO(newInt16  , X86Gp  , TypeId::kI16    )
  ASMJIT_NEW_REG_AUTO(newUInt16 , X86Gp  , TypeId::kU16    )
  ASMJIT_NEW_REG_AUTO(newInt32  , X86Gp  , TypeId::kI32    )
  ASMJIT_NEW_REG_AUTO(newUInt32 , X86Gp  , TypeId::kU32    )
  ASMJIT_NEW_REG_AUTO(newInt64  , X86Gp  , TypeId::kI64    )
  ASMJIT_NEW_REG_AUTO(newUInt64 , X86Gp  , TypeId::kU64    )
  ASMJIT_NEW_REG_AUTO(newIntPtr , X86Gp  , TypeId::kIntPtr )
  ASMJIT_NEW_REG_AUTO(newUIntPtr, X86Gp  , TypeId::kUIntPtr)
  ASMJIT_NEW_REG_AUTO(newGpb    , X86Gp  , TypeId::kU8     )
  ASMJIT_NEW_REG_AUTO(newGpw    , X86Gp  , TypeId::kU16    )
  ASMJIT_NEW_REG_AUTO(newGpd    , X86Gp  , TypeId::kU32    )
  ASMJIT_NEW_REG_AUTO(newGpq    , X86Gp  , TypeId::kU64    )
  ASMJIT_NEW_REG_AUTO(newGpz    , X86Gp  , TypeId::kUIntPtr)
  ASMJIT_NEW_REG_AUTO(newKb     , X86KReg, TypeId::kMask8  )
  ASMJIT_NEW_REG_AUTO(newKw     , X86KReg, TypeId::kMask16 )
  ASMJIT_NEW_REG_AUTO(newKd     , X86KReg, TypeId::kMask32 )
  ASMJIT_NEW_REG_AUTO(newKq     , X86KReg, TypeId::kMask64 )
  ASMJIT_NEW_REG_AUTO(newMm     , X86Mm  , TypeId::kMmx64  )
  ASMJIT_NEW_REG_AUTO(newXmm    , X86Xmm , TypeId::kI32x4  )
  ASMJIT_NEW_REG_AUTO(newXmmSs  , X86Xmm , TypeId::kF32x1  )
  ASMJIT_NEW_REG_AUTO(newXmmSd  , X86Xmm , TypeId::kF64x1  )
  ASMJIT_NEW_REG_AUTO(newXmmPs  , X86Xmm , TypeId::kF32x4  )
  ASMJIT_NEW_REG_AUTO(newXmmPd  , X86Xmm , TypeId::kF64x2  )
  ASMJIT_NEW_REG_AUTO(newYmm    , X86Ymm , TypeId::kI32x8  )
  ASMJIT_NEW_REG_AUTO(newYmmPs  , X86Ymm , TypeId::kF32x8  )
  ASMJIT_NEW_REG_AUTO(newYmmPd  , X86Ymm , TypeId::kF64x4  )
  ASMJIT_NEW_REG_AUTO(newZmm    , X86Zmm , TypeId::kI32x16 )
  ASMJIT_NEW_REG_AUTO(newZmmPs  , X86Zmm , TypeId::kF32x16 )
  ASMJIT_NEW_REG_AUTO(newZmmPd  , X86Zmm , TypeId::kF64x8  )

#undef ASMJIT_NEW_REG_AUTO
#undef ASMJIT_NEW_REG_USER
#undef ASMJIT_NEW_REG

  // --------------------------------------------------------------------------
  // [Stack]
  // --------------------------------------------------------------------------

  //! Create a new memory chunk allocated on the current function's stack.
  ASMJIT_INLINE X86Mem newStack(uint32_t size, uint32_t alignment, const char* name = nullptr) {
    X86Mem m(NoInit);
    _newStack(m, size, alignment, name);
    return m;
  }

  // --------------------------------------------------------------------------
  // [Const]
  // --------------------------------------------------------------------------

  //! Put data to a constant-pool and get a memory reference to it.
  ASMJIT_INLINE X86Mem newConst(uint32_t scope, const void* data, size_t size) {
    X86Mem m(NoInit);
    _newConst(m, scope, data, size);
    return m;
  }

  //! Put a BYTE `val` to a constant-pool.
  ASMJIT_INLINE X86Mem newByteConst(uint32_t scope, uint8_t val) noexcept { return newConst(scope, &val, 1); }
  //! Put a WORD `val` to a constant-pool.
  ASMJIT_INLINE X86Mem newWordConst(uint32_t scope, uint16_t val) noexcept { return newConst(scope, &val, 2); }
  //! Put a DWORD `val` to a constant-pool.
  ASMJIT_INLINE X86Mem newDWordConst(uint32_t scope, uint32_t val) noexcept { return newConst(scope, &val, 4); }
  //! Put a QWORD `val` to a constant-pool.
  ASMJIT_INLINE X86Mem newQWordConst(uint32_t scope, uint64_t val) noexcept { return newConst(scope, &val, 8); }
  //! Put a WORD `val` to a constant-pool.
  ASMJIT_INLINE X86Mem newInt16Const(uint32_t scope, int16_t val) noexcept { return newConst(scope, &val, 2); }
  //! Put a WORD `val` to a constant-pool.
  ASMJIT_INLINE X86Mem newUInt16Const(uint32_t scope, uint16_t val) noexcept { return newConst(scope, &val, 2); }
  //! Put a DWORD `val` to a constant-pool.
  ASMJIT_INLINE X86Mem newInt32Const(uint32_t scope, int32_t val) noexcept { return newConst(scope, &val, 4); }
  //! Put a DWORD `val` to a constant-pool.
  ASMJIT_INLINE X86Mem newUInt32Const(uint32_t scope, uint32_t val) noexcept { return newConst(scope, &val, 4); }
  //! Put a QWORD `val` to a constant-pool.
  ASMJIT_INLINE X86Mem newInt64Const(uint32_t scope, int64_t val) noexcept { return newConst(scope, &val, 8); }
  //! Put a QWORD `val` to a constant-pool.
  ASMJIT_INLINE X86Mem newUInt64Const(uint32_t scope, uint64_t val) noexcept { return newConst(scope, &val, 8); }
  //! Put a SP-FP `val` to a constant-pool.
  ASMJIT_INLINE X86Mem newFloatConst(uint32_t scope, float val) noexcept { return newConst(scope, &val, 4); }
  //! Put a DP-FP `val` to a constant-pool.
  ASMJIT_INLINE X86Mem newDoubleConst(uint32_t scope, double val) noexcept { return newConst(scope, &val, 8); }
  //! Put a MMX `val` to a constant-pool.
  ASMJIT_INLINE X86Mem newMmConst(uint32_t scope, const Data64& val) noexcept { return newConst(scope, &val, 8); }
  //! Put a XMM `val` to a constant-pool.
  ASMJIT_INLINE X86Mem newXmmConst(uint32_t scope, const Data128& val) noexcept { return newConst(scope, &val, 16); }
  //! Put a YMM `val` to a constant-pool.
  ASMJIT_INLINE X86Mem newYmmConst(uint32_t scope, const Data256& val) noexcept { return newConst(scope, &val, 32); }

  // -------------------------------------------------------------------------
  // [Instruction Options]
  // -------------------------------------------------------------------------

  //! Force the compiler to not follow the conditional or unconditional jump.
  ASMJIT_INLINE X86Compiler& unfollow() noexcept { _options |= kOptionUnfollow; return *this; }
  //! Tell the compiler that the destination variable will be overwritten.
  ASMJIT_INLINE X86Compiler& overwrite() noexcept { _options |= kOptionOverwrite; return *this; }

  // --------------------------------------------------------------------------
  // [Emit]
  // --------------------------------------------------------------------------

  //! Call a function.
  ASMJIT_INLINE CCFuncCall* call(const X86Gp& dst, const FuncSignature& sign) { return addCall(X86Inst::kIdCall, dst, sign); }
  //! \overload
  ASMJIT_INLINE CCFuncCall* call(const X86Mem& dst, const FuncSignature& sign) { return addCall(X86Inst::kIdCall, dst, sign); }
  //! \overload
  ASMJIT_INLINE CCFuncCall* call(const Label& label, const FuncSignature& sign) { return addCall(X86Inst::kIdCall, label, sign); }
  //! \overload
  ASMJIT_INLINE CCFuncCall* call(const Imm& dst, const FuncSignature& sign) { return addCall(X86Inst::kIdCall, dst, sign); }
  //! \overload
  ASMJIT_INLINE CCFuncCall* call(uint64_t dst, const FuncSignature& sign) { return addCall(X86Inst::kIdCall, Imm(dst), sign); }

  //! Return.
  ASMJIT_INLINE CCFuncRet* ret() { return addRet(Operand(), Operand()); }
  //! \overload
  ASMJIT_INLINE CCFuncRet* ret(const X86Gp& o0) { return addRet(o0, Operand()); }
  //! \overload
  ASMJIT_INLINE CCFuncRet* ret(const X86Gp& o0, const X86Gp& o1) { return addRet(o0, o1); }
  //! \overload
  ASMJIT_INLINE CCFuncRet* ret(const X86Xmm& o0) { return addRet(o0, Operand()); }
  //! \overload
  ASMJIT_INLINE CCFuncRet* ret(const X86Xmm& o0, const X86Xmm& o1) { return addRet(o0, o1); }
};
//! \}
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"
// [Guard]
#endif // !ASMJIT_DISABLE_COMPILER
#endif // _ASMJIT_X86_X86COMPILER_H

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,506 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_X86_X86GLOBALS_H
#define _ASMJIT_X86_X86GLOBALS_H
// [Dependencies]
#include "../base/globals.h"
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
//! \addtogroup asmjit_x86
//! \{
// ============================================================================
// [asmjit::x86regs::]
// ============================================================================
//! X86 registers.
//!
//! NOTE(review): intentionally declared empty here; register definitions are
//! presumably injected into this namespace by other headers (x86operand.h) -
//! confirm.
namespace x86regs {}
// ============================================================================
// [asmjit::x86defs::]
// ============================================================================
//! X86 definitions.
namespace x86defs {
// ============================================================================
// [asmjit::x86defs::SpecialRegs]
// ============================================================================
//! Flags describing special registers and/or their parts.
//!
//! Each constant is a distinct bit so multiple special registers (or parts
//! of them) can be OR-ed together into a single mask.
ASMJIT_ENUM(SpecialRegs) {
  kSpecialReg_FLAGS_CF  = 0x00000001U, //!< [R|E]FLAGS - Carry flag.
  kSpecialReg_FLAGS_PF  = 0x00000002U, //!< [R|E]FLAGS - Parity flag.
  kSpecialReg_FLAGS_AF  = 0x00000004U, //!< [R|E]FLAGS - Adjust flag.
  kSpecialReg_FLAGS_ZF  = 0x00000008U, //!< [R|E]FLAGS - Zero flag.
  kSpecialReg_FLAGS_SF  = 0x00000010U, //!< [R|E]FLAGS - Sign flag.
  kSpecialReg_FLAGS_TF  = 0x00000020U, //!< [R|E]FLAGS - Trap flag.
  kSpecialReg_FLAGS_IF  = 0x00000040U, //!< [R|E]FLAGS - Interrupt enable flag.
  kSpecialReg_FLAGS_DF  = 0x00000080U, //!< [R|E]FLAGS - Direction flag.
  kSpecialReg_FLAGS_OF  = 0x00000100U, //!< [R|E]FLAGS - Overflow flag.
  kSpecialReg_FLAGS_AC  = 0x00000200U, //!< [R|E]FLAGS - Alignment check.
  kSpecialReg_FLAGS_SYS = 0x00000400U, //!< [R|E]FLAGS - System flags.
  kSpecialReg_X87CW_EXC = 0x00000800U, //!< X87 Control Word - Exception control.
  kSpecialReg_X87CW_PC  = 0x00001000U, //!< X87 Control Word - Precision control.
  kSpecialReg_X87CW_RC  = 0x00002000U, //!< X87 Control Word - Rounding control.
  kSpecialReg_X87SW_EXC = 0x00004000U, //!< X87 Status Word - Exception flags.
  kSpecialReg_X87SW_C0  = 0x00008000U, //!< X87 Status Word - C0 flag.
  kSpecialReg_X87SW_C1  = 0x00010000U, //!< X87 Status Word - C1 flag.
  kSpecialReg_X87SW_C2  = 0x00020000U, //!< X87 Status Word - C2 flag.
  kSpecialReg_X87SW_TOP = 0x00040000U, //!< X87 Status Word - Top of the FPU stack.
  kSpecialReg_X87SW_C3  = 0x00080000U, //!< X87 Status Word - C3 flag.
  kSpecialReg_MSR       = 0x00100000U, //!< MSR register.
  kSpecialReg_XCR       = 0x00200000U  //!< XCR register.
};
// ============================================================================
// [asmjit::x86defs::X87SW]
// ============================================================================
//! FPU status word.
ASMJIT_ENUM(X87SW) {
kX87SW_Invalid = 0x0001U,
kX87SW_Denormalized = 0x0002U,
kX87SW_DivByZero = 0x0004U,
kX87SW_Overflow = 0x0008U,
kX87SW_Underflow = 0x0010U,
kX87SW_Precision = 0x0020U,
kX87SW_StackFault = 0x0040U,
kX87SW_Interrupt = 0x0080U,
kX87SW_C0 = 0x0100U,
kX87SW_C1 = 0x0200U,
kX87SW_C2 = 0x0400U,
kX87SW_Top = 0x3800U,
kX87SW_C3 = 0x4000U,
kX87SW_Busy = 0x8000U
};
// ============================================================================
// [asmjit::x86defs::X87CW]
// ============================================================================
//! FPU control word.
ASMJIT_ENUM(X87CW) {
// Bits 0-5.
kX87CW_EM_Mask = 0x003FU,
kX87CW_EM_Invalid = 0x0001U,
kX87CW_EM_Denormal = 0x0002U,
kX87CW_EM_DivByZero = 0x0004U,
kX87CW_EM_Overflow = 0x0008U,
kX87CW_EM_Underflow = 0x0010U,
kX87CW_EM_Inexact = 0x0020U,
// Bits 8-9.
kX87CW_PC_Mask = 0x0300U,
kX87CW_PC_Float = 0x0000U,
kX87CW_PC_Reserved = 0x0100U,
kX87CW_PC_Double = 0x0200U,
kX87CW_PC_Extended = 0x0300U,
// Bits 10-11.
kX87CW_RC_Mask = 0x0C00U,
kX87CW_RC_Nearest = 0x0000U,
kX87CW_RC_Down = 0x0400U,
kX87CW_RC_Up = 0x0800U,
kX87CW_RC_Truncate = 0x0C00U,
// Bit 12.
kX87CW_IC_Mask = 0x1000U,
kX87CW_IC_Projective = 0x0000U,
kX87CW_IC_Affine = 0x1000U
};
// ============================================================================
// [asmjit::x86defs::Cond]
// ============================================================================
//! Condition codes.
ASMJIT_ENUM(Cond) {
kCondO = 0x00U, //!< OF==1
kCondNO = 0x01U, //!< OF==0
kCondB = 0x02U, //!< CF==1 (unsigned < )
kCondC = 0x02U, //!< CF==1
kCondNAE = 0x02U, //!< CF==1 (unsigned < )
kCondAE = 0x03U, //!< CF==0 (unsigned >=)
kCondNB = 0x03U, //!< CF==0 (unsigned >=)
kCondNC = 0x03U, //!< CF==0
kCondE = 0x04U, //!< ZF==1 (any_sign ==)
kCondZ = 0x04U, //!< ZF==1 (any_sign ==)
kCondNE = 0x05U, //!< ZF==0 (any_sign !=)
kCondNZ = 0x05U, //!< ZF==0 (any_sign !=)
kCondBE = 0x06U, //!< CF==1 | ZF==1 (unsigned <=)
kCondNA = 0x06U, //!< CF==1 | ZF==1 (unsigned <=)
kCondA = 0x07U, //!< CF==0 & ZF==0 (unsigned > )
kCondNBE = 0x07U, //!< CF==0 & ZF==0 (unsigned > )
kCondS = 0x08U, //!< SF==1 (is negative)
kCondNS = 0x09U, //!< SF==0 (is positive or zero)
kCondP = 0x0AU, //!< PF==1
kCondPE = 0x0AU, //!< PF==1
kCondPO = 0x0BU, //!< PF==0
kCondNP = 0x0BU, //!< PF==0
kCondL = 0x0CU, //!< SF!=OF (signed < )
kCondNGE = 0x0CU, //!< SF!=OF (signed < )
kCondGE = 0x0DU, //!< SF==OF (signed >=)
kCondNL = 0x0DU, //!< SF==OF (signed >=)
kCondLE = 0x0EU, //!< ZF==1 | SF!=OF (signed <=)
kCondNG = 0x0EU, //!< ZF==1 | SF!=OF (signed <=)
kCondG = 0x0FU, //!< ZF==0 & SF==OF (signed > )
kCondNLE = 0x0FU, //!< ZF==0 & SF==OF (signed > )
kCondCount = 0x10U,
// Simplified condition codes.
kCondSign = kCondS, //!< Sign.
kCondNotSign = kCondNS, //!< Not Sign.
kCondOverflow = kCondO, //!< Signed overflow.
kCondNotOverflow = kCondNO, //!< Not signed overflow.
kCondEqual = kCondE, //!< Equal `a == b`.
kCondNotEqual = kCondNE, //!< Not Equal `a != b`.
kCondSignedLT = kCondL, //!< Signed `a < b`.
kCondSignedLE = kCondLE, //!< Signed `a <= b`.
kCondSignedGT = kCondG, //!< Signed `a > b`.
kCondSignedGE = kCondGE, //!< Signed `a >= b`.
kCondUnsignedLT = kCondB, //!< Unsigned `a < b`.
kCondUnsignedLE = kCondBE, //!< Unsigned `a <= b`.
kCondUnsignedGT = kCondA, //!< Unsigned `a > b`.
kCondUnsignedGE = kCondAE, //!< Unsigned `a >= b`.
kCondZero = kCondZ,
kCondNotZero = kCondNZ,
kCondNegative = kCondS,
kCondPositive = kCondNS,
kCondParityEven = kCondP,
kCondParityOdd = kCondPO
};
// ============================================================================
// [asmjit::x86defs::CmpPredicate]
// ============================================================================
//! A predicate used by CMP[PD|PS|SD|SS] instructions.
ASMJIT_ENUM(CmpPredicate) {
kCmpEQ = 0x00U, //!< Equal (Quiet).
kCmpLT = 0x01U, //!< Less (Signaling).
kCmpLE = 0x02U, //!< Less/Equal (Signaling).
kCmpUNORD = 0x03U, //!< Unordered (Quiet).
kCmpNEQ = 0x04U, //!< Not Equal (Quiet).
kCmpNLT = 0x05U, //!< Not Less (Signaling).
kCmpNLE = 0x06U, //!< Not Less/Equal (Signaling).
kCmpORD = 0x07U //!< Ordered (Quiet).
};
// ============================================================================
// [asmjit::x86defs::VCmpPredicate]
// ============================================================================
//! A predicate used by VCMP[PD|PS|SD|SS] instructions.
//!
//! The first 8 values are compatible with \ref CmpPredicate.
ASMJIT_ENUM(VCmpPredicate) {
kVCmpEQ_OQ = 0x00U, //!< Equal (Quiet , Ordered).
kVCmpLT_OS = 0x01U, //!< Less (Signaling, Ordered).
kVCmpLE_OS = 0x02U, //!< Less/Equal (Signaling, Ordered).
kVCmpUNORD_Q = 0x03U, //!< Unordered (Quiet).
kVCmpNEQ_UQ = 0x04U, //!< Not Equal (Quiet , Unordered).
kVCmpNLT_US = 0x05U, //!< Not Less (Signaling, Unordered).
kVCmpNLE_US = 0x06U, //!< Not Less/Equal (Signaling, Unordered).
kVCmpORD_Q = 0x07U, //!< Ordered (Quiet).
kVCmpEQ_UQ = 0x08U, //!< Equal (Quiet , Unordered).
kVCmpNGE_US = 0x09U, //!< Not Greater/Equal (Signaling, Unordered).
kVCmpNGT_US = 0x0AU, //!< Not Greater (Signaling, Unordered).
kVCmpFALSE_OQ = 0x0BU, //!< False (Quiet , Ordered).
kVCmpNEQ_OQ = 0x0CU, //!< Not Equal (Quiet , Ordered).
kVCmpGE_OS = 0x0DU, //!< Greater/Equal (Signaling, Ordered).
kVCmpGT_OS = 0x0EU, //!< Greater (Signaling, Ordered).
kVCmpTRUE_UQ = 0x0FU, //!< True (Quiet , Unordered).
kVCmpEQ_OS = 0x10U, //!< Equal (Signaling, Ordered).
kVCmpLT_OQ = 0x11U, //!< Less (Quiet , Ordered).
kVCmpLE_OQ = 0x12U, //!< Less/Equal (Quiet , Ordered).
kVCmpUNORD_S = 0x13U, //!< Unordered (Signaling).
kVCmpNEQ_US = 0x14U, //!< Not Equal (Signaling, Unordered).
kVCmpNLT_UQ = 0x15U, //!< Not Less (Quiet , Unordered).
kVCmpNLE_UQ = 0x16U, //!< Not Less/Equal (Quiet , Unordered).
kVCmpORD_S = 0x17U, //!< Ordered (Signaling).
kVCmpEQ_US = 0x18U, //!< Equal (Signaling, Unordered).
kVCmpNGE_UQ = 0x19U, //!< Not Greater/Equal (Quiet , Unordered).
kVCmpNGT_UQ = 0x1AU, //!< Not Greater (Quiet , Unordered).
kVCmpFALSE_OS = 0x1BU, //!< False (Signaling, Ordered).
kVCmpNEQ_OS = 0x1CU, //!< Not Equal (Signaling, Ordered).
kVCmpGE_OQ = 0x1DU, //!< Greater/Equal (Quiet , Ordered).
kVCmpGT_OQ = 0x1EU, //!< Greater (Quiet , Ordered).
kVCmpTRUE_US = 0x1FU //!< True (Signaling, Unordered).
};
// ============================================================================
// [asmjit::x86defs::PCmpStrPredicate]
// ============================================================================
//! A predicate used by [V]PCMP[I|E]STR[I|M] instructions.
ASMJIT_ENUM(PCmpStrPredicate) {
// Source data format:
kPCmpStrUB = 0x00U << 0, //!< The source data format is unsigned bytes.
kPCmpStrUW = 0x01U << 0, //!< The source data format is unsigned words.
kPCmpStrSB = 0x02U << 0, //!< The source data format is signed bytes.
kPCmpStrSW = 0x03U << 0, //!< The source data format is signed words.
// Aggregation operation:
kPCmpStrEqualAny = 0x00U << 2, //!< The arithmetic comparison is "equal".
kPCmpStrRanges = 0x01U << 2, //!< The arithmetic comparison is “greater than or equal”
//!< between even indexed elements and “less than or equal”
//!< between odd indexed elements.
kPCmpStrEqualEach = 0x02U << 2, //!< The arithmetic comparison is "equal".
kPCmpStrEqualOrdered = 0x03U << 2, //!< The arithmetic comparison is "equal".
// Polarity:
kPCmpStrPosPolarity = 0x00U << 4, //!< IntRes2 = IntRes1.
kPCmpStrNegPolarity = 0x01U << 4, //!< IntRes2 = -1 XOR IntRes1.
kPCmpStrPosMasked = 0x02U << 4, //!< IntRes2 = IntRes1.
kPCmpStrNegMasked = 0x03U << 4, //!< IntRes2[i] = second[i] == invalid ? IntRes1[i] : ~IntRes1[i].
// Output selection (pcmpstri):
kPCmpStrOutputLSI = 0x00U << 6, //!< The index returned to ECX is of the least significant set bit in IntRes2.
kPCmpStrOutputMSI = 0x01U << 6, //!< The index returned to ECX is of the most significant set bit in IntRes2.
// Output selection (pcmpstrm):
kPCmpStrBitMask = 0x00U << 6, //!< IntRes2 is returned as the mask to the least significant bits of XMM0.
kPCmpStrIndexMask = 0x01U << 6 //!< IntRes2 is expanded into a byte/word mask and placed in XMM0.
};
// ============================================================================
// [asmjit::x86defs::VPCmpPredicate]
// ============================================================================
//! A predicate used by VPCMP[U][B|W|D|Q] instructions (AVX-512).
ASMJIT_ENUM(VPCmpPredicate) {
kVPCmpEQ = 0x00U, //!< Equal.
kVPCmpLT = 0x01U, //!< Less.
kVPCmpLE = 0x02U, //!< Less/Equal.
kVPCmpFALSE = 0x03U, //!< False.
kVPCmpNE = 0x04U, //!< Not Equal.
kVPCmpGE = 0x05U, //!< Greater/Equal.
kVPCmpGT = 0x06U, //!< Greater.
kVPCmpTRUE = 0x07U //!< True.
};
// ============================================================================
// [asmjit::x86defs::VPComPredicate]
// ============================================================================
//! A predicate used by VPCOM[U][B|W|D|Q] instructions (XOP).
ASMJIT_ENUM(VPComPredicate) {
kVPComLT = 0x00U, //!< Less.
kVPComLE = 0x01U, //!< Less/Equal
kVPComGT = 0x02U, //!< Greater.
kVPComGE = 0x03U, //!< Greater/Equal.
kVPComEQ = 0x04U, //!< Equal.
kVPComNE = 0x05U, //!< Not Equal.
kVPComFALSE = 0x06U, //!< False.
kVPComTRUE = 0x07U //!< True.
};
// ============================================================================
// [asmjit::x86defs::VFPClassPredicate]
// ============================================================================
//! A predicate used by VFPCLASS[PD|PS|SD|SS] instructions (AVX-512).
ASMJIT_ENUM(VFPClassPredicate) {
kVFPClassQNaN = 0x00U,
kVFPClassPZero = 0x01U,
kVFPClassNZero = 0x02U,
kVFPClassPInf = 0x03U,
kVFPClassNInf = 0x04U,
kVFPClassDenormal = 0x05U,
kVFPClassNegative = 0x06U,
kVFPClassSNaN = 0x07U
};
// ============================================================================
// [asmjit::x86defs::VFixupImmPredicate]
// ============================================================================
//! A predicate used by VFIXUPIMM[PD|PS|SD|SS] instructions (AVX-512).
ASMJIT_ENUM(VFixupImmPredicate) {
kVFixupImmZEOnZero = 0x01U,
kVFixupImmIEOnZero = 0x02U,
kVFixupImmZEOnOne = 0x04U,
kVFixupImmIEOnOne = 0x08U,
kVFixupImmIEOnSNaN = 0x10U,
kVFixupImmIEOnNInf = 0x20U,
kVFixupImmIEOnNegative= 0x40U,
kVFixupImmIEOnPInf = 0x80U
};
// ============================================================================
// [asmjit::x86defs::VGetMantPredicate]
// ============================================================================
//! A predicate used by VGETMANT[PD|PS|SD|SS] instructions (AVX-512).
ASMJIT_ENUM(VGetMantPredicate) {
kVGetMant1To2 = 0x00U,
kVGetMant1Div2To2 = 0x01U,
kVGetMant1Div2To1 = 0x02U,
kVGetMant3Div4To3Div2 = 0x03U,
kVGetMantNoSign = 0x04U,
kVGetMantQNaNIfSign = 0x08U
};
// ============================================================================
// [asmjit::x86defs::VRangePredicate]
// ============================================================================
//! A predicate used by VRANGE[PD|PS|SD|SS] instructions (AVX-512).
ASMJIT_ENUM(VRangePredicate) {
kVRangeSelectMin = 0x00U, //!< Select minimum value.
kVRangeSelectMax = 0x01U, //!< Select maximum value.
kVRangeSelectAbsMin = 0x02U, //!< Select minimum absolute value.
kVRangeSelectAbsMax = 0x03U, //!< Select maximum absolute value.
kVRangeSignSrc1 = 0x00U, //!< Select sign of SRC1.
kVRangeSignSrc2 = 0x04U, //!< Select sign of SRC2.
kVRangeSign0 = 0x08U, //!< Set sign to 0.
kVRangeSign1 = 0x0CU //!< Set sign to 1.
};
// ============================================================================
// [asmjit::x86defs::VReducePredicate]
// ============================================================================
//! A predicate used by VREDUCE[PD|PS|SD|SS] instructions (AVX-512).
ASMJIT_ENUM(VReducePredicate) {
kVReduceRoundCurrent = 0x00U, //!< Round to the current mode set.
kVReduceRoundEven = 0x04U, //!< Round to nearest even.
kVReduceRoundDown = 0x05U, //!< Round down.
kVReduceRoundUp = 0x06U, //!< Round up.
kVReduceRoundTrunc = 0x07U, //!< Truncate.
kVReduceSuppress = 0x08U //!< Suppress exceptions.
};
// ============================================================================
// [asmjit::x86defs::TLogPredicate]
// ============================================================================
//! A predicate that can be used to create an immediate for VTERNLOG[D|Q].
ASMJIT_ENUM(TLogPredicate) {
kTLog0 = 0x00U,
kTLog1 = 0xFFU,
kTLogA = 0xF0U,
kTLogB = 0xCCU,
kTLogC = 0xAAU,
kTLogNotA = kTLogA ^ 0xFFU,
kTLogNotB = kTLogB ^ 0xFFU,
kTLogNotC = kTLogC ^ 0xFFU,
kTLogAB = kTLogA & kTLogB,
kTLogAC = kTLogA & kTLogC,
kTLogBC = kTLogB & kTLogC,
kTLogNotAB = kTLogAB ^ 0xFFU,
kTLogNotAC = kTLogAC ^ 0xFFU,
kTLogNotBC = kTLogBC ^ 0xFFU,
kTLogABC = kTLogA & kTLogB & kTLogC,
kTLogNotABC = kTLogABC ^ 0xFFU
};
// ============================================================================
// [asmjit::x86defs::RoundPredicate]
// ============================================================================
//! A predicate used by ROUND[PD|PS|SD|SS] instructions.
ASMJIT_ENUM(RoundPredicate) {
kRoundNearest = 0x00U, //!< Round to nearest (even).
kRoundDown = 0x01U, //!< Round to down toward -INF (floor),
kRoundUp = 0x02U, //!< Round to up toward +INF (ceil).
kRoundTrunc = 0x03U, //!< Round toward zero (truncate).
kRoundCurrent = 0x04U, //!< Round to the current rounding mode set (ignores other RC bits).
kRoundInexact = 0x08U //!< Avoids inexact exception, if set.
};
} // x86defs namespace
// ============================================================================
// [asmjit::x86::]
// ============================================================================

//! X86 constants, registers, and utilities.
namespace x86 {

// Pull the shared constant and register namespaces into `x86`.
using namespace x86defs;
using namespace x86regs;

//! Pack a shuffle constant to be used by SSE/AVX/AVX-512 instructions (2 values).
//!
//! \param a Position of the first  component [0, 1].
//! \param b Position of the second component [0, 1].
//!
//! Shuffle constants can be used to encode an immediate for these instructions:
//!   - `shufpd`
static ASMJIT_INLINE int shufImm(uint32_t a, uint32_t b) noexcept {
  ASMJIT_ASSERT(a <= 1 && b <= 1);
  // Each component occupies one bit, `a` in bit 1 and `b` in bit 0; the fields
  // don't overlap, so `+` is equivalent to `|` here.
  const uint32_t packed = (a << 1) + b;
  return static_cast<int>(packed);
}

//! Pack a shuffle constant to be used by SSE/AVX/AVX-512 instructions (4 values).
//!
//! \param a Position of the first  component [0, 3].
//! \param b Position of the second component [0, 3].
//! \param c Position of the third  component [0, 3].
//! \param d Position of the fourth component [0, 3].
//!
//! Shuffle constants can be used to encode an immediate for these instructions:
//!   - `pshufw()`
//!   - `pshufd()`
//!   - `pshuflw()`
//!   - `pshufhw()`
//!   - `shufps()`
static ASMJIT_INLINE int shufImm(uint32_t a, uint32_t b, uint32_t c, uint32_t d) noexcept {
  ASMJIT_ASSERT(a <= 3 && b <= 3 && c <= 3 && d <= 3);
  // Append each 2-bit component from the most significant (`a`) to the least
  // significant (`d`) - equivalent to (a << 6) | (b << 4) | (c << 2) | d.
  uint32_t packed = a;
  packed = (packed << 2) + b;
  packed = (packed << 2) + c;
  packed = (packed << 2) + d;
  return static_cast<int>(packed);
}

//! Create an immediate that can be used by VTERNLOG[D|Q] instructions.
//!
//! Each argument is one output bit (0 or 1) of the truth table, `bXYZ` being
//! the output for inputs A=X, B=Y, C=Z.
static ASMJIT_INLINE int tlogImm(
  uint32_t b000, uint32_t b001, uint32_t b010, uint32_t b011,
  uint32_t b100, uint32_t b101, uint32_t b110, uint32_t b111) noexcept {

  ASMJIT_ASSERT(b000 <= 1 && b001 <= 1 && b010 <= 1 && b011 <= 1 &&
                b100 <= 1 && b101 <= 1 && b110 <= 1 && b111 <= 1);

  // Place each truth-table output at its bit index.
  uint32_t imm = b000;
  imm += b001 << 1;
  imm += b010 << 2;
  imm += b011 << 3;
  imm += b100 << 4;
  imm += b101 << 5;
  imm += b110 << 6;
  imm += b111 << 7;
  return static_cast<int>(imm);
}

//! Create an immediate that can be used by VTERNLOG[D|Q] instructions.
static ASMJIT_INLINE int tlogVal(int x) noexcept {
  int truthTable = x;
  truthTable &= 0xFF;
  return truthTable;
}

//! Negate an immediate that can be used by VTERNLOG[D|Q] instructions.
static ASMJIT_INLINE int tlogNot(int x) noexcept {
  // NOTE: Flips only the low 8 truth-table bits; bits above 8 are preserved.
  return x ^ 0xFF;
}

//! Create an if/else logic that can be used by VTERNLOG[D|Q] instructions.
static ASMJIT_INLINE int tlogIf(int cond, int a, int b) noexcept {
  const int whenTrue  = cond & a;
  const int whenFalse = tlogNot(cond) & b;
  return whenTrue | whenFalse;
}

} // x86 namespace
//! \}
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"
// [Guard]
#endif // _ASMJIT_X86_X86GLOBALS_H

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,733 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Export]
#define ASMJIT_EXPORTS
// [Dependencies]
#include "../base/misc_p.h"
#include "../base/utils.h"
#include "../x86/x86instimpl_p.h"
#include "../x86/x86operand.h"
// [Api-Begin]
#include "../asmjit_apibegin.h"
#pragma warning(disable: 4838) // warning C4838: conversion from '' to 'uint32_t' requires a narrowing conversion
namespace asmjit {
// ============================================================================
// [asmjit::X86InstImpl - Validate]
// ============================================================================
#if !defined(ASMJIT_DISABLE_VALIDATION)
//! \internal
//!
//! Compile-time mapping from a register type (`X86Reg::kReg...`) to the
//! corresponding operand-type flag (`X86Inst::kOp...`). Instantiated for all
//! 32 possible register-type values to build `_x86OpFlagFromRegType`.
//! Unmapped register types (including RIP) yield `X86Inst::kOpNone`.
template<uint32_t RegType>
struct X86OpTypeFromRegTypeT {
  enum {
    kValue = (RegType == X86Reg::kRegGpbLo) ? X86Inst::kOpGpbLo :
             (RegType == X86Reg::kRegGpbHi) ? X86Inst::kOpGpbHi :
             (RegType == X86Reg::kRegGpw  ) ? X86Inst::kOpGpw   :
             (RegType == X86Reg::kRegGpd  ) ? X86Inst::kOpGpd   :
             (RegType == X86Reg::kRegGpq  ) ? X86Inst::kOpGpq   :
             (RegType == X86Reg::kRegXmm  ) ? X86Inst::kOpXmm   :
             (RegType == X86Reg::kRegYmm  ) ? X86Inst::kOpYmm   :
             (RegType == X86Reg::kRegZmm  ) ? X86Inst::kOpZmm   :
             (RegType == X86Reg::kRegRip  ) ? X86Inst::kOpNone  :
             (RegType == X86Reg::kRegSeg  ) ? X86Inst::kOpSeg   :
             (RegType == X86Reg::kRegFp   ) ? X86Inst::kOpFp    :
             (RegType == X86Reg::kRegMm   ) ? X86Inst::kOpMm    :
             (RegType == X86Reg::kRegK    ) ? X86Inst::kOpK     :
             (RegType == X86Reg::kRegBnd  ) ? X86Inst::kOpBnd   :
             (RegType == X86Reg::kRegCr   ) ? X86Inst::kOpCr    :
             (RegType == X86Reg::kRegDr   ) ? X86Inst::kOpDr    : X86Inst::kOpNone
  };
};
//! \internal
//!
//! Compile-time mapping from a register type (`X86Reg::kReg...`) to the mask
//! of physical register ids allowed for that type in 32-bit (X86) mode - e.g.
//! GP and SIMD registers are limited to ids 0-7 (0x0F/0xFF masks).
//! NOTE(review): the fallback value is `X86Inst::kOpNone` (an operand-flag
//! constant) used as a register mask - presumably zero; confirm.
template<uint32_t RegType>
struct X86RegMaskFromRegTypeT {
  enum {
    kMask = (RegType == X86Reg::kRegGpbLo) ? 0x0000000FU :
            (RegType == X86Reg::kRegGpbHi) ? 0x0000000FU :
            (RegType == X86Reg::kRegGpw  ) ? 0x000000FFU :
            (RegType == X86Reg::kRegGpd  ) ? 0x000000FFU :
            (RegType == X86Reg::kRegGpq  ) ? 0x000000FFU :
            (RegType == X86Reg::kRegXmm  ) ? 0x000000FFU :
            (RegType == X86Reg::kRegYmm  ) ? 0x000000FFU :
            (RegType == X86Reg::kRegZmm  ) ? 0x000000FFU :
            (RegType == X86Reg::kRegRip  ) ? 0x00000001U :
            (RegType == X86Reg::kRegSeg  ) ? 0x0000007EU : // [ES|CS|SS|DS|FS|GS]
            (RegType == X86Reg::kRegFp   ) ? 0x000000FFU :
            (RegType == X86Reg::kRegMm   ) ? 0x000000FFU :
            (RegType == X86Reg::kRegK    ) ? 0x000000FFU :
            (RegType == X86Reg::kRegBnd  ) ? 0x0000000FU :
            (RegType == X86Reg::kRegCr   ) ? 0x0000FFFFU :
            (RegType == X86Reg::kRegDr   ) ? 0x000000FFU : X86Inst::kOpNone
  };
};
//! \internal
//!
//! Compile-time mapping from a register type (`X86Reg::kReg...`) to the mask
//! of physical register ids allowed for that type in 64-bit (X64) mode -
//! 16 GP registers (0xFFFF) and 32 XMM/YMM/ZMM registers (0xFFFFFFFF), while
//! high 8-bit registers (AH..BH) remain limited to ids 0-3.
//! NOTE(review): the fallback value is `X86Inst::kOpNone` (an operand-flag
//! constant) used as a register mask - presumably zero; confirm.
template<uint32_t RegType>
struct X64RegMaskFromRegTypeT {
  enum {
    kMask = (RegType == X86Reg::kRegGpbLo) ? 0x0000FFFFU :
            (RegType == X86Reg::kRegGpbHi) ? 0x0000000FU :
            (RegType == X86Reg::kRegGpw  ) ? 0x0000FFFFU :
            (RegType == X86Reg::kRegGpd  ) ? 0x0000FFFFU :
            (RegType == X86Reg::kRegGpq  ) ? 0x0000FFFFU :
            (RegType == X86Reg::kRegXmm  ) ? 0xFFFFFFFFU :
            (RegType == X86Reg::kRegYmm  ) ? 0xFFFFFFFFU :
            (RegType == X86Reg::kRegZmm  ) ? 0xFFFFFFFFU :
            (RegType == X86Reg::kRegRip  ) ? 0x00000001U :
            (RegType == X86Reg::kRegSeg  ) ? 0x0000007EU : // [ES|CS|SS|DS|FS|GS]
            (RegType == X86Reg::kRegFp   ) ? 0x000000FFU :
            (RegType == X86Reg::kRegMm   ) ? 0x000000FFU :
            (RegType == X86Reg::kRegK    ) ? 0x000000FFU :
            (RegType == X86Reg::kRegBnd  ) ? 0x0000000FU :
            (RegType == X86Reg::kRegCr   ) ? 0x0000FFFFU :
            (RegType == X86Reg::kRegDr   ) ? 0x0000FFFFU : X86Inst::kOpNone
  };
};
//! \internal
//!
//! Per-architecture operand-validation data; one instance exists for 32-bit
//! mode (`_x86ValidationData`) and one for 64-bit mode (`_x64ValidationData`).
struct X86ValidationData {
  //! Allowed registers by reg-type (X86::kReg...).
  uint32_t allowedRegMask[X86Reg::kRegMax + 1];
  //! Mask of register types usable as a memory base (bit index = `1 << regType`).
  uint32_t allowedMemBaseRegs;
  //! Mask of register types usable as a memory index (bit index = `1 << regType`).
  uint32_t allowedMemIndexRegs;
};
//! \internal
//!
//! Lookup table: register type -> operand-type flag (`X86Inst::kOp...`).
//! `ASMJIT_TABLE_T_32` expands `X86OpTypeFromRegTypeT<n>::kValue` for n=0..31.
static const uint32_t _x86OpFlagFromRegType[X86Reg::kRegMax + 1] = {
  ASMJIT_TABLE_T_32(X86OpTypeFromRegTypeT, kValue, 0)
};
//! \internal
//!
//! Validation data for 32-bit (X86) mode: per-type physical id masks, then the
//! register types allowed as a memory base (GPW/GPD/RIP and label-based
//! addresses) and as a memory index (GPW/GPD plus XMM/YMM/ZMM for VSIB).
static const X86ValidationData _x86ValidationData = {
  { ASMJIT_TABLE_T_32(X86RegMaskFromRegTypeT, kMask, 0) },
  (1U << X86Reg::kRegGpw) | (1U << X86Reg::kRegGpd) | (1U << X86Reg::kRegRip) | (1U << Label::kLabelTag),
  (1U << X86Reg::kRegGpw) | (1U << X86Reg::kRegGpd) | (1U << X86Reg::kRegXmm) | (1U << X86Reg::kRegYmm) | (1U << X86Reg::kRegZmm)
};
//! \internal
//!
//! Validation data for 64-bit (X64) mode: per-type physical id masks, then the
//! register types allowed as a memory base (GPD/GPQ/RIP and label-based
//! addresses) and as a memory index (GPD/GPQ plus XMM/YMM/ZMM for VSIB).
static const X86ValidationData _x64ValidationData = {
  { ASMJIT_TABLE_T_32(X64RegMaskFromRegTypeT, kMask, 0) },
  (1U << X86Reg::kRegGpd) | (1U << X86Reg::kRegGpq) | (1U << X86Reg::kRegRip) | (1U << Label::kLabelTag),
  (1U << X86Reg::kRegGpd) | (1U << X86Reg::kRegGpq) | (1U << X86Reg::kRegXmm) | (1U << X86Reg::kRegYmm) | (1U << X86Reg::kRegZmm)
};
//! \internal
//!
//! Check one translated operand signature `op` against a reference signature
//! `ref` from the instruction database.
//!
//! Returns `true` when `op` is acceptable for `ref`. As a special case, when
//! the only mismatch is an immediate whose value doesn't fit, `immOutOfRange`
//! is set and `true` is returned so the caller can report a more descriptive
//! error (`kErrorInvalidImmediate` instead of `kErrorInvalidInstruction`).
static ASMJIT_INLINE bool x86CheckOSig(const X86Inst::OSignature& op, const X86Inst::OSignature& ref, bool& immOutOfRange) noexcept {
  // Fail if operand types are incompatible.
  uint32_t opFlags = op.flags;
  if ((opFlags & ref.flags) == 0) {
    // Mark temporarily `immOutOfRange` so we can return a more descriptive error.
    if ((opFlags & X86Inst::kOpAllImm) && (ref.flags & X86Inst::kOpAllImm)) {
      immOutOfRange = true;
      return true;
    }

    return false;
  }

  // Fail if memory specific flags and sizes are incompatible.
  uint32_t opMemFlags = op.memFlags;
  if (opMemFlags != 0) {
    uint32_t refMemFlags = ref.memFlags;
    if ((refMemFlags & opMemFlags) == 0)
      return false;

    // BUGFIX: This previously used a logical AND (`opMemFlags &&
    // X86Inst::kMemOpBaseOnly`), which is always true inside this branch
    // (both operands are nonzero), so its negation was always false and the
    // base-only constraint was never enforced. Test the bit with `&` instead.
    if ((refMemFlags & X86Inst::kMemOpBaseOnly) && !(opMemFlags & X86Inst::kMemOpBaseOnly))
      return false;
  }

  // Specific register index.
  if (opFlags & X86Inst::kOpAllRegs) {
    uint32_t refRegMask = ref.regMask;
    if (refRegMask && !(op.regMask & refRegMask))
      return false;
  }

  return true;
}
ASMJIT_FAVOR_SIZE Error X86InstImpl::validate(uint32_t archType, const Inst::Detail& detail, const Operand_* operands, uint32_t count) noexcept {
uint32_t i;
uint32_t archMask;
const X86ValidationData* vd;
if (!ArchInfo::isX86Family(archType))
return DebugUtils::errored(kErrorInvalidArch);
if (archType == ArchInfo::kTypeX86) {
vd = &_x86ValidationData;
archMask = X86Inst::kArchMaskX86;
}
else {
vd = &_x64ValidationData;
archMask = X86Inst::kArchMaskX64;
}
// Get the instruction data.
uint32_t instId = detail.instId;
uint32_t options = detail.options;
if (ASMJIT_UNLIKELY(instId >= X86Inst::_kIdCount))
return DebugUtils::errored(kErrorInvalidArgument);
const X86Inst* iData = &X86InstDB::instData[instId];
uint32_t iFlags = iData->getFlags();
// Validate LOCK, XACQUIRE, and XRELEASE prefixes.
const uint32_t kLockXAcqRel = X86Inst::kOptionXAcquire | X86Inst::kOptionXRelease;
if (options & (X86Inst::kOptionLock | kLockXAcqRel)) {
if (options & X86Inst::kOptionLock) {
if (ASMJIT_UNLIKELY(!(iFlags & X86Inst::kFlagLock) && !(options & kLockXAcqRel)))
return DebugUtils::errored(kErrorInvalidLockPrefix);
if (ASMJIT_UNLIKELY(count < 1 || !operands[0].isMem()))
return DebugUtils::errored(kErrorInvalidLockPrefix);
}
if (options & kLockXAcqRel) {
if (ASMJIT_UNLIKELY(!(options & X86Inst::kOptionLock) || (options & kLockXAcqRel) == kLockXAcqRel))
return DebugUtils::errored(kErrorInvalidPrefixCombination);
if (ASMJIT_UNLIKELY((options & X86Inst::kOptionXAcquire) && !(iFlags & X86Inst::kFlagXAcquire)))
return DebugUtils::errored(kErrorInvalidXAcquirePrefix);
if (ASMJIT_UNLIKELY((options & X86Inst::kOptionXRelease) && !(iFlags & X86Inst::kFlagXRelease)))
return DebugUtils::errored(kErrorInvalidXReleasePrefix);
}
}
// Validate REP and REPNZ prefixes.
const uint32_t kRepRepRepnz = X86Inst::kOptionRep | X86Inst::kOptionRepnz;
if (options & kRepRepRepnz) {
if (ASMJIT_UNLIKELY((options & kRepRepRepnz) == kRepRepRepnz))
return DebugUtils::errored(kErrorInvalidPrefixCombination);
if (ASMJIT_UNLIKELY((options & X86Inst::kOptionRep) && !(iFlags & X86Inst::kFlagRep)))
return DebugUtils::errored(kErrorInvalidRepPrefix);
if (ASMJIT_UNLIKELY((options & X86Inst::kOptionRepnz) && !(iFlags & X86Inst::kFlagRepnz)))
return DebugUtils::errored(kErrorInvalidRepPrefix);
// TODO: Validate extraReg {cx|ecx|rcx}.
}
// Translate the given operands to `X86Inst::OSignature`.
X86Inst::OSignature oSigTranslated[6];
uint32_t combinedOpFlags = 0;
uint32_t combinedRegMask = 0;
const X86Mem* memOp = nullptr;
for (i = 0; i < count; i++) {
const Operand_& op = operands[i];
if (op.getOp() == Operand::kOpNone) break;
uint32_t opFlags = 0;
uint32_t memFlags = 0;
uint32_t regMask = 0;
switch (op.getOp()) {
case Operand::kOpReg: {
uint32_t regType = op.as<Reg>().getType();
if (ASMJIT_UNLIKELY(regType >= X86Reg::kRegCount))
return DebugUtils::errored(kErrorInvalidRegType);
opFlags = _x86OpFlagFromRegType[regType];
if (ASMJIT_UNLIKELY(opFlags == 0))
return DebugUtils::errored(kErrorInvalidRegType);
// If `regId` is equal or greater than Operand::kPackedIdMin it means
// that the register is virtual and its index will be assigned later
// by the register allocator. We must pass unless asked to disallow
// virtual registers.
// TODO: We need an option to refuse virtual regs here.
uint32_t regId = op.getId();
if (regId < Operand::kPackedIdMin) {
if (ASMJIT_UNLIKELY(regId >= 32))
return DebugUtils::errored(kErrorInvalidPhysId);
regMask = Utils::mask(regId);
if (ASMJIT_UNLIKELY((vd->allowedRegMask[regType] & regMask) == 0))
return DebugUtils::errored(kErrorInvalidPhysId);
combinedRegMask |= regMask;
}
else {
regMask = 0xFFFFFFFFU;
}
break;
}
// TODO: Validate base and index and combine with `combinedRegMask`.
case Operand::kOpMem: {
const X86Mem& m = op.as<X86Mem>();
uint32_t baseType = m.getBaseType();
uint32_t indexType = m.getIndexType();
memOp = &m;
if (m.getSegmentId() > 6)
return DebugUtils::errored(kErrorInvalidSegment);
if (baseType) {
uint32_t baseId = m.getBaseId();
if (m.isRegHome()) {
// Home address of virtual register. In such case we don't want to
// validate the type of the base register as it will always be patched
// to ESP|RSP.
}
else {
if (ASMJIT_UNLIKELY((vd->allowedMemBaseRegs & (1U << baseType)) == 0))
return DebugUtils::errored(kErrorInvalidAddress);
}
// Create information that will be validated only if this is an implicit
// memory operand. Basically only usable for string instructions and other
// instructions where memory operand is implicit and has 'seg:[reg]' form.
if (baseId < Operand::kPackedIdMin) {
// Physical base id.
regMask = Utils::mask(baseId);
combinedRegMask |= regMask;
}
else {
// Virtual base id - will the whole mask for implicit mem validation.
// The register is not assigned yet, so we cannot predict the phys id.
regMask = 0xFFFFFFFFU;
}
if (!indexType && !m.getOffsetLo32())
memFlags |= X86Inst::kMemOpBaseOnly;
}
else {
// Base is an address, make sure that the address doesn't overflow 32-bit
// integer (either int32_t or uint32_t) in 32-bit targets.
int64_t offset = m.getOffset();
if (archMask == X86Inst::kArchMaskX86 && !Utils::isInt32(offset) && !Utils::isUInt32(offset))
return DebugUtils::errored(kErrorInvalidAddress);
}
if (indexType) {
if (ASMJIT_UNLIKELY((vd->allowedMemIndexRegs & (1U << indexType)) == 0))
return DebugUtils::errored(kErrorInvalidAddress);
if (indexType == X86Reg::kRegXmm) {
opFlags |= X86Inst::kOpVm;
memFlags |= X86Inst::kMemOpVm32x | X86Inst::kMemOpVm64x;
}
else if (indexType == X86Reg::kRegYmm) {
opFlags |= X86Inst::kOpVm;
memFlags |= X86Inst::kMemOpVm32y | X86Inst::kMemOpVm64y;
}
else if (indexType == X86Reg::kRegZmm) {
opFlags |= X86Inst::kOpVm;
memFlags |= X86Inst::kMemOpVm32z | X86Inst::kMemOpVm64z;
}
else {
opFlags |= X86Inst::kOpMem;
if (baseType)
memFlags |= X86Inst::kMemOpMib;
}
// [RIP + {XMM|YMM|ZMM}] is not allowed.
if (baseType == X86Reg::kRegRip && (opFlags & X86Inst::kOpVm))
return DebugUtils::errored(kErrorInvalidAddress);
uint32_t indexId = m.getIndexId();
if (indexId < Operand::kPackedIdMin)
combinedRegMask |= Utils::mask(indexId);
// Only used for implicit memory operands having 'seg:[reg]' form, so clear it.
regMask = 0;
}
else {
opFlags |= X86Inst::kOpMem;
}
switch (m.getSize()) {
case 0: memFlags |= X86Inst::kMemOpAny ; break;
case 1: memFlags |= X86Inst::kMemOpM8 ; break;
case 2: memFlags |= X86Inst::kMemOpM16 ; break;
case 4: memFlags |= X86Inst::kMemOpM32 ; break;
case 6: memFlags |= X86Inst::kMemOpM48 ; break;
case 8: memFlags |= X86Inst::kMemOpM64 ; break;
case 10: memFlags |= X86Inst::kMemOpM80 ; break;
case 16: memFlags |= X86Inst::kMemOpM128; break;
case 32: memFlags |= X86Inst::kMemOpM256; break;
case 64: memFlags |= X86Inst::kMemOpM512; break;
default:
return DebugUtils::errored(kErrorInvalidOperandSize);
}
break;
}
case Operand::kOpImm: {
uint64_t immValue = op.as<Imm>().getUInt64();
uint32_t immFlags = 0;
if (static_cast<int64_t>(immValue) >= 0) {
const uint32_t k32AndMore = X86Inst::kOpI32 | X86Inst::kOpU32 |
X86Inst::kOpI64 | X86Inst::kOpU64 ;
if (immValue <= 0xFU)
immFlags = X86Inst::kOpU4 | X86Inst::kOpI8 | X86Inst::kOpU8 | X86Inst::kOpI16 | X86Inst::kOpU16 | k32AndMore;
else if (immValue <= 0x7FU)
immFlags = X86Inst::kOpI8 | X86Inst::kOpU8 | X86Inst::kOpI16 | X86Inst::kOpU16 | k32AndMore;
else if (immValue <= 0xFFU)
immFlags = X86Inst::kOpU8 | X86Inst::kOpI16 | X86Inst::kOpU16 | k32AndMore;
else if (immValue <= 0x7FFFU)
immFlags = X86Inst::kOpI16 | X86Inst::kOpU16 | k32AndMore;
else if (immValue <= 0xFFFFU)
immFlags = X86Inst::kOpU16 | k32AndMore;
else if (immValue <= 0x7FFFFFFFU)
immFlags = k32AndMore;
else if (immValue <= 0xFFFFFFFFU)
immFlags = X86Inst::kOpU32 | X86Inst::kOpI64 | X86Inst::kOpU64;
else if (immValue <= ASMJIT_UINT64_C(0x7FFFFFFFFFFFFFFF))
immFlags = X86Inst::kOpI64 | X86Inst::kOpU64;
else
immFlags = X86Inst::kOpU64;
}
else {
// 2s complement negation, as our number is unsigned...
immValue = (~immValue + 1);
if (immValue <= 0x80U)
immFlags = X86Inst::kOpI8 | X86Inst::kOpI16 | X86Inst::kOpI32 | X86Inst::kOpI64;
else if (immValue <= 0x8000U)
immFlags = X86Inst::kOpI16 | X86Inst::kOpI32 | X86Inst::kOpI64;
else if (immValue <= 0x80000000U)
immFlags = X86Inst::kOpI32 | X86Inst::kOpI64;
else
immFlags = X86Inst::kOpI64;
}
opFlags |= immFlags;
break;
}
case Operand::kOpLabel: {
opFlags |= X86Inst::kOpRel8 | X86Inst::kOpRel32;
break;
}
default:
return DebugUtils::errored(kErrorInvalidState);
}
X86Inst::OSignature& tod = oSigTranslated[i];
tod.flags = opFlags;
tod.memFlags = static_cast<uint16_t>(memFlags);
tod.regMask = static_cast<uint8_t>(regMask & 0xFFU);
combinedOpFlags |= opFlags;
}
// Decrease the number of operands of those that are none. This is important
// as Assembler and CodeCompiler may just pass more operands where some of
// them are none (it means that no operand is given at that index). However,
// validate that there are no gaps (like [reg, none, reg] or [none, reg]).
if (i < count) {
while (--count > i)
if (ASMJIT_UNLIKELY(!operands[count].isNone()))
return DebugUtils::errored(kErrorInvalidState);
}
// Validate X86 and X64 specific cases.
if (archMask == X86Inst::kArchMaskX86) {
// Illegal use of 64-bit register in 32-bit mode.
if (ASMJIT_UNLIKELY((combinedOpFlags & X86Inst::kOpGpq) != 0))
return DebugUtils::errored(kErrorInvalidUseOfGpq);
}
else {
// Illegal use of a high 8-bit register with REX prefix.
if (ASMJIT_UNLIKELY((combinedOpFlags & X86Inst::kOpGpbHi) != 0 && (combinedRegMask & 0xFFFFFF00U) != 0))
return DebugUtils::errored(kErrorInvalidUseOfGpbHi);
}
// Validate instruction operands.
const X86Inst::CommonData* commonData = &iData->getCommonData();
const X86Inst::ISignature* iSig = X86InstDB::iSignatureData + commonData->_iSignatureIndex;
const X86Inst::ISignature* iEnd = iSig + commonData->_iSignatureCount;
if (iSig != iEnd) {
const X86Inst::OSignature* oSigData = X86InstDB::oSignatureData;
// If set it means that we matched a signature where only immediate value
// was out of bounds. We can return a more descriptive error if we know this.
bool globalImmOutOfRange = false;
do {
// Check if the architecture is compatible.
if ((iSig->archMask & archMask) == 0) continue;
// Compare the operands table with reference operands.
uint32_t j = 0;
uint32_t iSigCount = iSig->opCount;
bool localImmOutOfRange = false;
if (iSigCount == count) {
for (j = 0; j < count; j++)
if (!x86CheckOSig(oSigTranslated[j], oSigData[iSig->operands[j]], localImmOutOfRange))
break;
}
else if (iSigCount - iSig->implicit == count) {
uint32_t r = 0;
for (j = 0; j < count && r < iSigCount; j++, r++) {
const X86Inst::OSignature* oChk = oSigTranslated + j;
const X86Inst::OSignature* oRef;
Next:
oRef = oSigData + iSig->operands[r];
// Skip implicit.
if ((oRef->flags & X86Inst::kOpImplicit) != 0) {
if (++r >= iSigCount)
break;
else
goto Next;
}
if (!x86CheckOSig(*oChk, *oRef, localImmOutOfRange))
break;
}
}
if (j == count) {
if (!localImmOutOfRange) {
// Match, must clear possible `globalImmOutOfRange`.
globalImmOutOfRange = false;
break;
}
globalImmOutOfRange = localImmOutOfRange;
}
} while (++iSig != iEnd);
if (iSig == iEnd) {
if (globalImmOutOfRange)
return DebugUtils::errored(kErrorInvalidImmediate);
else
return DebugUtils::errored(kErrorInvalidInstruction);
}
}
// Validate AVX-512 options:
const RegOnly& extraReg = detail.extraReg;
const uint32_t kAvx512Options = X86Inst::kOptionZMask |
X86Inst::kOption1ToX |
X86Inst::kOptionER |
X86Inst::kOptionSAE ;
if (!extraReg.isNone() || (options & kAvx512Options)) {
if (commonData->hasFlag(X86Inst::kFlagEvex)) {
// Validate AVX-512 {k} and {k}{z}.
if (!extraReg.isNone()) {
// Mask can only be specified by a 'k' register.
if (ASMJIT_UNLIKELY(extraReg.getType() != X86Reg::kRegK))
return DebugUtils::errored(kErrorInvalidKMaskReg);
if (ASMJIT_UNLIKELY(!commonData->hasAvx512K()))
return DebugUtils::errored(kErrorInvalidKMaskUse);
}
if ((options & X86Inst::kOptionZMask)) {
if (ASMJIT_UNLIKELY((options & X86Inst::kOptionZMask) != 0 && !commonData->hasAvx512Z()))
return DebugUtils::errored(kErrorInvalidKZeroUse);
}
// Validate AVX-512 broadcast {1tox}.
if (options & X86Inst::kOption1ToX) {
if (ASMJIT_UNLIKELY(!memOp))
return DebugUtils::errored(kErrorInvalidBroadcast);
uint32_t size = memOp->getSize();
if (size != 0) {
// The the size is specified it has to match the broadcast size.
if (ASMJIT_UNLIKELY(commonData->hasAvx512B32() && size != 4))
return DebugUtils::errored(kErrorInvalidBroadcast);
if (ASMJIT_UNLIKELY(commonData->hasAvx512B64() && size != 8))
return DebugUtils::errored(kErrorInvalidBroadcast);
}
}
// Validate AVX-512 {sae} and {er}.
if (options & (X86Inst::kOptionSAE | X86Inst::kOptionER)) {
// Rounding control is impossible if the instruction is not reg-to-reg.
if (ASMJIT_UNLIKELY(memOp))
return DebugUtils::errored(kErrorInvalidEROrSAE);
// Check if {sae} or {er} is supported by the instruction.
if (options & X86Inst::kOptionER) {
// NOTE: if both {sae} and {er} are set, we don't care, as {sae} is implied.
if (ASMJIT_UNLIKELY(!commonData->hasAvx512ER()))
return DebugUtils::errored(kErrorInvalidEROrSAE);
// {er} is defined for scalar ops or vector ops using zmm (LL = 10). We
// don't need any more bits in the instruction database to be able to
// validate this, as each AVX512 instruction that has broadcast is vector
// instruction (in this case we require zmm registers), otherwise it's a
// scalar instruction, which is valid.
if (commonData->hasAvx512B()) {
// Supports broadcast, thus we require LL to be '10', which means there
// have to be zmm registers used. We don't calculate LL here, but we know
// that it would be '10' if there is at least one ZMM register used.
// There is no 'ER' enabled instruction with less than two operands.
ASMJIT_ASSERT(count >= 2);
if (ASMJIT_UNLIKELY(!X86Reg::isZmm(operands[0]) && !X86Reg::isZmm(operands[1])))
return DebugUtils::errored(kErrorInvalidEROrSAE);
}
}
else {
// {sae} doesn't have the same limitations as {er}, this is enough.
if (ASMJIT_UNLIKELY(!commonData->hasAvx512SAE()))
return DebugUtils::errored(kErrorInvalidEROrSAE);
}
}
}
else {
// Not AVX512 instruction - maybe OpExtra is xCX register used by REP/REPNZ prefix. Otherwise the instruction is invalid.
if ((options & kAvx512Options) || (options & (X86Inst::kOptionRep | X86Inst::kOptionRepnz)) == 0)
return DebugUtils::errored(kErrorInvalidInstruction);
}
}
return kErrorOk;
}
#endif
// ============================================================================
// [asmjit::X86InstImpl - CheckFeatures]
// ============================================================================
#if !defined(ASMJIT_DISABLE_EXTENSIONS)
// Collects a bit-mask of all register types referenced by `operands`. Both
// direct register operands and base/index registers of memory operands
// contribute to the resulting mask.
ASMJIT_FAVOR_SIZE static uint32_t x86GetRegTypesMask(const Operand_* operands, uint32_t count) noexcept {
  uint32_t typesMask = 0;

  for (const Operand_* op = operands, *end = operands + count; op != end; op++) {
    if (op->isReg()) {
      typesMask |= Utils::mask(op->as<Reg>().getType());
    }
    else if (op->isMem()) {
      const Mem& mem = op->as<Mem>();
      if (mem.hasBaseReg())
        typesMask |= Utils::mask(mem.getBaseType());
      if (mem.hasIndexReg())
        typesMask |= Utils::mask(mem.getIndexType());
    }
  }

  return typesMask;
}
// Computes the set of CPU features required to execute the instruction
// described by `detail` with the given `operands`, storing the result in
// `out`. Returns `kErrorInvalidArch` for non-X86 architectures and
// `kErrorInvalidArgument` for an out-of-range instruction id.
ASMJIT_FAVOR_SIZE Error X86InstImpl::checkFeatures(uint32_t archType, const Inst::Detail& detail, const Operand_* operands, uint32_t count, CpuFeatures& out) noexcept {
  if (!ArchInfo::isX86Family(archType))
    return DebugUtils::errored(kErrorInvalidArch);

  // Get the instruction data.
  uint32_t instId = detail.instId;
  if (ASMJIT_UNLIKELY(instId >= X86Inst::_kIdCount))
    return DebugUtils::errored(kErrorInvalidArgument);

  const X86Inst* iData = &X86InstDB::instData[instId];
  const X86Inst::OperationData& od = iData->getOperationData();

  // Zero-terminated list of feature ids associated with this instruction.
  const uint8_t* fData = od.getFeaturesData();
  const uint8_t* fEnd = od.getFeaturesEnd();

  // Copy all features to `out`.
  out.reset();
  do {
    uint32_t feature = fData[0];
    if (!feature)
      break;
    out.add(feature);
  } while (++fData != fEnd);

  // Since AsmJit merges all instructions that share the same name we have to
  // deal with some special cases and also with MMX/SSE and AVX/AVX2 overlaps.

  // Only proceed if there were some CPU flags set.
  if (fData != od.getFeaturesData()) {
    // Mask of all register types used by the operands (incl. mem base/index).
    uint32_t mask = x86GetRegTypesMask(operands, count);

    // Check for MMX vs SSE overlap.
    if (out.has(CpuInfo::kX86FeatureMMX) || out.has(CpuInfo::kX86FeatureMMX2)) {
      // Only instructions defined by SSE and SSE2 overlap. Instructions introduced
      // by newer instruction sets like SSE3+ don't state MMX as they require SSE3+.
      if (out.has(CpuInfo::kX86FeatureSSE) || out.has(CpuInfo::kX86FeatureSSE2)) {
        if (!(mask & Utils::mask(X86Reg::kRegXmm))) {
          // The instruction doesn't use XMM register(s), thus it's MMX/MMX2 only.
          out.remove(CpuInfo::kX86FeatureSSE);
          out.remove(CpuInfo::kX86FeatureSSE2);
        }
        else {
          out.remove(CpuInfo::kX86FeatureMMX);
          out.remove(CpuInfo::kX86FeatureMMX2);
        }

        // Special case: PEXTRW instruction is MMX/SSE2 instruction. However, this
        // instruction couldn't access memory (only register to register extract) so
        // when SSE4.1 introduced the whole family of PEXTR/PINSR instructions they
        // also introduced PEXTRW with a new opcode 0x15 that can extract directly to
        // memory. This instruction is, of course, not compatible with MMX/SSE2 one.
        if (instId == X86Inst::kIdPextrw && count > 0 && !operands[0].isMem()) {
          out.remove(CpuInfo::kX86FeatureSSE4_1);
        }
      }
    }

    // Check for AVX vs AVX2 overlap.
    if (out.has(CpuInfo::kX86FeatureAVX) && out.has(CpuInfo::kX86FeatureAVX2)) {
      bool isAVX2 = true;

      // Special case: VBROADCASTSS and VBROADCASTSD were introduced in AVX, but
      // only version that uses memory as a source operand. AVX2 then added support
      // for register source operand.
      if (instId == X86Inst::kIdVbroadcastss || instId == X86Inst::kIdVbroadcastsd) {
        if (count > 1 && operands[0].isMem())
          isAVX2 = false;
      }
      else {
        // AVX instruction set doesn't support integer operations on YMM registers
        // as these were later introduced by AVX2. In our case we have to check if
        // YMM register(s) are in use and if that is the case this is an AVX2 instruction.
        if (!(mask & Utils::mask(X86Reg::kRegYmm, X86Reg::kRegZmm)))
          isAVX2 = false;
      }

      // Keep exactly one of the two overlapping features.
      if (isAVX2)
        out.remove(CpuInfo::kX86FeatureAVX);
      else
        out.remove(CpuInfo::kX86FeatureAVX2);
    }

    // Check for AVX|AVX2|FMA|F16C vs AVX512 overlap.
    if (out.has(CpuInfo::kX86FeatureAVX) || out.has(CpuInfo::kX86FeatureAVX2) || out.has(CpuInfo::kX86FeatureFMA) || out.has(CpuInfo::kX86FeatureF16C)) {
      // Only AVX512-F|BW|DQ allow to encode AVX/AVX2 instructions
      if (out.has(CpuInfo::kX86FeatureAVX512_F) || out.has(CpuInfo::kX86FeatureAVX512_BW) || out.has(CpuInfo::kX86FeatureAVX512_DQ)) {
        uint32_t options = detail.options;
        uint32_t kAvx512Options = X86Inst::kOptionEvex | X86Inst::_kOptionAvx512Mask;

        // The instruction only requires AVX-512 if it uses ZMM or K registers,
        // has an EVEX/AVX-512 option set, or uses a {k} mask as an extra register.
        if (!(mask & Utils::mask(X86Reg::kRegZmm, X86Reg::kRegK)) && !(options & (kAvx512Options)) && detail.extraReg.getType() != X86Reg::kRegK) {
          out.remove(CpuInfo::kX86FeatureAVX512_F)
             .remove(CpuInfo::kX86FeatureAVX512_BW)
             .remove(CpuInfo::kX86FeatureAVX512_DQ)
             .remove(CpuInfo::kX86FeatureAVX512_VL);
        }
      }
    }

    // Remove or keep AVX512_VL feature - VL (vector-length) extensions only
    // matter when the instruction doesn't use ZMM registers.
    if (out.has(CpuInfo::kX86FeatureAVX512_VL)) {
      if (!(mask & Utils::mask(X86Reg::kRegZmm)))
        out.remove(CpuInfo::kX86FeatureAVX512_VL);
    }
  }

  return kErrorOk;
}
#endif
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"

View file

@ -0,0 +1,45 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_X86_X86INSTIMPL_P_H
#define _ASMJIT_X86_X86INSTIMPL_P_H
// [Dependencies]
#include "../x86/x86inst.h"
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
//! \addtogroup asmjit_x86
//! \{
//! \internal
//!
//! Contains X86/X64 specific implementation of APIs provided by `asmjit::Inst`.
//!
//! The purpose of `X86InstImpl` is to move most of the logic out of `X86Inst`.
struct X86InstImpl {
#if !defined(ASMJIT_DISABLE_VALIDATION)
  //! Validate the given instruction (id, options, operands, and extra
  //! register) against the X86/X64 instruction database.
  static Error validate(uint32_t archType, const Inst::Detail& detail, const Operand_* operands, uint32_t count) noexcept;
#endif

#if !defined(ASMJIT_DISABLE_EXTENSIONS)
  //! Compute the CPU features required to execute the given instruction,
  //! storing them to `out`.
  static Error checkFeatures(uint32_t archType, const Inst::Detail& detail, const Operand_* operands, uint32_t count, CpuFeatures& out) noexcept;
#endif
};
//! \}
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"
// [Guard]
#endif // _ASMJIT_X86_X86INSTIMPL_P_H

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,79 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_X86_X86INTERNAL_P_H
#define _ASMJIT_X86_X86INTERNAL_P_H
#include "../asmjit_build.h"
// [Dependencies]
#include "../base/func.h"
#include "../x86/x86emitter.h"
#include "../x86/x86operand.h"
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
//! \addtogroup asmjit_base
//! \{
// ============================================================================
// [asmjit::X86Internal]
// ============================================================================
//! \internal
//!
//! X86 utilities used at multiple places, not part of public API, not exported.
struct X86Internal {
  //! Initialize `CallConv` to X86/X64 specific calling convention.
  static Error initCallConv(CallConv& cc, uint32_t ccId) noexcept;

  //! Initialize `FuncDetail` to X86/X64 specific function signature.
  static Error initFuncDetail(FuncDetail& func, const FuncSignature& sign, uint32_t gpSize) noexcept;

  //! Initialize `FuncFrameLayout` from X86/X64 specific function detail and frame information.
  static Error initFrameLayout(FuncFrameLayout& layout, const FuncDetail& func, const FuncFrameInfo& ffi) noexcept;

  //! NOTE(review): presumably propagates requirements of the mapped function
  //! arguments `args` into the frame information `ffi` - confirm against the
  //! implementation in x86internal.cpp.
  static Error argsToFrameInfo(const FuncArgsMapper& args, FuncFrameInfo& ffi) noexcept;

  //! Emit function prolog.
  static Error emitProlog(X86Emitter* emitter, const FuncFrameLayout& layout);

  //! Emit function epilog.
  static Error emitEpilog(X86Emitter* emitter, const FuncFrameLayout& layout);

  //! Emit a pure move operation between two registers or the same type or
  //! between a register and its home slot. This function does not handle
  //! register conversion.
  static Error emitRegMove(X86Emitter* emitter,
    const Operand_& dst_,
    const Operand_& src_, uint32_t typeId, bool avxEnabled, const char* comment = nullptr);

  //! Emit move from a function argument (either register or stack) to a register.
  //!
  //! This function can handle the necessary conversion from one argument to
  //! another, and from one register type to another, if it's possible. Any
  //! attempt of conversion that requires third register of a different kind
  //! (for example conversion from K to MMX) will fail.
  static Error emitArgMove(X86Emitter* emitter,
    const X86Reg& dst_, uint32_t dstTypeId,
    const Operand_& src_, uint32_t srcTypeId, bool avxEnabled, const char* comment = nullptr);

  //! NOTE(review): presumably emits the moves that place all mapped arguments
  //! (`args`) into their assigned registers/home slots - confirm against the
  //! implementation in x86internal.cpp.
  static Error allocArgs(X86Emitter* emitter, const FuncFrameLayout& layout, const FuncArgsMapper& args);
};
//! \}
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"
// [Guard]
#endif // _ASMJIT_X86_X86INTERNAL_P_H

View file

@ -0,0 +1,684 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Export]
#define ASMJIT_EXPORTS
// [Guard]
#include "../asmjit_build.h"
#if !defined(ASMJIT_DISABLE_LOGGING)
// [Dependencies]
#include "../base/misc_p.h"
#include "../x86/x86inst.h"
#include "../x86/x86logging_p.h"
#include "../x86/x86operand.h"
#if !defined(ASMJIT_DISABLE_COMPILER)
#include "../base/codecompiler.h"
#endif // !ASMJIT_DISABLE_COMPILER
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
// ============================================================================
// [asmjit::X86Logging - Constants]
// ============================================================================
// Describes how registers of a single register type are formatted. The two
// indexes below refer to byte offsets in `x86RegFormatStrings`.
struct X86RegFormatInfo {
  uint8_t count;        // Number of registers of this type.
  uint8_t formatIndex;  // Offset of a printf-like format string ("xmm%u", ...).
  uint8_t specialIndex; // Offset of the table of special (architectural) names.
  uint8_t specialCount; // Number of special names available (0 if none).
};
// Packed string table used by register formatting. The `// #N` comments give
// the byte offset of each entry; these offsets are hard-coded in
// `X86RegFormatInfo_T` below, so the table layout must not change. The first
// part holds one printf-like format per register type; the 4-byte-strided
// rows at the end hold special (architectural) register names.
static const char x86RegFormatStrings[] =
  "r%ub" "\0" // #0
  "r%uh" "\0" // #5
  "r%uw" "\0" // #10
  "r%ud" "\0" // #15
  "r%u"  "\0" // #20
  "xmm%u" "\0" // #24
  "ymm%u" "\0" // #30
  "zmm%u" "\0" // #36
  "rip%u" "\0" // #42
  "seg%u" "\0" // #48
  "fp%u" "\0" // #54
  "mm%u" "\0" // #59
  "k%u" "\0" // #64
  "bnd%u" "\0" // #68
  "cr%u" "\0" // #74
  "dr%u" "\0" // #79
  "rip\0" // #84
  "\0\0\0\0" // #88
  "\0\0\0\0" // #92
  "al\0\0" "cl\0\0" "dl\0\0" "bl\0\0" "spl\0" "bpl\0" "sil\0" "dil\0" // #96
  "ah\0\0" "ch\0\0" "dh\0\0" "bh\0\0" "n/a\0" "n/a\0" "n/a\0" "n/a\0" // #128
  "eax\0" "ecx\0" "edx\0" "ebx\0" "esp\0" "ebp\0" "esi\0" "edi\0" // #160
  "rax\0" "rcx\0" "rdx\0" "rbx\0" "rsp\0" "rbp\0" "rsi\0" "rdi\0" // #192
  "n/a\0" "es\0\0" "cs\0\0" "ss\0\0" "ds\0\0" "fs\0\0" "gs\0\0" "n/a\0"; // #224
// Computes, at compile-time, the `x86RegFormatStrings` offsets and counts for
// register type `X` (see `X86RegFormatInfo` / the offset comments above).
template<uint32_t X>
struct X86RegFormatInfo_T {
  enum {
    // Offset of the printf-like format string for this register type.
    kFormatIndex  = X == X86Reg::kRegGpbLo ? 0   :
                    X == X86Reg::kRegGpbHi ? 5   :
                    X == X86Reg::kRegGpw   ? 10  :
                    X == X86Reg::kRegGpd   ? 15  :
                    X == X86Reg::kRegGpq   ? 20  :
                    X == X86Reg::kRegXmm   ? 24  :
                    X == X86Reg::kRegYmm   ? 30  :
                    X == X86Reg::kRegZmm   ? 36  :
                    X == X86Reg::kRegRip   ? 42  :
                    X == X86Reg::kRegSeg   ? 48  :
                    X == X86Reg::kRegFp    ? 54  :
                    X == X86Reg::kRegMm    ? 59  :
                    X == X86Reg::kRegK     ? 64  :
                    X == X86Reg::kRegBnd   ? 68  :
                    X == X86Reg::kRegCr    ? 74  :
                    X == X86Reg::kRegDr    ? 79  : 0,

    // Offset of the table of special register names.
    // NOTE: kRegGpw maps to 161, one byte past "eax" in the GPD row (#160);
    // because entries are strided 4 bytes apart this yields "ax", "cx", "dx",
    // ... without requiring a dedicated 16-bit name table.
    kSpecialIndex = X == X86Reg::kRegGpbLo ? 96  :
                    X == X86Reg::kRegGpbHi ? 128 :
                    X == X86Reg::kRegGpw   ? 161 :
                    X == X86Reg::kRegGpd   ? 160 :
                    X == X86Reg::kRegGpq   ? 192 :
                    X == X86Reg::kRegRip   ? 84  :
                    X == X86Reg::kRegSeg   ? 224 : 0,

    // Number of special names available for this register type.
    kSpecialCount = X == X86Reg::kRegGpbLo ? 8   :
                    X == X86Reg::kRegGpbHi ? 4   :
                    X == X86Reg::kRegGpw   ? 8   :
                    X == X86Reg::kRegGpd   ? 8   :
                    X == X86Reg::kRegGpq   ? 8   :
                    X == X86Reg::kRegRip   ? 1   :
                    X == X86Reg::kRegSeg   ? 7   : 0
  };
};
// Expands to an `X86RegFormatInfo` initializer for register type TYPE.
#define ASMJIT_X86_REG_FORMAT(TYPE) { \
  X86RegTraits<TYPE>::kCount, \
  X86RegFormatInfo_T<TYPE>::kFormatIndex, \
  X86RegFormatInfo_T<TYPE>::kSpecialIndex, \
  X86RegFormatInfo_T<TYPE>::kSpecialCount \
}

// Format information table, one entry per register type (32 entries total),
// indexed directly by a register's type id.
static const X86RegFormatInfo x86RegFormatInfo[] = {
  ASMJIT_TABLE_16(ASMJIT_X86_REG_FORMAT, 0 ),
  ASMJIT_TABLE_16(ASMJIT_X86_REG_FORMAT, 16)
};
// Returns the assembly size keyword ("byte ", "dword ", ...) corresponding to
// a memory operand size in bytes, or an empty string for sizes that have no
// keyword (including 0, i.e. unspecified).
static const char* x86GetAddressSizeString(uint32_t size) noexcept {
  struct SizeKeyword {
    uint32_t size;
    const char* keyword;
  };

  static const SizeKeyword table[] = {
    { 1 , "byte "  },
    { 2 , "word "  },
    { 4 , "dword " },
    { 6 , "fword " },
    { 8 , "qword " },
    { 10, "tword " },
    { 16, "oword " },
    { 32, "yword " },
    { 64, "zword " }
  };

  for (const SizeKeyword& entry : table)
    if (entry.size == size)
      return entry.keyword;

  return "";
}
// ============================================================================
// [asmjit::X86Logging - Format Operand]
// ============================================================================
// Appends a human-readable representation of `op` - register, memory,
// immediate, or label - to `sb`. Unknown/none operands format as "<None>".
ASMJIT_FAVOR_SIZE Error X86Logging::formatOperand(
  StringBuilder& sb,
  uint32_t logOptions,
  const CodeEmitter* emitter,
  uint32_t archType,
  const Operand_& op) noexcept {

  if (op.isReg())
    return formatRegister(sb, logOptions, emitter, archType, op.as<Reg>().getType(), op.as<Reg>().getId());

  if (op.isMem()) {
    const X86Mem& m = op.as<X86Mem>();

    // Size keyword ("byte ", "dword ", ...), empty if the size is unknown.
    ASMJIT_PROPAGATE(sb.appendString(x86GetAddressSizeString(m.getSize())));

    // Segment override prefix.
    uint32_t seg = m.getSegmentId();
    if (seg != X86Seg::kIdNone && seg < X86Seg::kIdCount)
      ASMJIT_PROPAGATE(sb.appendFormat("%s:", x86RegFormatStrings + 224 + seg * 4));

    ASMJIT_PROPAGATE(sb.appendChar('['));

    if (m.isAbs())
      ASMJIT_PROPAGATE(sb.appendString("abs "));

    // Base - either a label or a register (optionally marked as an
    // argument/register home slot).
    if (m.hasBase()) {
      if (m.hasBaseLabel()) {
        ASMJIT_PROPAGATE(Logging::formatLabel(sb, logOptions, emitter, m.getBaseId()));
      }
      else {
        if (m.isArgHome()) ASMJIT_PROPAGATE(sb.appendString("$"));
        if (m.isRegHome()) ASMJIT_PROPAGATE(sb.appendString("&"));
        ASMJIT_PROPAGATE(formatRegister(sb, logOptions, emitter, archType, m.getBaseType(), m.getBaseId()));
      }
    }

    // Index register with optional scale ("+reg*4").
    if (m.hasIndex()) {
      ASMJIT_PROPAGATE(sb.appendChar('+'));
      ASMJIT_PROPAGATE(formatRegister(sb, logOptions, emitter, archType, m.getIndexType(), m.getIndexId()));
      if (m.hasShift())
        ASMJIT_PROPAGATE(sb.appendFormat("*%u", 1 << m.getShift()));
    }

    // Displacement, signed; printed as "+N" / "-N" (optionally "0x" hex for
    // values > 9 if `kOptionHexDisplacement` is set).
    uint64_t off = static_cast<uint64_t>(m.getOffset());
    if (off) {
      uint32_t base = 10;
      char prefix = '+';

      if (static_cast<int64_t>(off) < 0) {
        // Two's complement negation in unsigned arithmetic (avoids UB on
        // INT64_MIN).
        off = ~off + 1;
        prefix = '-';
      }

      ASMJIT_PROPAGATE(sb.appendChar(prefix));
      if ((logOptions & Logger::kOptionHexDisplacement) != 0 && off > 9) {
        ASMJIT_PROPAGATE(sb.appendString("0x", 2));
        base = 16;
      }
      ASMJIT_PROPAGATE(sb.appendUInt(off, base));
    }

    return sb.appendChar(']');
  }

  if (op.isImm()) {
    const Imm& i = op.as<Imm>();
    int64_t val = i.getInt64();

    // NOTE(review): when `kOptionHexImmediate` is set the value is printed as
    // unsigned hex without a "0x" prefix - confirm this asymmetry with the
    // displacement formatting above is intentional.
    if ((logOptions & Logger::kOptionHexImmediate) != 0 && static_cast<uint64_t>(val) > 9)
      return sb.appendUInt(static_cast<uint64_t>(val), 16);
    else
      return sb.appendInt(val, 10);
  }

  if (op.isLabel()) {
    return Logging::formatLabel(sb, logOptions, emitter, op.getId());
  }

  return sb.appendString("<None>");
}
// ============================================================================
// [asmjit::X86Logging - Format Immediate (Extension)]
// ============================================================================
// Describes one bit-field of an immediate value and how to render it, used by
// `X86Logging_formatImmBits()`.
struct ImmBits {
  enum Mode {
    kModeLookup = 0x0, // `text` is a packed (NUL-separated) lookup table indexed by the field value.
    kModeFormat = 0x1  // `text` is a printf-like format string applied to the field value.
  };

  uint8_t mask;   // Bit-mask selecting the field within the immediate.
  uint8_t shift;  // Right-shift applied after masking.
  uint8_t mode;   // One of `Mode`.
  char text[45];  // Lookup table or format string, depending on `mode`.
};
// Formats an immediate interpreted as `count` consecutive fields of `bits`
// bits each and appends them to `sb` as "<a|b|...>", least-significant field
// first.
ASMJIT_FAVOR_SIZE static Error X86Logging_formatImmShuf(StringBuilder& sb, uint32_t u8, uint32_t bits, uint32_t count) noexcept {
  const uint32_t fieldMask = (1 << bits) - 1;

  ASMJIT_PROPAGATE(sb.appendChar('<'));
  for (uint32_t index = 0; index < count; index++) {
    if (index != 0)
      ASMJIT_PROPAGATE(sb.appendChar('|'));
    ASMJIT_PROPAGATE(sb.appendUInt(u8 & fieldMask));
    u8 >>= bits;
  }
  return sb.appendChar('>');
}
// Formats selected bit-fields of an immediate according to the `bits`
// descriptors (see `ImmBits`) and appends them to `sb` as "<a|b|...>".
// Fields that resolve to an empty string are skipped; if all fields are
// empty nothing is appended at all.
ASMJIT_FAVOR_SIZE static Error X86Logging_formatImmBits(StringBuilder& sb, uint32_t u8, const ImmBits* bits, uint32_t count) noexcept {
  uint32_t n = 0;     // Number of non-empty fields emitted so far.
  char buf[64];       // Scratch buffer for kModeFormat rendering.

  for (uint32_t i = 0; i < count; i++) {
    const ImmBits& spec = bits[i];

    uint32_t value = (u8 & static_cast<uint32_t>(spec.mask)) >> spec.shift;
    const char* str = nullptr;

    switch (spec.mode) {
      case ImmBits::kModeLookup:
        // `spec.text` is a packed NUL-separated string table.
        str = Utils::findPackedString(spec.text, value);
        break;

      case ImmBits::kModeFormat:
        // `spec.text` is a printf-like format with one unsigned argument.
        snprintf(buf, sizeof(buf), spec.text, static_cast<unsigned int>(value));
        str = buf;
        break;

      default:
        return DebugUtils::errored(kErrorInvalidState);
    }

    if (!str[0])
      continue;

    // '<' before the first emitted field, '|' between subsequent fields.
    ASMJIT_PROPAGATE(sb.appendChar(++n == 1 ? '<' : '|'));
    ASMJIT_PROPAGATE(sb.appendString(str));
  }

  return n ? sb.appendChar('>') : static_cast<Error>(kErrorOk);
}
// Formats `count` fields of `bits` bits each by looking every field value up
// in the packed string table `text`, appending "<a|b|...>" to `sb`. Each
// successive field's lookup index is offset by an additional `advance`, which
// allows one table to serve multiple field positions.
ASMJIT_FAVOR_SIZE static Error X86Logging_formatImmText(StringBuilder& sb, uint32_t u8, uint32_t bits, uint32_t advance, const char* text, uint32_t count = 1) noexcept {
  const uint32_t fieldMask = (1 << bits) - 1;
  uint32_t base = 0;

  ASMJIT_PROPAGATE(sb.appendChar('<'));
  for (uint32_t index = 0; index < count; index++) {
    if (index != 0)
      ASMJIT_PROPAGATE(sb.appendChar('|'));
    ASMJIT_PROPAGATE(sb.appendString(Utils::findPackedString(text, (u8 & fieldMask) + base)));
    u8 >>= bits;
    base += advance;
  }
  return sb.appendChar('>');
}
// Appends a human-readable interpretation ("<...>") of an immediate operand
// for instructions whose immediate encodes predicates, shuffle selectors, or
// rounding/range/class controls. `vecSize` is the size (in bytes) of the
// widest vector register used by the instruction and determines how many
// immediate fields are meaningful. Instructions without an extended
// interpretation return `kErrorOk` without appending anything.
//
// FIX: the third VGETMANT normalization-interval string was missing its
// opening bracket ("1/2, 1)" -> "[1/2, 1)"), inconsistent with the other
// three intervals and with the intervals documented for VGETMANT.
ASMJIT_FAVOR_SIZE static Error X86Logging_formatImmExtended(
  StringBuilder& sb,
  uint32_t logOptions,
  uint32_t instId,
  uint32_t vecSize,
  const Imm& imm) noexcept {

  // VCMPPS/PD/SS/SD 5-bit predicate names; legacy CMPPS/PD/SS/SD uses only
  // the low 3 bits of the same table.
  static const char vcmpx[] =
    "eq_oq\0" "lt_os\0"  "le_os\0"  "unord_q\0"  "neq_uq\0" "nlt_us\0" "nle_us\0" "ord_q\0"
    "eq_uq\0" "nge_us\0" "ngt_us\0" "false_oq\0" "neq_oq\0" "ge_os\0"  "gt_os\0"  "true_uq\0"
    "eq_os\0" "lt_oq\0"  "le_oq\0"  "unord_s\0"  "neq_us\0" "nlt_uq\0" "nle_uq\0" "ord_s\0"
    "eq_us\0" "nge_uq\0" "ngt_uq\0" "false_os\0" "neq_os\0" "ge_oq\0"  "gt_oq\0"  "true_us\0";

  // VPCMP[U]x and VPCOM[U]x use similar predicate names, but with a
  // different 3-bit encoding each.
  static const char vpcmpx[] = "eq\0" "lt\0" "le\0" "false\0" "neq\0" "ge\0" "gt\0" "true\0";
  static const char vpcomx[] = "lt\0" "le\0" "gt\0" "ge\0" "eq\0" "neq\0" "false\0" "true\0";

  // Source-element names for [V]SHUFPD / [V]SHUFPS.
  static const char vshufpd[] = "a0\0a1\0b0\0b1\0a2\0a3\0b2\0b3\0a4\0a5\0b4\0b5\0a6\0a7\0b6\0b7\0";
  static const char vshufps[] = "a0\0a1\0a2\0a3\0a0\0a1\0a2\0a3\0b0\0b1\0b2\0b3\0b0\0b1\0b2\0b3\0";

  // VFPCLASSxx: class being tested.
  static const ImmBits vfpclassxx[] = {
    { 0x07, 0, ImmBits::kModeLookup, "qnan\0" "+0\0" "-0\0" "+inf\0" "-inf\0" "denormal\0" "-finite\0" "snan\0" }
  };

  // VGETMANTxx: normalization interval, sign control, SNaN handling.
  static const ImmBits vgetmantxx[] = {
    { 0x03, 0, ImmBits::kModeLookup, "[1, 2)\0" "[1/2, 2)\0" "[1/2, 1)\0" "[3/4, 3/2)\0" },
    { 0x04, 2, ImmBits::kModeLookup, "\0" "no-sign\0" },
    { 0x08, 3, ImmBits::kModeLookup, "\0" "qnan-if-sign\0" }
  };

  // [V]MPSADBW: block selectors of both source operands.
  static const ImmBits vmpsadbw[] = {
    { 0x04, 2, ImmBits::kModeLookup, "blk1[0]\0" "blk1[1]\0" },
    { 0x03, 0, ImmBits::kModeLookup, "blk2[0]\0" "blk2[1]\0" "blk2[2]\0" "blk2[3]\0" },
    { 0x40, 6, ImmBits::kModeLookup, "blk1[4]\0" "blk1[5]\0" },
    { 0x30, 4, ImmBits::kModeLookup, "blk2[4]\0" "blk2[5]\0" "blk2[6]\0" "blk2[7]\0" }
  };

  // [V]PCLMULQDQ: low/high quadword selectors.
  static const ImmBits vpclmulqdq[] = {
    { 0x01, 0, ImmBits::kModeLookup, "lq\0" "hq\0" },
    { 0x10, 4, ImmBits::kModeLookup, "lq\0" "hq\0" }
  };

  // VPERM2F128/VPERM2I128: 128-bit lane selectors (or zero).
  static const ImmBits vperm2x128[] = {
    { 0x0B, 0, ImmBits::kModeLookup, "a0\0" "a1\0" "b0\0" "b1\0" "\0" "\0" "\0" "\0" "0\0" "0\0" "0\0" "0\0" },
    { 0xB0, 4, ImmBits::kModeLookup, "a0\0" "a1\0" "b0\0" "b1\0" "\0" "\0" "\0" "\0" "0\0" "0\0" "0\0" "0\0" }
  };

  // VRANGExx: operation and sign control.
  static const ImmBits vrangexx[] = {
    { 0x03, 0, ImmBits::kModeLookup, "min\0" "max\0" "min-abs\0" "max-abs\0" },
    { 0x0C, 2, ImmBits::kModeLookup, "sign=src1\0" "sign=src2\0" "sign=0\0" "sign=1\0" }
  };

  // VREDUCExx / VRNDSCALExx: rounding mode, exception suppression, length.
  static const ImmBits vreducexx_vrndscalexx[] = {
    { 0x07, 0, ImmBits::kModeLookup, "\0" "\0" "\0" "\0" "round\0" "floor\0" "ceil\0" "truncate\0" },
    { 0x08, 3, ImmBits::kModeLookup, "\0" "suppress\0" },
    { 0xF0, 4, ImmBits::kModeFormat, "len=%d" }
  };

  // [V]ROUNDxx: rounding mode and exception control.
  static const ImmBits vroundxx[] = {
    { 0x07, 0, ImmBits::kModeLookup, "round\0" "floor\0" "ceil\0" "truncate\0" "\0" "\0" "\0" "\0" },
    { 0x08, 3, ImmBits::kModeLookup, "\0" "inexact\0" }
  };

  uint32_t u8 = imm.getUInt8();
  switch (instId) {
    case X86Inst::kIdVblendpd:
    case X86Inst::kIdBlendpd:
      return X86Logging_formatImmShuf(sb, u8, 1, vecSize / 8);

    case X86Inst::kIdVblendps:
    case X86Inst::kIdBlendps:
      return X86Logging_formatImmShuf(sb, u8, 1, vecSize / 4);

    case X86Inst::kIdVcmppd:
    case X86Inst::kIdVcmpps:
    case X86Inst::kIdVcmpsd:
    case X86Inst::kIdVcmpss:
      return X86Logging_formatImmText(sb, u8, 5, 0, vcmpx);

    case X86Inst::kIdCmppd:
    case X86Inst::kIdCmpps:
    case X86Inst::kIdCmpsd:
    case X86Inst::kIdCmpss:
      return X86Logging_formatImmText(sb, u8, 3, 0, vcmpx);

    case X86Inst::kIdVdbpsadbw:
      return X86Logging_formatImmShuf(sb, u8, 2, 4);

    case X86Inst::kIdVdppd:
    case X86Inst::kIdVdpps:
    case X86Inst::kIdDppd:
    case X86Inst::kIdDpps:
      return X86Logging_formatImmShuf(sb, u8, 1, 8);

    case X86Inst::kIdVmpsadbw:
    case X86Inst::kIdMpsadbw:
      return X86Logging_formatImmBits(sb, u8, vmpsadbw, std::min<uint32_t>(vecSize / 8, 4));

    case X86Inst::kIdVpblendw:
    case X86Inst::kIdPblendw:
      return X86Logging_formatImmShuf(sb, u8, 1, 8);

    case X86Inst::kIdVpblendd:
      return X86Logging_formatImmShuf(sb, u8, 1, std::min<uint32_t>(vecSize / 4, 8));

    case X86Inst::kIdVpclmulqdq:
    case X86Inst::kIdPclmulqdq:
      return X86Logging_formatImmBits(sb, u8, vpclmulqdq, ASMJIT_ARRAY_SIZE(vpclmulqdq));

    case X86Inst::kIdVroundpd:
    case X86Inst::kIdVroundps:
    case X86Inst::kIdVroundsd:
    case X86Inst::kIdVroundss:
    case X86Inst::kIdRoundpd:
    case X86Inst::kIdRoundps:
    case X86Inst::kIdRoundsd:
    case X86Inst::kIdRoundss:
      return X86Logging_formatImmBits(sb, u8, vroundxx, ASMJIT_ARRAY_SIZE(vroundxx));

    case X86Inst::kIdVshufpd:
    case X86Inst::kIdShufpd:
      return X86Logging_formatImmText(sb, u8, 1, 2, vshufpd, std::min<uint32_t>(vecSize / 8, 8));

    case X86Inst::kIdVshufps:
    case X86Inst::kIdShufps:
      return X86Logging_formatImmText(sb, u8, 2, 4, vshufps, 4);

    case X86Inst::kIdVcvtps2ph:
      // Only the rounding-mode field applies.
      return X86Logging_formatImmBits(sb, u8, vroundxx, 1);

    case X86Inst::kIdVperm2f128:
    case X86Inst::kIdVperm2i128:
      return X86Logging_formatImmBits(sb, u8, vperm2x128, ASMJIT_ARRAY_SIZE(vperm2x128));

    case X86Inst::kIdVpermilpd:
      return X86Logging_formatImmShuf(sb, u8, 1, vecSize / 8);

    case X86Inst::kIdVpermilps:
      return X86Logging_formatImmShuf(sb, u8, 2, 4);

    case X86Inst::kIdVpshufd:
    case X86Inst::kIdPshufd:
      return X86Logging_formatImmShuf(sb, u8, 2, 4);

    case X86Inst::kIdVpshufhw:
    case X86Inst::kIdVpshuflw:
    case X86Inst::kIdPshufhw:
    case X86Inst::kIdPshuflw:
    case X86Inst::kIdPshufw:
      return X86Logging_formatImmShuf(sb, u8, 2, 4);

    // TODO: Maybe?
    case X86Inst::kIdVfixupimmpd:
    case X86Inst::kIdVfixupimmps:
    case X86Inst::kIdVfixupimmsd:
    case X86Inst::kIdVfixupimmss:
      return kErrorOk;

    case X86Inst::kIdVfpclasspd:
    case X86Inst::kIdVfpclassps:
    case X86Inst::kIdVfpclasssd:
    case X86Inst::kIdVfpclassss:
      return X86Logging_formatImmBits(sb, u8, vfpclassxx, ASMJIT_ARRAY_SIZE(vfpclassxx));

    case X86Inst::kIdVgetmantpd:
    case X86Inst::kIdVgetmantps:
    case X86Inst::kIdVgetmantsd:
    case X86Inst::kIdVgetmantss:
      return X86Logging_formatImmBits(sb, u8, vgetmantxx, ASMJIT_ARRAY_SIZE(vgetmantxx));

    case X86Inst::kIdVpcmpb:
    case X86Inst::kIdVpcmpd:
    case X86Inst::kIdVpcmpq:
    case X86Inst::kIdVpcmpw:
    case X86Inst::kIdVpcmpub:
    case X86Inst::kIdVpcmpud:
    case X86Inst::kIdVpcmpuq:
    case X86Inst::kIdVpcmpuw:
      return X86Logging_formatImmText(sb, u8, 2, 4, vpcmpx, 4);

    case X86Inst::kIdVpcomb:
    case X86Inst::kIdVpcomd:
    case X86Inst::kIdVpcomq:
    case X86Inst::kIdVpcomw:
    case X86Inst::kIdVpcomub:
    case X86Inst::kIdVpcomud:
    case X86Inst::kIdVpcomuq:
    case X86Inst::kIdVpcomuw:
      return X86Logging_formatImmText(sb, u8, 2, 4, vpcomx, 4);

    case X86Inst::kIdVpermq:
    case X86Inst::kIdVpermpd:
      return X86Logging_formatImmShuf(sb, u8, 2, 4);

    case X86Inst::kIdVpternlogd:
    case X86Inst::kIdVpternlogq:
      return X86Logging_formatImmShuf(sb, u8, 1, 8);

    case X86Inst::kIdVrangepd:
    case X86Inst::kIdVrangeps:
    case X86Inst::kIdVrangesd:
    case X86Inst::kIdVrangess:
      return X86Logging_formatImmBits(sb, u8, vrangexx, ASMJIT_ARRAY_SIZE(vrangexx));

    case X86Inst::kIdVreducepd:
    case X86Inst::kIdVreduceps:
    case X86Inst::kIdVreducesd:
    case X86Inst::kIdVreducess:
    case X86Inst::kIdVrndscalepd:
    case X86Inst::kIdVrndscaleps:
    case X86Inst::kIdVrndscalesd:
    case X86Inst::kIdVrndscaless:
      return X86Logging_formatImmBits(sb, u8, vreducexx_vrndscalexx, ASMJIT_ARRAY_SIZE(vreducexx_vrndscalexx));

    case X86Inst::kIdVshuff32x4:
    case X86Inst::kIdVshuff64x2:
    case X86Inst::kIdVshufi32x4:
    case X86Inst::kIdVshufi64x2: {
      // One 128-bit lane selector per lane; 1 bit each for 2 lanes, 2 bits
      // each for 4 lanes.
      uint32_t count = std::max<uint32_t>(vecSize / 16, 2);
      uint32_t bits = count <= 2 ? 1 : 2;
      return X86Logging_formatImmShuf(sb, u8, bits, count);
    }

    default:
      return kErrorOk;
  }
}
// ============================================================================
// [asmjit::X86Logging - Format Register]
// ============================================================================
// Appends the name of the register identified by `rType` and `rId` to `sb`.
// Virtual registers (packed ids) are resolved through the CodeCompiler when
// `emitter` is one; physical registers are formatted via the
// `x86RegFormatInfo` table. Unknown types/ids fall back to a diagnostic
// "VirtReg<...>" / "PhysReg<...>" form.
ASMJIT_FAVOR_SIZE Error X86Logging::formatRegister(
  StringBuilder& sb,
  uint32_t logOptions,
  const CodeEmitter* emitter,
  uint32_t archType,
  uint32_t rType,
  uint32_t rId) noexcept {

  ASMJIT_UNUSED(logOptions);
  ASMJIT_UNUSED(archType);

  if (Operand::isPackedId(rId)) {
#if !defined(ASMJIT_DISABLE_COMPILER)
    if (emitter && emitter->getType() == CodeEmitter::kTypeCompiler) {
      const CodeCompiler* cc = static_cast<const CodeCompiler*>(emitter);
      if (cc->isVirtRegValid(rId)) {
        VirtReg* vReg = cc->getVirtRegById(rId);
        ASMJIT_ASSERT(vReg != nullptr);

        // Prefer the user-assigned name, fall back to "v<unpackedId>".
        const char* name = vReg->getName();
        if (name && name[0] != '\0')
          return sb.appendString(name);
        else
          return sb.appendFormat("v%u", static_cast<unsigned int>(Operand::unpackId(rId)));
      }
    }
#endif // !ASMJIT_DISABLE_COMPILER
    return sb.appendFormat("VirtReg<Type=%u Id=%u>", rType, rId);
  }
  else {
    if (rType < ASMJIT_ARRAY_SIZE(x86RegFormatInfo)) {
      const X86RegFormatInfo& rfi = x86RegFormatInfo[rType];

      // Special (architectural) name like "eax"/"rip"/"cs" when available...
      if (rId < rfi.specialCount)
        return sb.appendString(x86RegFormatStrings + rfi.specialIndex + rId * 4);

      // ...otherwise a generic printf-like format such as "xmm%u".
      if (rId < rfi.count)
        return sb.appendFormat(x86RegFormatStrings + rfi.formatIndex, static_cast<unsigned int>(rId));
    }
    return sb.appendFormat("PhysReg<Type=%u Id=%u>", rType, rId);
  }
}
// ============================================================================
// [asmjit::X86Logging - Format Instruction]
// ============================================================================
// Appends a full textual form of an instruction - options/prefixes, mnemonic,
// and all operands (including AVX-512 {k}{z}/{1tox} decorations) - to `sb`.
ASMJIT_FAVOR_SIZE Error X86Logging::formatInstruction(
  StringBuilder& sb,
  uint32_t logOptions,
  const CodeEmitter* emitter,
  uint32_t archType,
  const Inst::Detail& detail, const Operand_* opArray, uint32_t opCount) noexcept {

  uint32_t instId = detail.instId;
  uint32_t options = detail.options;

  // Format instruction options and instruction mnemonic.
  if (instId < X86Inst::_kIdCount) {
    const X86Inst& instInfo = X86Inst::getInst(instId);

    // SHORT|LONG options.
    if (options & X86Inst::kOptionShortForm) ASMJIT_PROPAGATE(sb.appendString("short "));
    if (options & X86Inst::kOptionLongForm) ASMJIT_PROPAGATE(sb.appendString("long "));

    // LOCK|XACQUIRE|XRELEASE options.
    if (options & X86Inst::kOptionXAcquire) ASMJIT_PROPAGATE(sb.appendString("xacquire "));
    if (options & X86Inst::kOptionXRelease) ASMJIT_PROPAGATE(sb.appendString("xrelease "));
    if (options & X86Inst::kOptionLock) ASMJIT_PROPAGATE(sb.appendString("lock "));

    // REP|REPNZ options, optionally with the counter register in "{...}".
    if (options & (X86Inst::kOptionRep | X86Inst::kOptionRepnz)) {
      // NOTE(review): this appendString's result is not wrapped in
      // ASMJIT_PROPAGATE unlike all surrounding appends - confirm whether
      // the dropped error is intentional.
      sb.appendString((options & X86Inst::kOptionRep) ? "rep " : "repnz ");
      if (detail.hasExtraReg()) {
        ASMJIT_PROPAGATE(sb.appendChar('{'));
        ASMJIT_PROPAGATE(formatOperand(sb, logOptions, emitter, archType, detail.extraReg.toReg<Reg>()));
        ASMJIT_PROPAGATE(sb.appendString("} "));
      }
    }

    // REX options - either "rex.rxbw " naming the forced opcode bits, or a
    // plain "rex " when no specific bits are forced.
    if (options & X86Inst::kOptionRex) {
      const uint32_t kRXBWMask = X86Inst::kOptionOpCodeR |
                                 X86Inst::kOptionOpCodeX |
                                 X86Inst::kOptionOpCodeB |
                                 X86Inst::kOptionOpCodeW ;
      if (options & kRXBWMask) {
        // NOTE(review): these five appends also skip ASMJIT_PROPAGATE -
        // confirm intentional (see the REP case above).
        sb.appendString("rex.");
        if (options & X86Inst::kOptionOpCodeR) sb.appendChar('r');
        if (options & X86Inst::kOptionOpCodeX) sb.appendChar('x');
        if (options & X86Inst::kOptionOpCodeB) sb.appendChar('b');
        if (options & X86Inst::kOptionOpCodeW) sb.appendChar('w');
        sb.appendChar(' ');
      }
      else {
        ASMJIT_PROPAGATE(sb.appendString("rex "));
      }
    }

    // VEX|EVEX options.
    if (options & X86Inst::kOptionVex3) ASMJIT_PROPAGATE(sb.appendString("vex3 "));
    if (options & X86Inst::kOptionEvex) ASMJIT_PROPAGATE(sb.appendString("evex "));

    ASMJIT_PROPAGATE(sb.appendString(instInfo.getName()));
  }
  else {
    // Out-of-range instruction id - emit a diagnostic placeholder.
    ASMJIT_PROPAGATE(sb.appendFormat("<unknown id=#%u>", static_cast<unsigned int>(instId)));
  }

  // Format operands; a none operand terminates the list.
  for (uint32_t i = 0; i < opCount; i++) {
    const Operand_& op = opArray[i];
    if (op.isNone()) break;

    ASMJIT_PROPAGATE(sb.appendString(i == 0 ? " " : ", "));
    ASMJIT_PROPAGATE(formatOperand(sb, logOptions, emitter, archType, op));

    // Optionally decode immediates (predicates, shuffles, ...). The widest
    // register operand determines how many immediate fields apply.
    if (op.isImm() && (logOptions & Logger::kOptionImmExtended)) {
      uint32_t vecSize = 16;
      for (uint32_t j = 0; j < opCount; j++)
        if (opArray[j].isReg())
          vecSize = std::max<uint32_t>(vecSize, opArray[j].getSize());
      ASMJIT_PROPAGATE(X86Logging_formatImmExtended(sb, logOptions, instId, vecSize, op.as<Imm>()));
    }

    // Support AVX-512 {k}{z} - printed after the first (destination) operand.
    if (i == 0) {
      if (detail.extraReg.getKind() == X86Reg::kKindK) {
        ASMJIT_PROPAGATE(sb.appendString(" {"));
        ASMJIT_PROPAGATE(formatOperand(sb, logOptions, emitter, archType, detail.extraReg.toReg<Reg>()));
        ASMJIT_PROPAGATE(sb.appendChar('}'));

        if (options & X86Inst::kOptionZMask)
          ASMJIT_PROPAGATE(sb.appendString("{z}"));
      }
      else if (options & X86Inst::kOptionZMask) {
        ASMJIT_PROPAGATE(sb.appendString(" {z}"));
      }
    }

    // Support AVX-512 {1tox} - printed after the broadcasted memory operand.
    if (op.isMem() && (options & X86Inst::kOption1ToX))
      ASMJIT_PROPAGATE(sb.appendString(" {1tox}"));
  }

  return kErrorOk;
}
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"
// [Guard]
#endif // !ASMJIT_DISABLE_LOGGING

View file

@ -0,0 +1,63 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_X86_X86LOGGING_P_H
#define _ASMJIT_X86_X86LOGGING_P_H
#include "../asmjit_build.h"
#if !defined(ASMJIT_DISABLE_LOGGING)
// [Dependencies]
#include "../base/logging.h"
#include "../x86/x86globals.h"
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
//! \addtogroup asmjit_base
//! \{
// ============================================================================
// [asmjit::X86Logging]
// ============================================================================
//! \internal
//!
//! Static helpers that render X86/X64 entities into a `StringBuilder` for the
//! logging subsystem (definitions live in x86logging.cpp; see the formatting
//! code that consumes these in this translation unit's sibling source).
struct X86Logging {
  //! Format a single physical register described by `regType` and `regId` and
  //! append its textual name to `sb`.
  //!
  //! `logOptions` is a mask of `Logger::kOption*` flags; `emitter` may be
  //! null - presumably it is used to resolve emitter-specific (virtual)
  //! registers when present - TODO confirm against the definition.
  static Error formatRegister(
    StringBuilder& sb,
    uint32_t logOptions,
    const CodeEmitter* emitter,
    uint32_t archType,
    uint32_t regType,
    uint32_t regId) noexcept;

  //! Format any operand (register, memory, immediate, label) and append it
  //! to `sb`. Used by `formatInstruction` for each operand in the array.
  static Error formatOperand(
    StringBuilder& sb,
    uint32_t logOptions,
    const CodeEmitter* emitter,
    uint32_t archType,
    const Operand_& op) noexcept;

  //! Format a complete instruction - options (lock/rep/rex/vex3/evex/{k}{z}),
  //! mnemonic, and up to `opCount` operands from `opArray` - appending the
  //! result to `sb`. Returns `kErrorOk` on success or a propagated error from
  //! the string builder.
  static Error formatInstruction(
    StringBuilder& sb,
    uint32_t logOptions,
    const CodeEmitter* emitter,
    uint32_t archType,
    const Inst::Detail& detail, const Operand_* opArray, uint32_t opCount) noexcept;
};
//! \}
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"
// [Guard]
#endif // !ASMJIT_DISABLE_LOGGING
#endif // _ASMJIT_X86_X86LOGGING_P_H

View file

@ -0,0 +1,388 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_X86_X86MISC_H
#define _ASMJIT_X86_X86MISC_H
// [Dependencies]
#include "../x86/x86operand.h"
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
//! \addtogroup asmjit_x86
//! \{
// ============================================================================
// [asmjit::X86RegCount]
// ============================================================================
//! \internal
//!
//! X86/X64 registers count.
//!
//! Since the number of registers changed across CPU generations `X86RegCount`
//! class is used by `X86Assembler` and `X86Compiler` to provide a way to get
//! number of available registers dynamically. 32-bit mode offers always only
//! 8 registers of all classes, however, 64-bit mode offers 16 GP registers and
//! 16 XMM/YMM/ZMM registers. AVX512 instruction set doubles the number of SIMD
//! registers (XMM/YMM/ZMM) to 32, this mode has to be explicitly enabled to
//! take effect as it changes some assumptions.
//!
//! `X86RegCount` is also used extensively by X86Compiler's register allocator
//! and data structures. FP registers were omitted as they are never mapped to
//! variables, thus, not needed to be managed.
//!
//! NOTE: At the moment `X86RegCount` can fit into 32-bits, having 8-bits for
//! each register kind except FP. This can change in the future after a new
//! instruction set, which adds more registers, is introduced.
struct X86RegCount {
  // --------------------------------------------------------------------------
  // [Zero]
  // --------------------------------------------------------------------------

  //! Reset all counters to zero.
  ASMJIT_INLINE void reset() noexcept { _packed = 0; }

  // --------------------------------------------------------------------------
  // [Get]
  // --------------------------------------------------------------------------

  //! Get register count by a register `kind`.
  //!
  //! Each kind occupies one byte of `_packed`; `byteShiftOfDWordStruct` maps
  //! the kind to its byte's bit offset.
  ASMJIT_INLINE uint32_t get(uint32_t kind) const noexcept {
    ASMJIT_ASSERT(kind < Globals::kMaxVRegKinds);

    uint32_t shift = Utils::byteShiftOfDWordStruct(kind);
    return (_packed >> shift) & static_cast<uint32_t>(0xFF);
  }

  //! Get Gp count.
  ASMJIT_INLINE uint32_t getGp() const noexcept { return get(X86Reg::kKindGp); }
  //! Get Mm count.
  ASMJIT_INLINE uint32_t getMm() const noexcept { return get(X86Reg::kKindMm); }
  //! Get K count.
  ASMJIT_INLINE uint32_t getK() const noexcept { return get(X86Reg::kKindK); }
  //! Get XMM/YMM/ZMM count.
  ASMJIT_INLINE uint32_t getVec() const noexcept { return get(X86Reg::kKindVec); }

  // --------------------------------------------------------------------------
  // [Set]
  // --------------------------------------------------------------------------

  //! Set register count by a register `kind` to `n` (must fit in one byte).
  ASMJIT_INLINE void set(uint32_t kind, uint32_t n) noexcept {
    ASMJIT_ASSERT(kind < Globals::kMaxVRegKinds);
    ASMJIT_ASSERT(n <= 0xFF);

    uint32_t shift = Utils::byteShiftOfDWordStruct(kind);
    // Widen before shifting: `0xFF << 24` would left-shift a signed int into
    // the sign bit (signed-overflow UB); shifting an unsigned value is always
    // well defined. After the byte is cleared, OR-ing `n` in is exact since
    // `n <= 0xFF`.
    uint32_t mask = static_cast<uint32_t>(0xFF) << shift;
    _packed = (_packed & ~mask) | (n << shift);
  }

  //! Set Gp count.
  ASMJIT_INLINE void setGp(uint32_t n) noexcept { set(X86Reg::kKindGp, n); }
  //! Set Mm count.
  ASMJIT_INLINE void setMm(uint32_t n) noexcept { set(X86Reg::kKindMm, n); }
  //! Set K count.
  ASMJIT_INLINE void setK(uint32_t n) noexcept { set(X86Reg::kKindK, n); }
  //! Set XMM/YMM/ZMM count.
  ASMJIT_INLINE void setVec(uint32_t n) noexcept { set(X86Reg::kKindVec, n); }

  // --------------------------------------------------------------------------
  // [Add]
  // --------------------------------------------------------------------------

  //! Add `n` to the register count of the given `kind`.
  //!
  //! The assertion guarantees the per-kind byte cannot overflow into the
  //! neighboring byte of `_packed`.
  ASMJIT_INLINE void add(uint32_t kind, uint32_t n = 1) noexcept {
    ASMJIT_ASSERT(kind < Globals::kMaxVRegKinds);
    ASMJIT_ASSERT(0xFF - static_cast<uint32_t>(_regs[kind]) >= n);

    uint32_t shift = Utils::byteShiftOfDWordStruct(kind);
    _packed += n << shift;
  }

  //! Add GP count.
  ASMJIT_INLINE void addGp(uint32_t n) noexcept { add(X86Reg::kKindGp, n); }
  //! Add MMX count.
  ASMJIT_INLINE void addMm(uint32_t n) noexcept { add(X86Reg::kKindMm, n); }
  //! Add K count.
  ASMJIT_INLINE void addK(uint32_t n) noexcept { add(X86Reg::kKindK, n); }
  //! Add XMM/YMM/ZMM count.
  ASMJIT_INLINE void addVec(uint32_t n) noexcept { add(X86Reg::kKindVec, n); }

  // --------------------------------------------------------------------------
  // [Misc]
  // --------------------------------------------------------------------------

  //! Build register start-indexes (running prefix sums) based on the given
  //! `count` of registers.
  //!
  //! Uses `_regs[0..2]` of `count` (declared member order: gp, vec, mm) and
  //! packs `(0, x, y, z)`; `x` cannot exceed 0xFF as it comes from a uint8_t,
  //! hence only `y` and `z` are asserted.
  ASMJIT_INLINE void indexFromRegCount(const X86RegCount& count) noexcept {
    uint32_t x = static_cast<uint32_t>(count._regs[0]);
    uint32_t y = static_cast<uint32_t>(count._regs[1]) + x;
    uint32_t z = static_cast<uint32_t>(count._regs[2]) + y;

    ASMJIT_ASSERT(y <= 0xFF);
    ASMJIT_ASSERT(z <= 0xFF);
    _packed = Utils::pack32_4x8(0, x, y, z);
  }

  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------

  // All views alias the same 4 bytes; `_packed` allows whole-struct ops.
  union {
    struct {
      //! Count of GP registers.
      uint8_t _gp;
      //! Count of XMM|YMM|ZMM registers.
      uint8_t _vec;
      //! Count of MMX registers.
      uint8_t _mm;
      //! Count of K registers.
      uint8_t _k;
    };
    uint8_t _regs[4];
    uint32_t _packed;
  };
};
// ============================================================================
// [asmjit::X86RegMask]
// ============================================================================
//! \internal
//!
//! X86/X64 registers mask.
struct X86RegMask {
  // --------------------------------------------------------------------------
  // [Reset]
  // --------------------------------------------------------------------------

  //! Reset all register masks to zero.
  //!
  //! Operates on `_packed` (the UInt64 overlay), clearing all kinds at once.
  ASMJIT_INLINE void reset() noexcept {
    _packed.reset();
  }

  // --------------------------------------------------------------------------
  // [IsEmpty / Has]
  // --------------------------------------------------------------------------

  //! Get whether all register masks are zero (empty).
  ASMJIT_INLINE bool isEmpty() const noexcept {
    return _packed.isZero();
  }

  //! Get whether any register of `kind` selected by `mask` is set.
  //! An unknown `kind` (assert-guarded) yields false in release builds.
  ASMJIT_INLINE bool has(uint32_t kind, uint32_t mask = 0xFFFFFFFFU) const noexcept {
    ASMJIT_ASSERT(kind < Globals::kMaxVRegKinds);

    switch (kind) {
      case X86Reg::kKindGp : return (static_cast<uint32_t>(_gp ) & mask) != 0;
      case X86Reg::kKindVec: return (static_cast<uint32_t>(_vec) & mask) != 0;
      case X86Reg::kKindMm : return (static_cast<uint32_t>(_mm ) & mask) != 0;
      case X86Reg::kKindK  : return (static_cast<uint32_t>(_k  ) & mask) != 0;
    }
    return false;
  }

  ASMJIT_INLINE bool hasGp(uint32_t mask = 0xFFFFFFFFU) const noexcept { return has(X86Reg::kKindGp, mask); }
  ASMJIT_INLINE bool hasVec(uint32_t mask = 0xFFFFFFFFU) const noexcept { return has(X86Reg::kKindVec, mask); }
  ASMJIT_INLINE bool hasMm(uint32_t mask = 0xFFFFFFFFU) const noexcept { return has(X86Reg::kKindMm, mask); }
  ASMJIT_INLINE bool hasK(uint32_t mask = 0xFFFFFFFFU) const noexcept { return has(X86Reg::kKindK, mask); }

  // --------------------------------------------------------------------------
  // [Get]
  // --------------------------------------------------------------------------

  //! Get the raw mask of the given register `kind` (0 for an unknown kind).
  ASMJIT_INLINE uint32_t get(uint32_t kind) const noexcept {
    ASMJIT_ASSERT(kind < Globals::kMaxVRegKinds);

    switch (kind) {
      case X86Reg::kKindGp : return _gp;
      case X86Reg::kKindVec: return _vec;
      case X86Reg::kKindMm : return _mm;
      case X86Reg::kKindK  : return _k;
    }
    return 0;
  }

  ASMJIT_INLINE uint32_t getGp() const noexcept { return get(X86Reg::kKindGp); }
  ASMJIT_INLINE uint32_t getVec() const noexcept { return get(X86Reg::kKindVec); }
  ASMJIT_INLINE uint32_t getMm() const noexcept { return get(X86Reg::kKindMm); }
  ASMJIT_INLINE uint32_t getK() const noexcept { return get(X86Reg::kKindK); }

  // --------------------------------------------------------------------------
  // [Zero]
  // --------------------------------------------------------------------------

  //! Clear the mask of a single register `kind`; other kinds are untouched.
  ASMJIT_INLINE void zero(uint32_t kind) noexcept {
    ASMJIT_ASSERT(kind < Globals::kMaxVRegKinds);

    switch (kind) {
      case X86Reg::kKindGp : _gp  = 0; break;
      case X86Reg::kKindVec: _vec = 0; break;
      case X86Reg::kKindMm : _mm  = 0; break;
      case X86Reg::kKindK  : _k   = 0; break;
    }
  }

  ASMJIT_INLINE void zeroGp() noexcept { zero(X86Reg::kKindGp); }
  ASMJIT_INLINE void zeroVec() noexcept { zero(X86Reg::kKindVec); }
  ASMJIT_INLINE void zeroMm() noexcept { zero(X86Reg::kKindMm); }
  ASMJIT_INLINE void zeroK() noexcept { zero(X86Reg::kKindK); }

  // --------------------------------------------------------------------------
  // [Set]
  // --------------------------------------------------------------------------

  //! Copy all masks from `other` in one packed assignment.
  ASMJIT_INLINE void set(const X86RegMask& other) noexcept {
    _packed = other._packed;
  }

  //! Set the mask of a single `kind`. Note the per-kind widths: GP is 16-bit,
  //! MM/K are 8-bit, VEC is 32-bit - `mask` is truncated accordingly.
  ASMJIT_INLINE void set(uint32_t kind, uint32_t mask) noexcept {
    ASMJIT_ASSERT(kind < Globals::kMaxVRegKinds);

    switch (kind) {
      case X86Reg::kKindGp : _gp  = static_cast<uint16_t>(mask); break;
      case X86Reg::kKindMm : _mm  = static_cast<uint8_t >(mask); break;
      case X86Reg::kKindK  : _k   = static_cast<uint8_t >(mask); break;
      case X86Reg::kKindVec: _vec = static_cast<uint32_t>(mask); break;
    }
  }

  ASMJIT_INLINE void setGp(uint32_t mask) noexcept { return set(X86Reg::kKindGp, mask); }
  ASMJIT_INLINE void setVec(uint32_t mask) noexcept { return set(X86Reg::kKindVec, mask); }
  ASMJIT_INLINE void setMm(uint32_t mask) noexcept { return set(X86Reg::kKindMm, mask); }
  ASMJIT_INLINE void setK(uint32_t mask) noexcept { return set(X86Reg::kKindK, mask); }

  // --------------------------------------------------------------------------
  // [And]
  // --------------------------------------------------------------------------

  //! Bitwise-AND all kinds at once through the packed overlay.
  ASMJIT_INLINE void and_(const X86RegMask& other) noexcept {
    _packed.and_(other._packed);
  }

  //! Bitwise-AND the mask of a single `kind` with `mask`.
  ASMJIT_INLINE void and_(uint32_t kind, uint32_t mask) noexcept {
    ASMJIT_ASSERT(kind < Globals::kMaxVRegKinds);

    switch (kind) {
      case X86Reg::kKindGp : _gp  &= static_cast<uint16_t>(mask); break;
      case X86Reg::kKindMm : _mm  &= static_cast<uint8_t >(mask); break;
      case X86Reg::kKindK  : _k   &= static_cast<uint8_t >(mask); break;
      case X86Reg::kKindVec: _vec &= static_cast<uint32_t>(mask); break;
    }
  }

  ASMJIT_INLINE void andGp(uint32_t mask) noexcept { and_(X86Reg::kKindGp, mask); }
  ASMJIT_INLINE void andVec(uint32_t mask) noexcept { and_(X86Reg::kKindVec, mask); }
  ASMJIT_INLINE void andMm(uint32_t mask) noexcept { and_(X86Reg::kKindMm, mask); }
  ASMJIT_INLINE void andK(uint32_t mask) noexcept { and_(X86Reg::kKindK, mask); }

  // --------------------------------------------------------------------------
  // [AndNot]
  // --------------------------------------------------------------------------

  //! Clear in this mask every bit set in `other` (all kinds, packed).
  ASMJIT_INLINE void andNot(const X86RegMask& other) noexcept {
    _packed.andNot(other._packed);
  }

  //! Clear in the mask of `kind` every bit set in `mask`.
  ASMJIT_INLINE void andNot(uint32_t kind, uint32_t mask) noexcept {
    ASMJIT_ASSERT(kind < Globals::kMaxVRegKinds);

    switch (kind) {
      case X86Reg::kKindGp : _gp  &= ~static_cast<uint16_t>(mask); break;
      case X86Reg::kKindMm : _mm  &= ~static_cast<uint8_t >(mask); break;
      case X86Reg::kKindK  : _k   &= ~static_cast<uint8_t >(mask); break;
      case X86Reg::kKindVec: _vec &= ~static_cast<uint32_t>(mask); break;
    }
  }

  ASMJIT_INLINE void andNotGp(uint32_t mask) noexcept { andNot(X86Reg::kKindGp, mask); }
  ASMJIT_INLINE void andNotVec(uint32_t mask) noexcept { andNot(X86Reg::kKindVec, mask); }
  ASMJIT_INLINE void andNotMm(uint32_t mask) noexcept { andNot(X86Reg::kKindMm, mask); }
  ASMJIT_INLINE void andNotK(uint32_t mask) noexcept { andNot(X86Reg::kKindK, mask); }

  // --------------------------------------------------------------------------
  // [Or]
  // --------------------------------------------------------------------------

  //! Bitwise-OR all kinds at once through the packed overlay.
  ASMJIT_INLINE void or_(const X86RegMask& other) noexcept {
    _packed.or_(other._packed);
  }

  //! Bitwise-OR the mask of a single `kind` with `mask`.
  ASMJIT_INLINE void or_(uint32_t kind, uint32_t mask) noexcept {
    ASMJIT_ASSERT(kind < Globals::kMaxVRegKinds);
    switch (kind) {
      case X86Reg::kKindGp : _gp  |= static_cast<uint16_t>(mask); break;
      case X86Reg::kKindMm : _mm  |= static_cast<uint8_t >(mask); break;
      case X86Reg::kKindK  : _k   |= static_cast<uint8_t >(mask); break;
      case X86Reg::kKindVec: _vec |= static_cast<uint32_t>(mask); break;
    }
  }

  ASMJIT_INLINE void orGp(uint32_t mask) noexcept { return or_(X86Reg::kKindGp, mask); }
  ASMJIT_INLINE void orVec(uint32_t mask) noexcept { return or_(X86Reg::kKindVec, mask); }
  ASMJIT_INLINE void orMm(uint32_t mask) noexcept { return or_(X86Reg::kKindMm, mask); }
  ASMJIT_INLINE void orK(uint32_t mask) noexcept { return or_(X86Reg::kKindK, mask); }

  // --------------------------------------------------------------------------
  // [Xor]
  // --------------------------------------------------------------------------

  //! Bitwise-XOR all kinds at once through the packed overlay.
  ASMJIT_INLINE void xor_(const X86RegMask& other) noexcept {
    _packed.xor_(other._packed);
  }

  //! Bitwise-XOR the mask of a single `kind` with `mask`.
  ASMJIT_INLINE void xor_(uint32_t kind, uint32_t mask) noexcept {
    ASMJIT_ASSERT(kind < Globals::kMaxVRegKinds);
    switch (kind) {
      case X86Reg::kKindGp : _gp  ^= static_cast<uint16_t>(mask); break;
      case X86Reg::kKindMm : _mm  ^= static_cast<uint8_t >(mask); break;
      case X86Reg::kKindK  : _k   ^= static_cast<uint8_t >(mask); break;
      case X86Reg::kKindVec: _vec ^= static_cast<uint32_t>(mask); break;
    }
  }

  ASMJIT_INLINE void xorGp(uint32_t mask) noexcept { xor_(X86Reg::kKindGp, mask); }
  ASMJIT_INLINE void xorVec(uint32_t mask) noexcept { xor_(X86Reg::kKindVec, mask); }
  ASMJIT_INLINE void xorMm(uint32_t mask) noexcept { xor_(X86Reg::kKindMm, mask); }
  ASMJIT_INLINE void xorK(uint32_t mask) noexcept { xor_(X86Reg::kKindK, mask); }

  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------

  // The named members and `_packed` alias the same 8 bytes; per-kind widths
  // are deliberate (GP 16, MM 8, K 8, VEC 32).
  union {
    struct {
      //! GP registers mask (16 bits).
      uint16_t _gp;
      //! MMX registers mask (8 bits).
      uint8_t _mm;
      //! K registers mask (8 bits).
      uint8_t _k;
      //! XMM|YMM|ZMM registers mask (32 bits).
      uint32_t _vec;
    };

    //! Packed masks.
    UInt64 _packed;
  };
};
//! \}
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"
// [Guard]
#endif // _ASMJIT_X86_X86MISC_H

View file

@ -0,0 +1,176 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Export]
#define ASMJIT_EXPORTS
// [Guard]
#include "../asmjit_build.h"
#if defined(ASMJIT_BUILD_X86)
// [Dependencies]
#include "../x86/x86operand.h"
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
// ============================================================================
// [asmjit::X86Operand - Test]
// ============================================================================
#if defined(ASMJIT_TEST)
// Unit test for X86/X64 operands: checks built-in register constants,
// register factory functions, per-type register properties (id/size/type/
// kind), `cloneAs` conversions between vector widths, default-constructed
// register validity, and basic X86Mem construction from a Label.
UNIT(x86_operand) {
  Label L;

  INFO("Checking basic properties of built-in X86 registers");
  EXPECT(x86::gpb(X86Gp::kIdAx) == x86::al);
  EXPECT(x86::gpb(X86Gp::kIdBx) == x86::bl);
  EXPECT(x86::gpb(X86Gp::kIdCx) == x86::cl);
  EXPECT(x86::gpb(X86Gp::kIdDx) == x86::dl);

  EXPECT(x86::gpb_lo(X86Gp::kIdAx) == x86::al);
  EXPECT(x86::gpb_lo(X86Gp::kIdBx) == x86::bl);
  EXPECT(x86::gpb_lo(X86Gp::kIdCx) == x86::cl);
  EXPECT(x86::gpb_lo(X86Gp::kIdDx) == x86::dl);

  EXPECT(x86::gpb_hi(X86Gp::kIdAx) == x86::ah);
  EXPECT(x86::gpb_hi(X86Gp::kIdBx) == x86::bh);
  EXPECT(x86::gpb_hi(X86Gp::kIdCx) == x86::ch);
  EXPECT(x86::gpb_hi(X86Gp::kIdDx) == x86::dh);

  EXPECT(x86::gpw(X86Gp::kIdAx) == x86::ax);
  EXPECT(x86::gpw(X86Gp::kIdBx) == x86::bx);
  EXPECT(x86::gpw(X86Gp::kIdCx) == x86::cx);
  EXPECT(x86::gpw(X86Gp::kIdDx) == x86::dx);

  EXPECT(x86::gpd(X86Gp::kIdAx) == x86::eax);
  EXPECT(x86::gpd(X86Gp::kIdBx) == x86::ebx);
  EXPECT(x86::gpd(X86Gp::kIdCx) == x86::ecx);
  EXPECT(x86::gpd(X86Gp::kIdDx) == x86::edx);

  EXPECT(x86::gpq(X86Gp::kIdAx) == x86::rax);
  EXPECT(x86::gpq(X86Gp::kIdBx) == x86::rbx);
  EXPECT(x86::gpq(X86Gp::kIdCx) == x86::rcx);
  EXPECT(x86::gpq(X86Gp::kIdDx) == x86::rdx);

  // Negative checks - factories with different ids must not compare equal.
  EXPECT(x86::gpb(X86Gp::kIdAx) != x86::dl);
  EXPECT(x86::gpw(X86Gp::kIdBx) != x86::cx);
  EXPECT(x86::gpd(X86Gp::kIdCx) != x86::ebx);
  EXPECT(x86::gpq(X86Gp::kIdDx) != x86::rax);

  INFO("Checking if x86::reg(...) matches built-in IDs");
  EXPECT(x86::fp(5) == x86::fp5);
  EXPECT(x86::mm(5) == x86::mm5);
  EXPECT(x86::k(5) == x86::k5);
  EXPECT(x86::cr(5) == x86::cr5);
  EXPECT(x86::dr(5) == x86::dr5);
  EXPECT(x86::xmm(5) == x86::xmm5);
  EXPECT(x86::ymm(5) == x86::ymm5);
  EXPECT(x86::zmm(5) == x86::zmm5);

  INFO("Checking GP register properties");
  EXPECT(X86Gp().isReg() == false);
  EXPECT(x86::eax.isReg() == true);
  EXPECT(x86::eax.getId() == 0);
  EXPECT(x86::eax.getSize() == 4);
  EXPECT(x86::eax.getType() == X86Reg::kRegGpd);
  EXPECT(x86::eax.getKind() == X86Reg::kKindGp);

  INFO("Checking FP register properties");
  EXPECT(X86Fp().isReg() == false);
  EXPECT(x86::fp1.isReg() == true);
  EXPECT(x86::fp1.getId() == 1);
  EXPECT(x86::fp1.getSize() == 10);  // 80-bit x87 register.
  EXPECT(x86::fp1.getType() == X86Reg::kRegFp);
  EXPECT(x86::fp1.getKind() == X86Reg::kKindFp);

  INFO("Checking MM register properties");
  EXPECT(X86Mm().isReg() == false);
  EXPECT(x86::mm2.isReg() == true);
  EXPECT(x86::mm2.getId() == 2);
  EXPECT(x86::mm2.getSize() == 8);
  EXPECT(x86::mm2.getType() == X86Reg::kRegMm);
  EXPECT(x86::mm2.getKind() == X86Reg::kKindMm);

  INFO("Checking K register properties");
  EXPECT(X86KReg().isReg() == false);
  EXPECT(x86::k3.isReg() == true);
  EXPECT(x86::k3.getId() == 3);
  EXPECT(x86::k3.getSize() == 0);  // K registers report no byte size.
  EXPECT(x86::k3.getType() == X86Reg::kRegK);
  EXPECT(x86::k3.getKind() == X86Reg::kKindK);

  INFO("Checking XMM register properties");
  EXPECT(X86Xmm().isReg() == false);
  EXPECT(x86::xmm4.isReg() == true);
  EXPECT(x86::xmm4.getId() == 4);
  EXPECT(x86::xmm4.getSize() == 16);
  EXPECT(x86::xmm4.getType() == X86Reg::kRegXmm);
  EXPECT(x86::xmm4.getKind() == X86Reg::kKindVec);
  EXPECT(x86::xmm4.isVec());

  INFO("Checking YMM register properties");
  EXPECT(X86Ymm().isReg() == false);
  EXPECT(x86::ymm5.isReg() == true);
  EXPECT(x86::ymm5.getId() == 5);
  EXPECT(x86::ymm5.getSize() == 32);
  EXPECT(x86::ymm5.getType() == X86Reg::kRegYmm);
  EXPECT(x86::ymm5.getKind() == X86Reg::kKindVec);
  EXPECT(x86::ymm5.isVec());

  INFO("Checking ZMM register properties");
  EXPECT(X86Zmm().isReg() == false);
  EXPECT(x86::zmm6.isReg() == true);
  EXPECT(x86::zmm6.getId() == 6);
  EXPECT(x86::zmm6.getSize() == 64);
  EXPECT(x86::zmm6.getType() == X86Reg::kRegZmm);
  EXPECT(x86::zmm6.getKind() == X86Reg::kKindVec);
  EXPECT(x86::zmm6.isVec());

  INFO("Checking VEC register properties");
  EXPECT(X86Vec().isReg() == false);
  // Converts a VEC register to a type of the passed register, but keeps the ID.
  EXPECT(x86::xmm4.cloneAs(x86::ymm10) == x86::ymm4);
  EXPECT(x86::xmm4.cloneAs(x86::zmm11) == x86::zmm4);
  EXPECT(x86::ymm5.cloneAs(x86::xmm12) == x86::xmm5);
  EXPECT(x86::ymm5.cloneAs(x86::zmm13) == x86::zmm5);
  EXPECT(x86::zmm6.cloneAs(x86::xmm14) == x86::xmm6);
  EXPECT(x86::zmm6.cloneAs(x86::ymm15) == x86::ymm6);

  INFO("Checking if default constructed regs behave as expected");
  EXPECT(X86Reg().isValid() == false);
  EXPECT(X86Gp().isValid() == false);
  EXPECT(X86Fp().isValid() == false);
  EXPECT(X86Mm().isValid() == false);
  EXPECT(X86Xmm().isValid() == false);
  EXPECT(X86Ymm().isValid() == false);
  EXPECT(X86Zmm().isValid() == false);
  EXPECT(X86KReg().isValid() == false);

  INFO("Checking X86Mem operand");
  X86Mem m;
  EXPECT(m == X86Mem(),
    "Two default constructed X86Mem operands must be equal");

  // A label-based memory operand has a base, but that base is a label,
  // not a register.
  X86Mem mL = x86::ptr(L);
  EXPECT(mL.hasBase() == true,
    "Memory constructed from Label must hasBase()");
  EXPECT(mL.hasBaseReg() == false,
    "Memory constructed from Label must not report hasBaseReg()");
  EXPECT(mL.hasBaseLabel() == true,
    "Memory constructed from Label must report hasBaseLabel()");
}
#endif // ASMJIT_TEST
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"
// [Guard]
#endif // ASMJIT_BUILD_X86

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,122 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Export]
#define ASMJIT_EXPORTS
#define ASMJIT_EXPORTS_X86_OPERAND
// [Guard]
#include "../asmjit_build.h"
#if defined(ASMJIT_BUILD_X86)
// [Dependencies]
#include "../base/misc_p.h"
#include "../x86/x86operand.h"
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
// ============================================================================
// [asmjit::X86OpData]
// ============================================================================
// Register Operand {
// uint32_t signature;
// uint32_t id;
// uint32_t reserved8_4;
// uint32_t reserved12_4;
// }
// Expands to one register-operand initializer: {signature, id, 0, 0}.
#define ASMJIT_X86_REG_01(TYPE, ID) \
  {{{                               \
    uint32_t(X86RegTraits<TYPE>::kSignature), \
    uint32_t(ID),                   \
    uint32_t(0),                    \
    uint32_t(0)                     \
  }}}

// Expands to 4 consecutive registers starting at ID.
#define ASMJIT_X86_REG_04(TYPE, ID) \
  ASMJIT_X86_REG_01(TYPE, ID + 0 ), \
  ASMJIT_X86_REG_01(TYPE, ID + 1 ), \
  ASMJIT_X86_REG_01(TYPE, ID + 2 ), \
  ASMJIT_X86_REG_01(TYPE, ID + 3 )

// Expands to 7 consecutive registers starting at ID (used by segment regs).
#define ASMJIT_X86_REG_07(TYPE, ID) \
  ASMJIT_X86_REG_04(TYPE, ID + 0 ), \
  ASMJIT_X86_REG_01(TYPE, ID + 4 ), \
  ASMJIT_X86_REG_01(TYPE, ID + 5 ), \
  ASMJIT_X86_REG_01(TYPE, ID + 6 )

// Expands to 8 consecutive registers starting at ID.
#define ASMJIT_X86_REG_08(TYPE, ID) \
  ASMJIT_X86_REG_04(TYPE, ID + 0 ), \
  ASMJIT_X86_REG_04(TYPE, ID + 4 )

// Expands to 16 consecutive registers starting at ID.
#define ASMJIT_X86_REG_16(TYPE, ID) \
  ASMJIT_X86_REG_08(TYPE, ID + 0 ), \
  ASMJIT_X86_REG_08(TYPE, ID + 8 )

// Expands to 32 consecutive registers starting at ID (AVX-512 vector count).
#define ASMJIT_X86_REG_32(TYPE, ID) \
  ASMJIT_X86_REG_16(TYPE, ID + 0 ), \
  ASMJIT_X86_REG_16(TYPE, ID + 16)

// Global, statically-initialized operand data: per-type signatures/counts
// followed by pre-built register operands for every physical register.
const X86OpData x86OpData = {
  // --------------------------------------------------------------------------
  // [ArchRegs]
  // --------------------------------------------------------------------------

  {
    // RegInfo[] - one signature per register type (32 entries).
    {
#define ASMJIT_X86_REG_SIGNATURE(TYPE) { X86RegTraits<TYPE>::kSignature }
      ASMJIT_TABLE_16(ASMJIT_X86_REG_SIGNATURE, 0),
      ASMJIT_TABLE_16(ASMJIT_X86_REG_SIGNATURE, 16)
#undef ASMJIT_X86_REG_SIGNATURE
    },

    // RegCount[]
    { ASMJIT_TABLE_T_32(X86RegTraits, kCount, 0) },

    // RegTypeToTypeId[]
    { ASMJIT_TABLE_T_32(X86RegTraits, kTypeId, 0) }
  },

  // --------------------------------------------------------------------------
  // [Registers]
  // --------------------------------------------------------------------------

  { ASMJIT_X86_REG_01(X86Reg::kRegRip  , 0) },
  { ASMJIT_X86_REG_07(X86Reg::kRegSeg  , 0) },
  { ASMJIT_X86_REG_16(X86Reg::kRegGpbLo, 0) },
  { ASMJIT_X86_REG_04(X86Reg::kRegGpbHi, 0) },
  { ASMJIT_X86_REG_16(X86Reg::kRegGpw  , 0) },
  { ASMJIT_X86_REG_16(X86Reg::kRegGpd  , 0) },
  { ASMJIT_X86_REG_16(X86Reg::kRegGpq  , 0) },
  { ASMJIT_X86_REG_08(X86Reg::kRegFp   , 0) },
  { ASMJIT_X86_REG_08(X86Reg::kRegMm   , 0) },
  { ASMJIT_X86_REG_08(X86Reg::kRegK    , 0) },
  { ASMJIT_X86_REG_32(X86Reg::kRegXmm  , 0) },
  { ASMJIT_X86_REG_32(X86Reg::kRegYmm  , 0) },
  { ASMJIT_X86_REG_32(X86Reg::kRegZmm  , 0) },
  { ASMJIT_X86_REG_04(X86Reg::kRegBnd  , 0) },
  { ASMJIT_X86_REG_16(X86Reg::kRegCr   , 0) },
  { ASMJIT_X86_REG_16(X86Reg::kRegDr   , 0) }
};

// Helper macros are local to this table - undefine them all.
#undef ASMJIT_X86_REG_32
#undef ASMJIT_X86_REG_16
#undef ASMJIT_X86_REG_08
#undef ASMJIT_X86_REG_04
#undef ASMJIT_X86_REG_01
// NOTE: already undefined above inside the initializer; harmless no-op.
#undef ASMJIT_X86_REG_SIGNATURE
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"
// [Guard]
#endif // ASMJIT_BUILD_X86

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,737 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_X86_X86REGALLOC_P_H
#define _ASMJIT_X86_X86REGALLOC_P_H
#include "../asmjit_build.h"
#if !defined(ASMJIT_DISABLE_COMPILER)
// [Dependencies]
#include "../base/codecompiler.h"
#include "../base/regalloc_p.h"
#include "../base/utils.h"
#include "../x86/x86assembler.h"
#include "../x86/x86compiler.h"
#include "../x86/x86misc.h"
// [Api-Begin]
#include "../asmjit_apibegin.h"
namespace asmjit {
//! \addtogroup asmjit_x86
//! \{
// ============================================================================
// [asmjit::X86RAData]
// ============================================================================
//! \internal
//!
//! Register-allocator data attached to a node: per-kind TiedReg ranges plus
//! in/out/clobbered register masks. `tiedArray` is a flexible-array-style
//! trailing member; the struct is allocated with extra room for `tiedTotal`
//! entries (see `X86RAPass::newRAData`).
struct X86RAData : public RAData {
  ASMJIT_INLINE X86RAData(uint32_t tiedTotal) noexcept : RAData(tiedTotal) {
    inRegs.reset();
    outRegs.reset();
    clobberedRegs.reset();
    tiedIndex.reset();
    tiedCount.reset();
  }

  // --------------------------------------------------------------------------
  // [Accessors]
  // --------------------------------------------------------------------------

  //! Get TiedReg array.
  ASMJIT_INLINE TiedReg* getTiedArray() const noexcept {
    return const_cast<TiedReg*>(tiedArray);
  }

  //! Get TiedReg array for a given register `kind` (offset by `tiedIndex`).
  ASMJIT_INLINE TiedReg* getTiedArrayByKind(uint32_t kind) const noexcept {
    return const_cast<TiedReg*>(tiedArray) + tiedIndex.get(kind);
  }

  //! Get TiedReg start index for a given register `kind`.
  ASMJIT_INLINE uint32_t getTiedStart(uint32_t kind) const noexcept {
    return tiedIndex.get(kind);
  }

  //! Get TiedReg count for a given register `kind`.
  ASMJIT_INLINE uint32_t getTiedCountByKind(uint32_t kind) const noexcept {
    return tiedCount.get(kind);
  }

  //! Get TiedReg at the specified `index` (across all kinds).
  ASMJIT_INLINE TiedReg* getTiedAt(uint32_t index) const noexcept {
    ASMJIT_ASSERT(index < tiedTotal);
    return getTiedArray() + index;
  }

  //! Get TiedReg at the specified `index` within the given register `kind`.
  ASMJIT_INLINE TiedReg* getTiedAtByKind(uint32_t kind, uint32_t index) const noexcept {
    ASMJIT_ASSERT(index < tiedCount._regs[kind]);
    return getTiedArrayByKind(kind) + index;
  }

  //! Store `tied` at the specified `index`.
  ASMJIT_INLINE void setTiedAt(uint32_t index, TiedReg& tied) noexcept {
    ASMJIT_ASSERT(index < tiedTotal);
    tiedArray[index] = tied;
  }

  // --------------------------------------------------------------------------
  // [Utils]
  // --------------------------------------------------------------------------

  //! Find TiedReg associated with `vreg` across all kinds, or null.
  //!
  //! NOTE: locals are named `tied`/`count` so they don't shadow the members
  //! `tiedArray` and `tiedCount` (the latter is an X86RegCount, a different
  //! type - the previous shadowing was a bug waiting to happen).
  ASMJIT_INLINE TiedReg* findTied(VirtReg* vreg) const noexcept {
    TiedReg* tied = getTiedArray();
    uint32_t count = tiedTotal;

    for (uint32_t i = 0; i < count; i++)
      if (tied[i].vreg == vreg)
        return &tied[i];

    return nullptr;
  }

  //! Find TiedReg associated with `vreg` within the given register `kind`,
  //! or null.
  ASMJIT_INLINE TiedReg* findTiedByKind(uint32_t kind, VirtReg* vreg) const noexcept {
    TiedReg* tied = getTiedArrayByKind(kind);
    uint32_t count = getTiedCountByKind(kind);

    for (uint32_t i = 0; i < count; i++)
      if (tied[i].vreg == vreg)
        return &tied[i];

    return nullptr;
  }

  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------

  //! Special registers on input.
  //!
  //! Special register(s) restricted to one or more physical register. If there
  //! is more than one special register it means that we have to duplicate the
  //! variable content to all of them (it means that the same varible was used
  //! by two or more operands). We forget about duplicates after the register
  //! allocation finishes and marks all duplicates as non-assigned.
  X86RegMask inRegs;

  //! Special registers on output.
  //!
  //! Special register(s) used on output. Each variable can have only one
  //! special register on the output, 'X86RAData' contains all registers from
  //! all 'TiedReg's.
  X86RegMask outRegs;

  //! Clobbered registers (by a function call).
  X86RegMask clobberedRegs;

  //! Start indexes of `TiedReg`s per register kind.
  X86RegCount tiedIndex;
  //! Count of variables per register kind.
  X86RegCount tiedCount;

  //! Linked registers (trailing storage, actual length is `tiedTotal`).
  TiedReg tiedArray[1];
};
// ============================================================================
// [asmjit::X86StateCell]
// ============================================================================
//! X86/X64 state-cell.
//! X86/X64 state-cell.
//!
//! One byte of per-variable state: `_packed` overlays a 2-bit `_state`
//! bit-field (remaining 6 bits unused).
union X86StateCell {
  // --------------------------------------------------------------------------
  // [Accessors]
  // --------------------------------------------------------------------------

  //! Get the 2-bit state value.
  ASMJIT_INLINE uint32_t getState() const noexcept { return _state; }
  //! Set the state; the bit-field keeps only the low 2 bits of `state`.
  ASMJIT_INLINE void setState(uint32_t state) noexcept { _state = static_cast<uint8_t>(state); }

  // --------------------------------------------------------------------------
  // [Reset]
  // --------------------------------------------------------------------------

  //! Clear the whole cell (state and unused bits) in one store.
  ASMJIT_INLINE void reset() noexcept { _packed = 0; }

  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------

  //! Raw byte view of the cell.
  uint8_t _packed;

  struct {
    //! Variable state (2 bits).
    uint8_t _state : 2;
    //! Unused padding bits.
    uint8_t _unused : 6;
  };
};
// ============================================================================
// [asmjit::X86RAState]
// ============================================================================
//! X86/X64 state.
//! X86/X64 register-allocator state.
//!
//! Tracks which `VirtReg` currently occupies each physical register, the
//! occupied/modified masks, and a trailing array of per-variable cells whose
//! length is owned by `X86RAPass`.
struct X86RAState : RAState {
  enum {
    //! Base index of GP registers.
    kGpIndex = 0,
    //! Count of GP registers.
    kGpCount = 16,

    //! Base index of MMX registers.
    kMmIndex = kGpIndex + kGpCount,
    //! Count of Mm registers.
    kMmCount = 8,

    //! Base index of XMM registers.
    kXmmIndex = kMmIndex + kMmCount,
    //! Count of XMM registers.
    kXmmCount = 16,

    //! Count of all registers in `X86RAState`.
    kAllCount = kXmmIndex + kXmmCount
  };

  // --------------------------------------------------------------------------
  // [Accessors]
  // --------------------------------------------------------------------------

  //! Get the combined list of all tracked registers.
  ASMJIT_INLINE VirtReg** getList() {
    return _list;
  }

  //! Get the sub-list that belongs to the given register `kind`, or null for
  //! a kind this state does not track.
  ASMJIT_INLINE VirtReg** getListByKind(uint32_t kind) {
    if (kind == X86Reg::kKindGp) return _listGp;
    if (kind == X86Reg::kKindMm) return _listMm;
    if (kind == X86Reg::kKindVec) return _listXmm;
    return nullptr;
  }

  // --------------------------------------------------------------------------
  // [Clear]
  // --------------------------------------------------------------------------

  //! Zero the register lists, both masks, and `numCells` trailing state cells
  //! in a single memset over this object's storage.
  ASMJIT_INLINE void reset(size_t numCells) {
    size_t totalSize = kAllCount * sizeof(VirtReg*)
                     + 2         * sizeof(X86RegMask)
                     + numCells  * sizeof(X86StateCell);
    ::memset(this, 0, totalSize);
  }

  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------

  // `_list` and the per-kind arrays alias the same storage; per-kind views
  // are just fixed slices of the combined array.
  union {
    //! List of all allocated variables in one array.
    VirtReg* _list[kAllCount];

    struct {
      //! Allocated GP registers.
      VirtReg* _listGp[kGpCount];
      //! Allocated MMX registers.
      VirtReg* _listMm[kMmCount];
      //! Allocated XMM registers.
      VirtReg* _listXmm[kXmmCount];
    };
  };

  //! Occupied registers (mask).
  X86RegMask _occupied;
  //! Modified registers (mask).
  X86RegMask _modified;

  //! Per-variable data; the real length is stored in `X86RAPass`.
  X86StateCell _cells[1];
};
// ============================================================================
// [asmjit::X86RAPass]
// ============================================================================
#if defined(ASMJIT_DEBUG)
# define ASMJIT_X86_CHECK_STATE _checkState();
#else
# define ASMJIT_X86_CHECK_STATE
#endif // ASMJIT_DEBUG
//! \internal
//!
//! X86 register allocator pipeline.
//!
//! Takes care of generating function prologs and epilogs, and also performs
//! register allocation.
class X86RAPass : public RAPass {
public:
ASMJIT_NONCOPYABLE(X86RAPass)
typedef RAPass Base;
//! Kind of operation performed by the register-management helpers.
enum RegOp {
kRegOpMove,
kRegOpLoad,
kRegOpSave
};
// --------------------------------------------------------------------------
// [Construction / Destruction]
// --------------------------------------------------------------------------
X86RAPass() noexcept;
virtual ~X86RAPass() noexcept;
// --------------------------------------------------------------------------
// [Interface]
// --------------------------------------------------------------------------
//! Run the register-allocation pass; `zone` provides temporary memory.
virtual Error process(Zone* zone) noexcept override;
//! Prepare the pass for processing function `func`.
virtual Error prepare(CCFunc* func) noexcept override;
// --------------------------------------------------------------------------
// [ArchInfo]
// --------------------------------------------------------------------------
//! Get the native GP register size (taken from the stack-pointer register).
ASMJIT_INLINE uint32_t getGpSize() const noexcept { return _zsp.getSize(); }
// --------------------------------------------------------------------------
// [Accessors]
// --------------------------------------------------------------------------
//! Get compiler as `X86Compiler`.
ASMJIT_INLINE X86Compiler* cc() const noexcept { return static_cast<X86Compiler*>(_cb); }
//! Get clobbered registers (global).
ASMJIT_INLINE uint32_t getClobberedRegs(uint32_t kind) noexcept { return _clobberedRegs.get(kind); }
// --------------------------------------------------------------------------
// [Helpers]
// --------------------------------------------------------------------------
//! Allocate an `X86RAData` with `tiedTotal` trailing `TiedReg` slots from
//! the pass zone (placement-new; released in bulk with the zone).
ASMJIT_INLINE X86RAData* newRAData(uint32_t tiedTotal) noexcept {
return new(_zone->alloc(sizeof(X86RAData) + tiedTotal * sizeof(TiedReg))) X86RAData(tiedTotal);
}
// --------------------------------------------------------------------------
// [Emit]
// --------------------------------------------------------------------------
// Tiny wrappers that call `X86Internal::emit...()`.
// The `reason` string annotates the emitted instruction (used for logging).
Error emitMove(VirtReg* vreg, uint32_t dstId, uint32_t srcId, const char* reason);
Error emitLoad(VirtReg* vreg, uint32_t id, const char* reason);
Error emitSave(VirtReg* vreg, uint32_t id, const char* reason);
Error emitSwapGp(VirtReg* aVReg, VirtReg* bVReg, uint32_t aId, uint32_t bId, const char* reason) noexcept;
Error emitSwapVec(VirtReg* aVReg, VirtReg* bVReg, uint32_t aId, uint32_t bId, const char* reason) noexcept;
Error emitImmToReg(uint32_t dstTypeId, uint32_t dstPhysId, const Imm* src) noexcept;
Error emitImmToStack(uint32_t dstTypeId, const X86Mem* dst, const Imm* src) noexcept;
Error emitRegToStack(uint32_t dstTypeId, const X86Mem* dst, uint32_t srcTypeId, uint32_t srcPhysId) noexcept;
// --------------------------------------------------------------------------
// [Register Management]
// --------------------------------------------------------------------------
//! Verify consistency of the allocator state; only invoked through
//! `ASMJIT_X86_CHECK_STATE`, which expands to nothing in release builds.
void _checkState();
// --------------------------------------------------------------------------
// [Attach / Detach]
// --------------------------------------------------------------------------
//! Attach.
//!
//! Attach a register to the 'VirtReg', changing 'VirtReg' members to show
//! that the variable is currently alive and linking variable with the
//! current 'X86RAState'.
template<int C>
ASMJIT_INLINE void attach(VirtReg* vreg, uint32_t physId, bool modified) {
ASMJIT_ASSERT(vreg->getKind() == C);
ASMJIT_ASSERT(physId != Globals::kInvalidRegId);
// Prevent Esp allocation if C==Gp.
ASMJIT_ASSERT(C != X86Reg::kKindGp || physId != X86Gp::kIdSp);
uint32_t regMask = Utils::mask(physId);
vreg->setState(VirtReg::kStateReg);
vreg->setModified(modified);
vreg->setPhysId(physId);
vreg->addHomeId(physId);
_x86State.getListByKind(C)[physId] = vreg;
_x86State._occupied.or_(C, regMask);
// Shifting the bool sets the modified bit only when `modified` is true.
_x86State._modified.or_(C, static_cast<uint32_t>(modified) << physId);
ASMJIT_X86_CHECK_STATE
}
//! Detach.
//!
//! The opposite of 'Attach'. Detach resets the members in 'VirtReg'
//! (physId, state and changed flags) and unlinks the variable with the
//! current 'X86RAState'.
template<int C>
ASMJIT_INLINE void detach(VirtReg* vreg, uint32_t physId, uint32_t vState) {
ASMJIT_ASSERT(vreg->getKind() == C);
ASMJIT_ASSERT(vreg->getPhysId() == physId);
ASMJIT_ASSERT(vState != VirtReg::kStateReg);
uint32_t regMask = Utils::mask(physId);
vreg->setState(vState);
vreg->resetPhysId();
vreg->setModified(false);
// Clear the register's slot and both its occupied/modified bits.
_x86State.getListByKind(C)[physId] = nullptr;
_x86State._occupied.andNot(C, regMask);
_x86State._modified.andNot(C, regMask);
ASMJIT_X86_CHECK_STATE
}
// --------------------------------------------------------------------------
// [Rebase]
// --------------------------------------------------------------------------
//! Rebase.
//!
//! Change the register of the 'VirtReg' changing also the current 'X86RAState'.
//! Rebase is nearly identical to 'Detach' and 'Attach' sequence, but doesn't
//! change the `VirtReg`s modified flag.
template<int C>
ASMJIT_INLINE void rebase(VirtReg* vreg, uint32_t newPhysId, uint32_t oldPhysId) {
ASMJIT_ASSERT(vreg->getKind() == C);
uint32_t newRegMask = Utils::mask(newPhysId);
uint32_t oldRegMask = Utils::mask(oldPhysId);
// Exactly the old and the new register bits are set, so a single XOR
// moves the occupied (and optionally modified) bit between positions.
uint32_t bothRegMask = newRegMask ^ oldRegMask;
vreg->setPhysId(newPhysId);
_x86State.getListByKind(C)[oldPhysId] = nullptr;
_x86State.getListByKind(C)[newPhysId] = vreg;
_x86State._occupied.xor_(C, bothRegMask);
// `-isModified()` is 0 or all-ones; modified bits move only if modified.
_x86State._modified.xor_(C, bothRegMask & -static_cast<int32_t>(vreg->isModified()));
ASMJIT_X86_CHECK_STATE
}
// --------------------------------------------------------------------------
// [Load / Save]
// --------------------------------------------------------------------------
//! Load.
//!
//! Load variable from its memory slot to a register, emitting 'Load'
//! instruction and changing the variable state to allocated.
template<int C>
ASMJIT_INLINE void load(VirtReg* vreg, uint32_t physId) {
// Can be only called if variable is not allocated.
ASMJIT_ASSERT(vreg->getKind() == C);
ASMJIT_ASSERT(vreg->getState() != VirtReg::kStateReg);
ASMJIT_ASSERT(vreg->getPhysId() == Globals::kInvalidRegId);
emitLoad(vreg, physId, "Load");
attach<C>(vreg, physId, false);
ASMJIT_X86_CHECK_STATE
}
//! Save.
//!
//! Save the variable into its home location, but keep it as allocated.
template<int C>
ASMJIT_INLINE void save(VirtReg* vreg) {
ASMJIT_ASSERT(vreg->getKind() == C);
ASMJIT_ASSERT(vreg->getState() == VirtReg::kStateReg);
ASMJIT_ASSERT(vreg->getPhysId() != Globals::kInvalidRegId);
uint32_t physId = vreg->getPhysId();
uint32_t regMask = Utils::mask(physId);
emitSave(vreg, physId, "Save");
// The register stays allocated; only the modified flag is cleared.
vreg->setModified(false);
_x86State._modified.andNot(C, regMask);
ASMJIT_X86_CHECK_STATE
}
// --------------------------------------------------------------------------
// [Move / Swap]
// --------------------------------------------------------------------------
//! Move a register.
//!
//! Move register from one index to another, emitting 'Move' if needed. This
//! function does nothing if register is already at the given index.
template<int C>
ASMJIT_INLINE void move(VirtReg* vreg, uint32_t newPhysId) {
ASMJIT_ASSERT(vreg->getKind() == C);
ASMJIT_ASSERT(vreg->getState() == VirtReg::kStateReg);
ASMJIT_ASSERT(vreg->getPhysId() != Globals::kInvalidRegId);
uint32_t oldPhysId = vreg->getPhysId();
if (newPhysId != oldPhysId) {
emitMove(vreg, newPhysId, oldPhysId, "Move");
rebase<C>(vreg, newPhysId, oldPhysId);
}
ASMJIT_X86_CHECK_STATE
}
//! Swap two registers
//!
//! It's only possible to swap Gp registers.
ASMJIT_INLINE void swapGp(VirtReg* aVReg, VirtReg* bVReg) {
ASMJIT_ASSERT(aVReg != bVReg);
ASMJIT_ASSERT(aVReg->getKind() == X86Reg::kKindGp);
ASMJIT_ASSERT(aVReg->getState() == VirtReg::kStateReg);
ASMJIT_ASSERT(aVReg->getPhysId() != Globals::kInvalidRegId);
ASMJIT_ASSERT(bVReg->getKind() == X86Reg::kKindGp);
ASMJIT_ASSERT(bVReg->getState() == VirtReg::kStateReg);
ASMJIT_ASSERT(bVReg->getPhysId() != Globals::kInvalidRegId);
uint32_t aIndex = aVReg->getPhysId();
uint32_t bIndex = bVReg->getPhysId();
emitSwapGp(aVReg, bVReg, aIndex, bIndex, "Swap");
aVReg->setPhysId(bIndex);
bVReg->setPhysId(aIndex);
_x86State.getListByKind(X86Reg::kKindGp)[aIndex] = bVReg;
_x86State.getListByKind(X86Reg::kKindGp)[bIndex] = aVReg;
// `m` is 1 only when the two modified flags differ; the XOR then flips
// both bit positions so the flags follow their registers.
uint32_t m = aVReg->isModified() ^ bVReg->isModified();
_x86State._modified.xor_(X86Reg::kKindGp, (m << aIndex) | (m << bIndex));
ASMJIT_X86_CHECK_STATE
}
//! Swap two registers
//!
//! Xor swap on Vec registers.
ASMJIT_INLINE void swapVec(VirtReg* aVReg, VirtReg* bVReg) {
ASMJIT_ASSERT(aVReg != bVReg);
ASMJIT_ASSERT(aVReg->getKind() == X86Reg::kKindVec);
ASMJIT_ASSERT(aVReg->getState() == VirtReg::kStateReg);
ASMJIT_ASSERT(aVReg->getPhysId() != Globals::kInvalidRegId);
ASMJIT_ASSERT(bVReg->getKind() == X86Reg::kKindVec);
ASMJIT_ASSERT(bVReg->getState() == VirtReg::kStateReg);
ASMJIT_ASSERT(bVReg->getPhysId() != Globals::kInvalidRegId);
uint32_t aIndex = aVReg->getPhysId();
uint32_t bIndex = bVReg->getPhysId();
emitSwapVec(aVReg, bVReg, aIndex, bIndex, "Swap");
aVReg->setPhysId(bIndex);
bVReg->setPhysId(aIndex);
_x86State.getListByKind(X86Reg::kKindVec)[aIndex] = bVReg;
_x86State.getListByKind(X86Reg::kKindVec)[bIndex] = aVReg;
// Same modified-flag transfer trick as in `swapGp()`.
uint32_t m = aVReg->isModified() ^ bVReg->isModified();
_x86State._modified.xor_(X86Reg::kKindVec, (m << aIndex) | (m << bIndex));
ASMJIT_X86_CHECK_STATE
}
// --------------------------------------------------------------------------
// [Alloc / Spill]
// --------------------------------------------------------------------------
//! Alloc.
//!
//! Make sure `vreg` lives in physical register `physId`, loading or moving
//! it as required. No-op when it is already there.
template<int C>
ASMJIT_INLINE void alloc(VirtReg* vreg, uint32_t physId) {
ASMJIT_ASSERT(vreg->getKind() == C);
ASMJIT_ASSERT(physId != Globals::kInvalidRegId);
uint32_t oldPhysId = vreg->getPhysId();
uint32_t oldState = vreg->getState();
uint32_t regMask = Utils::mask(physId);
ASMJIT_ASSERT(_x86State.getListByKind(C)[physId] == nullptr || physId == oldPhysId);
if (oldState != VirtReg::kStateReg) {
// Not in a register yet: load its value if it lives in memory.
if (oldState == VirtReg::kStateMem)
emitLoad(vreg, physId, "Alloc");
vreg->setModified(false);
}
else if (oldPhysId != physId) {
// Allocated elsewhere: move it and release the old slot. After this
// XOR `regMask` covers both the bit to set and the bit to clear.
emitMove(vreg, physId, oldPhysId, "Alloc");
_x86State.getListByKind(C)[oldPhysId] = nullptr;
regMask ^= Utils::mask(oldPhysId);
}
else {
// Already allocated in the requested register; nothing to do.
ASMJIT_X86_CHECK_STATE
return;
}
vreg->setState(VirtReg::kStateReg);
vreg->setPhysId(physId);
vreg->addHomeId(physId);
_x86State.getListByKind(C)[physId] = vreg;
_x86State._occupied.xor_(C, regMask);
_x86State._modified.xor_(C, regMask & -static_cast<int32_t>(vreg->isModified()));
ASMJIT_X86_CHECK_STATE
}
//! Spill.
//!
//! Spill variable/register, saves the content to the memory-home if modified.
template<int C>
ASMJIT_INLINE void spill(VirtReg* vreg) {
ASMJIT_ASSERT(vreg->getKind() == C);
if (vreg->getState() != VirtReg::kStateReg) {
// Not in a register, nothing to spill.
ASMJIT_X86_CHECK_STATE
return;
}
uint32_t physId = vreg->getPhysId();
ASMJIT_ASSERT(physId != Globals::kInvalidRegId);
ASMJIT_ASSERT(_x86State.getListByKind(C)[physId] == vreg);
// Unmodified registers still hold the memory-home value, no save needed.
if (vreg->isModified())
emitSave(vreg, physId, "Spill");
detach<C>(vreg, physId, VirtReg::kStateMem);
ASMJIT_X86_CHECK_STATE
}
// --------------------------------------------------------------------------
// [Modify]
// --------------------------------------------------------------------------
//! Mark `vreg` (which must be allocated) as modified so it gets saved on spill.
template<int C>
ASMJIT_INLINE void modify(VirtReg* vreg) {
ASMJIT_ASSERT(vreg->getKind() == C);
uint32_t physId = vreg->getPhysId();
uint32_t regMask = Utils::mask(physId);
vreg->setModified(true);
_x86State._modified.or_(C, regMask);
ASMJIT_X86_CHECK_STATE
}
// --------------------------------------------------------------------------
// [Unuse]
// --------------------------------------------------------------------------
//! Unuse.
//!
//! Unuse variable, it will be detached if it's allocated, then its state
//! will be changed to VirtReg::kStateNone.
template<int C>
ASMJIT_INLINE void unuse(VirtReg* vreg, uint32_t vState = VirtReg::kStateNone) {
ASMJIT_ASSERT(vreg->getKind() == C);
ASMJIT_ASSERT(vState != VirtReg::kStateReg);
uint32_t physId = vreg->getPhysId();
if (physId != Globals::kInvalidRegId)
detach<C>(vreg, physId, vState);
else
vreg->setState(vState);
ASMJIT_X86_CHECK_STATE
}
// --------------------------------------------------------------------------
// [State]
// --------------------------------------------------------------------------
//! Get state as `X86RAState`.
ASMJIT_INLINE X86RAState* getState() const { return const_cast<X86RAState*>(&_x86State); }
// State management overrides (implemented in the .cpp of this pass).
virtual void loadState(RAState* src) override;
virtual RAState* saveState() override;
virtual void switchState(RAState* src) override;
virtual void intersectStates(RAState* a, RAState* b) override;
// --------------------------------------------------------------------------
// [Memory]
// --------------------------------------------------------------------------
//! Get a memory operand referring to `vreg`s home slot; makes sure the
//! variable has a memory cell assigned first.
ASMJIT_INLINE X86Mem getVarMem(VirtReg* vreg) {
(void)getVarCell(vreg); // Side effect only: ensure a home cell exists.
return X86Mem(Init,
cc()->_nativeGpReg.getType(), vreg->getId(),
Reg::kRegNone, kInvalidValue,
0, 0, Mem::kSignatureMemRegHomeFlag);
}
// --------------------------------------------------------------------------
// [Fetch]
// --------------------------------------------------------------------------
virtual Error fetch() override;
// --------------------------------------------------------------------------
// [Annotate]
// --------------------------------------------------------------------------
virtual Error annotate() override;
// --------------------------------------------------------------------------
// [Translate]
// --------------------------------------------------------------------------
virtual Error translate() override;
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
//! Count of X86/X64 registers.
X86RegCount _regCount;
//! X86/X64 stack-pointer (esp or rsp).
X86Gp _zsp;
//! X86/X64 frame-pointer (ebp or rbp).
X86Gp _zbp;
//! X86/X64 specific compiler state, linked to `_state`.
X86RAState _x86State;
//! Clobbered registers (for the whole function).
X86RegMask _clobberedRegs;
//! Global allocable registers mask.
uint32_t _gaRegs[Globals::kMaxVRegKinds];
//! Whether AVX code may be emitted. NOTE(review): presumably derived from
//! CPU features/compiler options — confirm at the initialization site.
bool _avxEnabled;
//! Function variables base pointer (register).
uint8_t _varBaseRegId;
//! Function variables base offset.
int32_t _varBaseOffset;
//! Temporary string builder used for logging.
StringBuilderTmp<256> _stringBuilder;
};
//! \}
} // asmjit namespace
// [Api-End]
#include "../asmjit_apiend.h"
// [Guard]
#endif // !ASMJIT_DISABLE_COMPILER
#endif // _ASMJIT_X86_X86REGALLOC_P_H

View file

@ -616,6 +616,7 @@ file( GLOB HEADER_FILES
common/scripting/vm/*h
common/scripting/jit/*h
common/scripting/interface/*.h
common/scripting/backend/*.h
build/src/*.h
platform/win32/*.h
@ -786,10 +787,12 @@ set (PCH_SOURCES
common/engine/palettecontainer.cpp
common/engine/stringtable.cpp
common/engine/serializer.cpp
common/engine/m_random.cpp
common/objects/dobject.cpp
common/objects/dobjgc.cpp
common/objects/dobjtype.cpp
common/scripting/core/dictionary.cpp
common/scripting/core/dynarrays.cpp
common/scripting/core/symbols.cpp
common/scripting/core/types.cpp
common/scripting/core/scopebarrier.cpp
@ -797,6 +800,8 @@ set (PCH_SOURCES
common/scripting/vm/vmexec.cpp
common/scripting/vm/vmframe.cpp
common/scripting/interface/stringformat.cpp
common/scripting/backend/vmbuilder.cpp
common/scripting/backend/codegen.cpp
core/utility/stats.cpp
@ -959,6 +964,8 @@ include_directories(
common/scripting/vm
common/scripting/jit
common/scripting/core
common/scripting/interface
common/scripting/backend
${CMAKE_BINARY_DIR}/libraries/gdtoa
${SYSTEM_SOURCES_DIR}
@ -1075,6 +1082,8 @@ source_group("Common\\Objects" REGULAR_EXPRESSION "^${CMAKE_CURRENT_SOURCE_DIR}/
source_group("Common\\Fonts" REGULAR_EXPRESSION "^${CMAKE_CURRENT_SOURCE_DIR}/common/fonts/.+")
source_group("Common\\File System" REGULAR_EXPRESSION "^${CMAKE_CURRENT_SOURCE_DIR}/common/filesystem/.+")
source_group("Common\\Scripting" REGULAR_EXPRESSION "^${CMAKE_CURRENT_SOURCE_DIR}/common/scripting/.+")
source_group("Common\\Scripting\\Interface" REGULAR_EXPRESSION "^${CMAKE_CURRENT_SOURCE_DIR}/common/scripting/interface/.+")
source_group("Common\\Scripting\\Backend" REGULAR_EXPRESSION "^${CMAKE_CURRENT_SOURCE_DIR}/common/scripting/backend/.+")
source_group("Common\\Scripting\\Core" REGULAR_EXPRESSION "^${CMAKE_CURRENT_SOURCE_DIR}/common/scripting/core/.+")
source_group("Common\\Scripting\\JIT" REGULAR_EXPRESSION "^${CMAKE_CURRENT_SOURCE_DIR}/common/scripting/jit/.+")
source_group("Common\\Scripting\\VM" REGULAR_EXPRESSION "^${CMAKE_CURRENT_SOURCE_DIR}/common/scripting/vm/.+")

View file

@ -0,0 +1,401 @@
/*
** m_random.cpp
** Random number generators
**
**---------------------------------------------------------------------------
** Copyright 2002-2009 Randy Heit
** All rights reserved.
**
** Redistribution and use in source and binary forms, with or without
** modification, are permitted provided that the following conditions
** are met:
**
** 1. Redistributions of source code must retain the above copyright
** notice, this list of conditions and the following disclaimer.
** 2. Redistributions in binary form must reproduce the above copyright
** notice, this list of conditions and the following disclaimer in the
** documentation and/or other materials provided with the distribution.
** 3. The name of the author may not be used to endorse or promote products
** derived from this software without specific prior written permission.
**
** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
** DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
** (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
**---------------------------------------------------------------------------
**
** This file employs the techniques for improving demo sync and backward
** compatibility that Lee Killough introduced with BOOM. However, none of
** the actual code he wrote is left. In contrast to BOOM, each RNG source
** in ZDoom is implemented as a separate class instance that provides an
** interface to the high-quality Mersenne Twister. See
** <http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html>.
**
** As Killough's description from m_random.h is still mostly relevant,
** here it is:
** killough 2/16/98:
**
** Make every random number generator local to each control-equivalent block.
** Critical for demo sync. The random number generators are made local to
** reduce the chances of sync problems. In Doom, if a single random number
** generator call was off, it would mess up all random number generators.
** This reduces the chances of it happening by making each RNG local to a
** control flow block.
**
** Notes to developers: if you want to reduce your demo sync hassles, follow
** this rule: for each call to P_Random you add, add a new class to the enum
** type below for each block of code which calls P_Random. If two calls to
** P_Random are not in "control-equivalent blocks", i.e. there are any cases
** where one is executed, and the other is not, put them in separate classes.
*/
// HEADER FILES ------------------------------------------------------------
#include <assert.h>
#include "m_random.h"
#include "serializer.h"
#include "m_crc32.h"
#include "c_dispatch.h"
#include "printf.h"
// MACROS ------------------------------------------------------------------
#define RAND_ID MAKE_ID('r','a','N','d')
// TYPES -------------------------------------------------------------------
// EXTERNAL FUNCTION PROTOTYPES --------------------------------------------
// PUBLIC FUNCTION PROTOTYPES ----------------------------------------------
// PRIVATE FUNCTION PROTOTYPES ---------------------------------------------
// EXTERNAL DATA DECLARATIONS ----------------------------------------------
FRandom pr_exrandom;
// PUBLIC DATA DEFINITIONS -------------------------------------------------
FRandom M_Random;
// Global seed. This is modified predictably to initialize every RNG.
uint32_t rngseed;
// Static RNG marker. This is only used when the RNG is set for each new game.
uint32_t staticrngseed;
bool use_staticrng;
// Allows checking or statically setting the global seed.
// Console command "rngseed get|set|clear":
//   get   - print the current global seed
//   set N - remember N as a static seed to apply at the start of the next game
//   clear - stop using the static seed
CCMD(rngseed)
{
	if (argv.argc() == 1)
	{
		Printf("Usage: rngseed get|set|clear\n");
		return;
	}
	if (stricmp(argv[1], "get") == 0)
	{
		// rngseed is a uint32_t — print with %u so seeds >= 2^31 don't
		// show up as negative numbers (previously used %d).
		Printf("rngseed is %u\n", rngseed);
	}
	else if (stricmp(argv[1], "set") == 0)
	{
		if (argv.argc() == 2)
		{
			Printf("You need to specify a value to set\n");
		}
		else
		{
			staticrngseed = atoi(argv[2]);
			use_staticrng = true;
			Printf("Static rngseed %u will be set for next game\n", staticrngseed);
		}
	}
	else if (stricmp(argv[1], "clear") == 0)
	{
		use_staticrng = false;
		Printf("Static rngseed cleared\n");
	}
}
// PRIVATE DATA DEFINITIONS ------------------------------------------------
FRandom *FRandom::RNGList;
static TDeletingArray<FRandom *> NewRNGs;
// CODE --------------------------------------------------------------------
//==========================================================================
//
// FRandom - Nameless constructor
//
// Constructing an RNG in this way means it won't be stored in savegames.
//
//==========================================================================
FRandom::FRandom ()
{
	// A CRC of 0 marks this RNG as nameless; nameless RNGs are never
	// written to savegames (see StaticWriteRNGState).
	NameCRC = 0;
#ifndef NDEBUG
	Name = nullptr;
#endif
	// Nameless RNGs don't need sorting, just push onto the list head.
	Next = RNGList;
	RNGList = this;
	Init(0);
}
//==========================================================================
//
// FRandom - Named constructor
//
// This is the standard way to construct RNGs.
//
//==========================================================================
FRandom::FRandom (const char *name)
{
	// RNGs are identified by the CRC of their name in savegames.
	NameCRC = CalcCRC32 ((const uint8_t *)name, (unsigned int)strlen (name));
#ifndef NDEBUG
	Name = name;
	// A CRC of 0 is reserved for nameless RNGs that don't get stored
	// in savegames. The chance is very low that you would get a CRC of 0,
	// but it's still possible.
	assert (NameCRC != 0);
#endif
	// Find the link where this RNG belongs, keeping the list sorted by CRC.
	FRandom **insert = &RNGList;
	while (*insert != nullptr && (*insert)->NameCRC < NameCRC)
	{
		insert = &(*insert)->Next;
	}
#ifndef NDEBUG
	if (*insert != nullptr)
	{
		// Because RNGs are identified by their CRCs in save games,
		// no two RNGs can have names that hash to the same CRC.
		// Obviously, this means every RNG must have a unique name.
		assert ((*insert)->NameCRC != NameCRC);
	}
#endif
	Next = *insert;
	*insert = this;
	Init(0);
}
//==========================================================================
//
// FRandom - Destructor
//
//==========================================================================
FRandom::~FRandom ()
{
	// Unlink this RNG from the global list.
	//
	// FIX: `prev` must advance together with the probe. The previous code
	// left `prev` pointing at &RNGList for the whole walk, so destroying
	// any RNG other than the list head rewrote RNGList itself and dropped
	// every RNG sorted before this one. (The unused `last` tracker that
	// shadowed this logic has been removed.)
	FRandom *rng, **prev;
	prev = &RNGList;
	rng = RNGList;
	while (rng != NULL && rng != this)
	{
		prev = &rng->Next;
		rng = rng->Next;
	}
	if (rng != NULL)
	{
		*prev = rng->Next;
	}
}
//==========================================================================
//
// FRandom :: StaticClearRandom
//
// Initialize every RNGs. RNGs are seeded based on the global seed and their
// name, so each different RNG can have a different starting value despite
// being derived from a common global seed.
//
//==========================================================================
void FRandom::StaticClearRandom ()
{
// go through each RNG and set each starting seed differently
for (FRandom *rng = FRandom::RNGList; rng != NULL; rng = rng->Next)
{
rng->Init(rngseed);
}
}
//==========================================================================
//
// FRandom :: Init
//
// Initialize a single RNG with a given seed.
//
//==========================================================================
//! Seed this RNG. The name's CRC is folded into `seed` by SFMTObj::Init so
//! every named RNG derives a distinct stream from the same global seed.
void FRandom::Init(uint32_t seed)
{
// [RH] Use the RNG's name's CRC to modify the original seed.
// This way, new RNGs can be added later, and it doesn't matter
// which order they get initialized in.
SFMTObj::Init(NameCRC, seed);
}
//==========================================================================
//
// FRandom :: StaticWriteRNGState
//
// Stores the state of every RNG into a savegame.
//
//==========================================================================
// Savegame layout: the global "rngseed", then a "rngs" array holding one
// {crc, index, u} object per *named* RNG. Nameless RNGs (NameCRC == 0) are
// deliberately skipped; `idx` and `sfmt.u` are the SFMT position and state.
void FRandom::StaticWriteRNGState (FSerializer &arc)
{
FRandom *rng;
arc("rngseed", rngseed);
if (arc.BeginArray("rngs"))
{
for (rng = FRandom::RNGList; rng != NULL; rng = rng->Next)
{
// Only write those RNGs that have names
if (rng->NameCRC != 0)
{
if (arc.BeginObject(nullptr))
{
arc("crc", rng->NameCRC)
("index", rng->idx)
.Array("u", rng->sfmt.u, SFMT::N32)
.EndObject();
}
}
}
arc.EndArray();
}
}
//==========================================================================
//
// FRandom :: StaticReadRNGState
//
// Restores the state of every RNG from a savegame. RNGs that were added
// since the savegame was created are cleared to their initial value.
//
//==========================================================================
// Restores the state written by StaticWriteRNGState. Every RNG is reseeded
// first, so generators that were added after the savegame was created (and
// are therefore absent from the "rngs" array) still start well-defined;
// saved entries are then matched back to live RNGs by their name CRC.
void FRandom::StaticReadRNGState(FSerializer &arc)
{
FRandom *rng;
arc("rngseed", rngseed);
// Call StaticClearRandom in order to ensure that SFMT is initialized
FRandom::StaticClearRandom ();
if (arc.BeginArray("rngs"))
{
int count = arc.ArraySize();
for (int i = 0; i < count; i++)
{
if (arc.BeginObject(nullptr))
{
uint32_t crc;
arc("crc", crc);
// Find the live RNG with this CRC. Entries whose RNG no longer
// exists are simply skipped (object is still closed below).
for (rng = FRandom::RNGList; rng != NULL; rng = rng->Next)
{
if (rng->NameCRC == crc)
{
arc("index", rng->idx)
.Array("u", rng->sfmt.u, SFMT::N32);
break;
}
}
arc.EndObject();
}
}
arc.EndArray();
}
}
//==========================================================================
//
// FRandom :: StaticFindRNG
//
// This function attempts to find an RNG with the given name.
// If it can't it will create a new one. Duplicate CRCs will
// be ignored and if it happens map to the same RNG.
// This is for use by DECORATE.
//
//==========================================================================
FRandom *FRandom::StaticFindRNG (const char *name)
{
	uint32_t crc = CalcCRC32 ((const uint8_t *)name, (unsigned int)strlen (name));
	// CRC 0 is reserved for nameless RNGs; map such a name to the default RNG.
	if (crc == 0)
		return &pr_exrandom;
	// Walk the CRC-sorted list up to the first entry that could match.
	FRandom *probe = RNGList;
	while (probe != nullptr && probe->NameCRC < crc)
	{
		probe = probe->Next;
	}
	if (probe == nullptr || probe->NameCRC != crc)
	{
		// No RNG with this CRC exists yet, so create one. The named
		// constructor inserts it into the list itself.
		probe = new FRandom(name);
		// Keep it for destruction when ZDoom quits.
		NewRNGs.Push(probe);
	}
	// Either the pre-existing match or the freshly created RNG.
	return probe;
}
//==========================================================================
//
// FRandom :: StaticPrintSeeds
//
// Prints a snapshot of the current RNG states. This is probably wrong.
//
//==========================================================================
#ifndef NDEBUG
// Debug dump of every registered RNG's current position and state word.
// FIX: nameless RNGs (built with the default constructor, e.g. M_Random)
// have Name == NULL; passing NULL to a %s format is undefined behavior,
// so substitute a placeholder for them.
void FRandom::StaticPrintSeeds ()
{
	FRandom *rng = RNGList;
	while (rng != NULL)
	{
		// Clamp the read position into the state array.
		int idx = rng->idx < SFMT::N32 ? rng->idx : 0;
		Printf ("%s: %08x .. %d\n", rng->Name != NULL ? rng->Name : "<nameless>", rng->sfmt.u[idx], idx);
		rng = rng->Next;
	}
}
// Console command (debug builds only): dump the state of every RNG.
CCMD (showrngs)
{
FRandom::StaticPrintSeeds ();
}
#endif

View file

@ -0,0 +1,193 @@
/*
** m_random.h
** Random number generators
**
**---------------------------------------------------------------------------
** Copyright 2002-2009 Randy Heit
** All rights reserved.
**
** Redistribution and use in source and binary forms, with or without
** modification, are permitted provided that the following conditions
** are met:
**
** 1. Redistributions of source code must retain the above copyright
** notice, this list of conditions and the following disclaimer.
** 2. Redistributions in binary form must reproduce the above copyright
** notice, this list of conditions and the following disclaimer in the
** documentation and/or other materials provided with the distribution.
** 3. The name of the author may not be used to endorse or promote products
** derived from this software without specific prior written permission.
**
** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
** DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
** (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
**---------------------------------------------------------------------------
**
*/
#ifndef __M_RANDOM__
#define __M_RANDOM__
#include <stdio.h>
#include "basics.h"
#include "SFMT/SFMTObj.h"
class FSerializer;
//! A named random number generator backed by the SFMT Mersenne Twister.
//! Each instance is registered in a global, CRC-sorted list so its state can
//! be saved and restored by name (see the Static* interface below).
class FRandom : public SFMTObj
{
public:
FRandom ();
FRandom (const char *name);
~FRandom ();
// Returns a random number in the range [0,255]
int operator()()
{
return GenRand32() & 255;
}
// Returns a random number in the range [0,mod)
int operator() (int mod)
{
return (0 == mod)
? 0
: (GenRand32() % mod);
}
// Returns rand# - rand#
int Random2()
{
return Random2(255);
}
// Returns (rand# & mask) - (rand# & mask)
int Random2(int mask)
{
int t = GenRand32() & mask & 255;
return t - (GenRand32() & mask & 255);
}
// HITDICE macro used in Heretic and Hexen
int HitDice(int count)
{
return (1 + (GenRand32() & 7)) * count;
}
int Random() // synonym for ()
{
return operator()();
}
// Seed this RNG; the name CRC is mixed in (see m_random.cpp).
void Init(uint32_t seed);
/* These real versions are due to Isaku Wada */
/** generates a random number on [0,1]-real-interval */
static inline double ToReal1(uint32_t v)
{
return v * (1.0/4294967295.0);
/* divided by 2^32-1 */
}
/** generates a random number on [0,1]-real-interval */
inline double GenRand_Real1()
{
return ToReal1(GenRand32());
}
/** generates a random number on [0,1)-real-interval */
static inline double ToReal2(uint32_t v)
{
return v * (1.0/4294967296.0);
/* divided by 2^32 */
}
/** generates a random number on [0,1)-real-interval */
inline double GenRand_Real2()
{
return ToReal2(GenRand32());
}
/** generates a random number on (0,1)-real-interval */
static inline double ToReal3(uint32_t v)
{
return (((double)v) + 0.5)*(1.0/4294967296.0);
/* divided by 2^32 */
}
/** generates a random number on (0,1)-real-interval */
inline double GenRand_Real3(void)
{
return ToReal3(GenRand32());
}
/** These real versions are due to Isaku Wada */
/** generates a random number on [0,1) with 53-bit resolution*/
static inline double ToRes53(uint64_t v)
{
return v * (1.0/18446744073709551616.0L);
}
/** generates a random number on [0,1) with 53-bit resolution from two
* 32 bit integers */
static inline double ToRes53Mix(uint32_t x, uint32_t y)
{
return ToRes53(x | ((uint64_t)y << 32));
}
/** generates a random number on [0,1) with 53-bit resolution
*/
inline double GenRand_Res53(void)
{
return ToRes53(GenRand64());
}
/** generates a random number on [0,1) with 53-bit resolution
using 32bit integer.
*/
inline double GenRand_Res53_Mix()
{
uint32_t x, y;
x = GenRand32();
y = GenRand32();
return ToRes53Mix(x, y);
}
// Static interface
static void StaticClearRandom ();
static void StaticReadRNGState (FSerializer &arc);
static void StaticWriteRNGState (FSerializer &file);
static FRandom *StaticFindRNG(const char *name);
#ifndef NDEBUG
static void StaticPrintSeeds ();
#endif
private:
#ifndef NDEBUG
// Debug-only copy of the constructor's name (NULL for nameless RNGs).
const char *Name;
#endif
// Next RNG in the global CRC-sorted list.
FRandom *Next;
// CRC32 of the RNG's name; 0 marks a nameless RNG that is never saved.
uint32_t NameCRC;
// Head of the global list of all constructed RNGs.
static FRandom *RNGList;
};
extern uint32_t rngseed;		// The starting seed (not part of state)
extern uint32_t staticrngseed;	// Static rngseed that can be set by the user
extern bool use_staticrng;		// when true, staticrngseed presumably overrides rngseed -- confirm in implementation

// M_Random can be used for numbers that do not affect gameplay
extern FRandom M_Random;
#endif

View file

@ -57,7 +57,6 @@
// PRIVATE FUNCTION PROTOTYPES ---------------------------------------------
// EXTERNAL DATA DECLARATIONS ----------------------------------------------
EXTERN_CVAR(Bool, strictdecorate);
// PUBLIC DATA DEFINITIONS -------------------------------------------------
FMemArena ClassDataAllocator(32768); // use this for all static class data that can be released in bulk when the type system is shut down.

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,236 @@
#ifndef VMUTIL_H
#define VMUTIL_H
#include "dobject.h"
#include "vmintern.h"
#include <vector>
#include <functional>
class VMFunctionBuilder;
class FxExpression;
class FxLocalVariableDeclaration;
// Describes where a value lives during code generation: a VM register range,
// or a constant-table slot when Konst is set.
struct ExpEmit
{
	// Default state denotes "no value" (register type REGT_NIL).
	ExpEmit() : RegNum(0), RegType(REGT_NIL), RegCount(1), Konst(false), Fixed(false), Final(false), Target(false) {}
	ExpEmit(int reg, int type, bool konst = false, bool fixed = false) : RegNum(reg), RegType(type), RegCount(1), Konst(konst), Fixed(fixed), Final(false), Target(false) {}
	// Allocates a fresh register (range) of the given type from the builder.
	ExpEmit(VMFunctionBuilder *build, int type, int count = 1);

	// Return the register(s) to the builder's free pool / mark for reuse.
	void Free(VMFunctionBuilder *build);
	void Reuse(VMFunctionBuilder *build);

	uint16_t RegNum;			// first register of the range (or constant index when Konst)
	uint8_t RegType, RegCount;	// REGT_* type and number of consecutive registers
	// We are at 8 bytes for this struct, no matter what, so it's rather pointless to squeeze these flags into bitfields.
	bool Konst, Fixed, Final, Target;
};
// Incrementally assembles the bytecode, constant tables and register usage
// information for one VM script function, then bakes the result into a
// VMScriptFunction via MakeFunction().
class VMFunctionBuilder
{
public:
	// Keeps track of which registers are available by way of a bitmask table.
	class RegAvailability
	{
	public:
		RegAvailability();
		int GetMostUsed() { return MostUsed; }
		int Get(int count);				// Returns the first register in the range
		void Return(int reg, int count);
		bool Reuse(int regnum);

		// Tests the Dirty bit for a single register.
		bool IsDirty(int reg) const
		{
			const int firstword = reg / 32;
			const int firstbit = reg & 31;
			// FIX: shift an unsigned literal. '1 << 31' shifts into the sign bit
			// of a signed int (UB before C++14) and would sign-extend into the
			// upper bits if VM_UWORD were ever wider than int.
			return Dirty[firstword] & (1u << firstbit);
		}

	private:
		VM_UWORD Used[256/32];		// Bitmap of used registers (bit set means reg is used)
		VM_UWORD Dirty[256/32];		// Dirty bits per register; queried via IsDirty()
		int MostUsed;				// high-water mark of register usage

		friend class VMFunctionBuilder;
	};

	VMFunctionBuilder(int numimplicits);
	~VMFunctionBuilder();

	// Statement bracketing for source line number tracking.
	void BeginStatement(FxExpression *stmt);
	void EndStatement();
	// Transfers the accumulated code/constants into 'func'.
	void MakeFunction(VMScriptFunction *func);

	// Returns the constant register holding the value.
	unsigned GetConstantInt(int val);
	unsigned GetConstantFloat(double val);
	unsigned GetConstantAddress(void *ptr);
	unsigned GetConstantString(FString str);

	// Bulk allocation of consecutive constant-table entries.
	unsigned AllocConstantsInt(unsigned int count, int *values);
	unsigned AllocConstantsFloat(unsigned int count, double *values);
	unsigned AllocConstantsAddress(unsigned int count, void **ptrs);
	unsigned AllocConstantsString(unsigned int count, FString *ptrs);

	// Returns the address of the next instruction to be emitted.
	size_t GetAddress();

	// Returns the address of the newly-emitted instruction.
	size_t Emit(int opcode, int opa, int opb, int opc);
	size_t Emit(int opcode, int opa, VM_SHALF opbc);
	size_t Emit(int opcode, int opabc);
	size_t EmitLoadInt(int regnum, int value);
	size_t EmitRetInt(int retnum, bool final, int value);

	// Patch previously emitted jump instruction(s) to a now-known target.
	void Backpatch(size_t addr, size_t target);
	void BackpatchToHere(size_t addr);
	void BackpatchList(TArray<size_t> &addrs, size_t target);
	void BackpatchListToHere(TArray<size_t> &addrs);

	// Write out complete constant tables.
	void FillIntConstants(int *konst);
	void FillFloatConstants(double *konst);
	void FillAddressConstants(FVoidObj *konst);
	void FillStringConstants(FString *strings);

	// PARAM increases ActiveParam; CALL decreases it.
	void ParamChange(int delta);

	// Track available registers.
	RegAvailability Registers[4];

	// amount of implicit parameters so that proper code can be emitted for method calls
	int NumImplicits;

	// keep the frame pointer, if needed, in a register because the LFP opcode is hideously inefficient, requiring more than 20 instructions on x64.
	ExpEmit FramePointer;
	TArray<FxLocalVariableDeclaration *> ConstructedStructs;

private:
	TArray<FStatementInfo> LineNumbers;		// per-statement source info recorded by Begin/EndStatement
	TArray<FxExpression *> StatementStack;	// currently open statements

	// Constant pools, one per value category.
	TArray<int> IntConstantList;
	TArray<double> FloatConstantList;
	TArray<void *> AddressConstantList;
	TArray<FString> StringConstantList;
	// These map from the constant value to its position in the constant table.
	TMap<int, unsigned> IntConstantMap;
	TMap<double, unsigned> FloatConstantMap;
	TMap<void *, unsigned> AddressConstantMap;
	TMap<FString, unsigned> StringConstantMap;

	int MaxParam;		// high-water mark of ActiveParam
	int ActiveParam;	// parameters pushed but not yet consumed by a CALL

	TArray<VMOP> Code;	// the emitted instruction stream
};
// Writes a human-readable disassembly of sfunc (labeled with 'label') to 'dump'.
void DumpFunction(FILE *dump, VMScriptFunction *sfunc, const char *label, int labellen);

//==========================================================================
//
//
//
//==========================================================================
class FxExpression;
// Queue of deferred function compilations: scripted functions are collected
// here and compiled in one pass by Build().
class FFunctionBuildList
{
	// One pending compilation job and the context needed to perform it.
	struct Item
	{
		PFunction *Func = nullptr;				// destination function object
		FxExpression *Code = nullptr;			// the expression tree to compile
		PPrototype *Proto = nullptr;
		VMScriptFunction *Function = nullptr;	// presumably filled in during Build() -- confirm
		PNamespace *CurGlobals = nullptr;		// namespace the code was declared in
		FString PrintableName;					// name used for diagnostics
		int StateIndex;
		int StateCount;
		int Lump;								// source lump, for error messages
		VersionInfo Version;
		bool FromDecorate;						// true when the source is DECORATE rather than ZScript
	};

	TArray<Item> mItems;

	void DumpJit();
public:
	// Queues 'code' for later compilation into 'func'; returns the function wrapper.
	VMFunction *AddFunction(PNamespace *curglobals, const VersionInfo &ver, PFunction *func, FxExpression *code, const FString &name, bool fromdecorate, int currentstate, int statecnt, int lumpnum);
	// Compiles every queued item.
	void Build();
};
extern FFunctionBuildList FunctionBuildList;
//==========================================================================
//
// Function call parameter collector
//
//==========================================================================
extern int EncodeRegType(ExpEmit reg);
// Collects the parameters and expected returns of one VM function call, then
// emits the complete parameter/call instruction sequence in EmitCall().
class FunctionCallEmitter
{
	// std::function and TArray are not compatible so this has to use std::vector instead.
	std::vector<std::function<int(VMFunctionBuilder *)>> emitters;	// deferred per-parameter emit callbacks
	TArray<std::pair<int, int>> returns;	// (register type, register count) per expected return value
	TArray<uint8_t> reginfo;				// encoded register type per pushed parameter
	unsigned numparams = 0;	// This counts the number of pushed elements, which can differ from the number of emitters with vectors.
	VMFunction *target = nullptr;			// the function being called
	int virtualselfreg = -1;				// register holding 'self' for virtual dispatch, -1 if not virtual

public:
	FunctionCallEmitter(VMFunction *func)
	{
		target = func;
	}

	// Marks this as a virtual call dispatched through the object in 'virtreg'.
	void SetVirtualReg(int virtreg)
	{
		virtualselfreg = virtreg;
	}

	// Parameter registration, one overload per value category.
	void AddParameter(VMFunctionBuilder *build, FxExpression *operand);
	void AddParameter(ExpEmit &emit, bool reference);
	void AddParameterPointerConst(void *konst);
	void AddParameterPointer(int index, bool konst);
	void AddParameterFloatConst(double konst);
	void AddParameterIntConst(int konst);
	void AddParameterStringConst(const FString &konst);
	// Emits all queued parameters and the call itself; returns the (first) result register.
	ExpEmit EmitCall(VMFunctionBuilder *build, TArray<ExpEmit> *ReturnRegs = nullptr);
	// Declares one expected return value.
	void AddReturn(int regtype, int regcount = 1)
	{
		returns.Push({ regtype, regcount });
	}
	// Number of parameter slots pushed so far.
	unsigned Count() const
	{
		return numparams;
	}
};
// Writes VM disassembly of compiled script functions to a dump file.
class VMDisassemblyDumper
{
public:
	// Whether the dump file is recreated or appended to.
	enum FileOperationType
	{
		Overwrite,
		Append
	};

	explicit VMDisassemblyDumper(const FileOperationType operation);
	~VMDisassemblyDumper();

	// Disassembles sfunc into the dump file, labeled with fname.
	void Write(VMScriptFunction *sfunc, const FString &fname);
	void Flush();

private:
	FILE *dump = nullptr;	// nullptr presumably means dumping is disabled -- confirm in the .cpp
	FString namefilter;		// NOTE(review): likely restricts which functions get dumped -- verify
	int codesize = 0;		// running totals across all written functions
	int datasize = 0;
};
#endif

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,111 @@
//
//---------------------------------------------------------------------------
//
// Copyright(C) 2018 Christoph Oelckers
// All rights reserved.
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with this program. If not, see http://www.gnu.org/licenses/
//
//--------------------------------------------------------------------------
//
/*
** gl_viewpointbuffer.cpp
** Buffer data maintenance for per viewpoint uniform data
**
**/
#include "hwrenderer/data/shaderuniforms.h"
#include "hwrenderer/scene/hw_viewpointuniforms.h"
#include "hwrenderer/scene/hw_drawinfo.h"
#include "hwrenderer/scene/hw_renderstate.h"
#include "hw_viewpointbuffer.h"
static const int INITIAL_BUFFER_SIZE = 100; // 100 viewpoints per frame should nearly always be enough
// Sets up the GPU buffer that holds one HWViewpointUniforms block per viewpoint.
HWViewpointBuffer::HWViewpointBuffer()
{
	// Start with room for a fixed number of viewpoints; CheckSize() doubles it on demand.
	mBufferSize = INITIAL_BUFFER_SIZE;
	// Round one uniform block up to the hardware's uniform block alignment.
	// (NOTE: this always adds one full alignment unit, even when the size is
	// already aligned -- kept as-is to preserve the existing layout.)
	mBlockAlign = ((sizeof(HWViewpointUniforms) / screen->uniformblockalignment) + 1) * screen->uniformblockalignment;
	// FIX: mBlockSize was never assigned anywhere, so GetBlockSize() returned
	// an indeterminate value. The aligned per-viewpoint size is the assumed
	// intent -- confirm against GetBlockSize() callers.
	mBlockSize = mBlockAlign;
	mByteSize = mBufferSize * mBlockAlign;
	mBuffer = screen->CreateDataBuffer(VIEWPOINT_BINDINGPOINT, false, true);
	mBuffer->SetData(mByteSize, nullptr, false);
	Clear();
	// Force the first Bind() to actually bind.
	mLastMappedIndex = UINT_MAX;
	// Slot 0 is the 2D projection; it never uses a clip plane.
	mClipPlaneInfo.Push(0);
}
// Releases the GPU buffer; this class owns it exclusively.
HWViewpointBuffer::~HWViewpointBuffer()
{
	delete mBuffer;
}
void HWViewpointBuffer::CheckSize()
{
if (mUploadIndex >= mBufferSize)
{
mBufferSize *= 2;
mByteSize *= 2;
mBuffer->Resize(mByteSize);
m2DHeight = m2DWidth = -1;
}
}
// Binds the uniform block for the given viewpoint slot and enables its clip
// plane state. Redundant rebinds of the already-active slot are skipped.
int HWViewpointBuffer::Bind(FRenderState &di, unsigned int index)
{
	// Guard clause: nothing to do if this slot is already bound.
	if (mLastMappedIndex == index) return index;

	mLastMappedIndex = index;
	mBuffer->BindRange(&di, index * mBlockAlign, mBlockAlign);
	di.EnableClipDistance(0, mClipPlaneInfo[index]);
	return index;
}
// Uploads (when the target size changed) and binds the reserved 2D projection
// block in slot 0.
void HWViewpointBuffer::Set2D(FRenderState &di, int width, int height)
{
	const bool sizeChanged = (width != m2DWidth) || (height != m2DHeight);
	if (sizeChanged)
	{
		// Build a default viewpoint with an orthographic projection for 2D drawing.
		HWViewpointUniforms matrices;
		matrices.SetDefaults(nullptr);
		matrices.mProjectionMatrix.ortho(0, (float)width, (float)height, 0, -1.0f, 1.0f);
		matrices.CalcDependencies();

		mBuffer->Map();
		memcpy(mBuffer->Memory(), &matrices, sizeof(matrices));
		mBuffer->Unmap();

		m2DWidth = width;
		m2DHeight = height;
		// Force the Bind() below to take effect.
		mLastMappedIndex = UINT_MAX;
	}
	Bind(di, 0);
}
// Uploads one viewpoint's uniform block into the next free slot, records its
// clip plane state, binds it, and returns the slot index.
int HWViewpointBuffer::SetViewpoint(FRenderState &di, HWViewpointUniforms *vp)
{
	CheckSize();

	// Copy the uniform block into this viewpoint's slot.
	mBuffer->Map();
	char *const base = ((char*)mBuffer->Memory());
	memcpy(base + mUploadIndex * mBlockAlign, vp, sizeof(*vp));
	mBuffer->Unmap();

	// Remember whether this viewpoint needs a clip plane when bound.
	const bool needsClipPlane = vp->mClipHeightDirection != 0.f || vp->mClipLine.X > -10000000.0f;
	mClipPlaneInfo.Push(needsClipPlane);

	return Bind(di, mUploadIndex++);
}
// Resets the buffer for a new frame, keeping only the reserved 2D slot.
void HWViewpointBuffer::Clear()
{
	// Index 0 is reserved for the 2D projection.
	mUploadIndex = 1;
	// Keep the clip plane entry for slot 0, drop the rest.
	mClipPlaneInfo.Resize(1);
}

View file

@ -0,0 +1,35 @@
#pragma once	// FIX: this header had no include guard; double inclusion would redefine the class.

#include "tarray.h"
#include "hwrenderer/data/buffers.h"

struct HWViewpointUniforms;
class FRenderState;

// Manages a GPU buffer holding one HWViewpointUniforms block per viewpoint
// rendered in the current frame. Slot 0 is reserved for the 2D projection.
class HWViewpointBuffer
{
	IDataBuffer *mBuffer;				// owned GPU-side buffer object
	unsigned int mBufferSize;			// capacity, in viewpoint blocks
	unsigned int mBlockAlign;			// aligned byte size of one block
	unsigned int mUploadIndex;			// next free slot
	unsigned int mLastMappedIndex;		// last bound slot; UINT_MAX = none
	unsigned int mByteSize;				// capacity in bytes
	TArray<bool> mClipPlaneInfo;		// per slot: does this viewpoint use a clip plane?
	int m2DWidth = -1, m2DHeight = -1;	// cached size of the uploaded 2D projection
	// FIX: was never initialized (nothing visible assigns it), so GetBlockSize()
	// could return garbage; default to 0. NOTE(review): confirm intended value.
	unsigned int mBlockSize = 0;

	void CheckSize();

public:

	HWViewpointBuffer();
	~HWViewpointBuffer();
	void Clear();
	int Bind(FRenderState &di, unsigned int index);
	void Set2D(FRenderState &di, int width, int height);
	int SetViewpoint(FRenderState &di, HWViewpointUniforms *vp);
	unsigned int GetBlockSize() const { return mBlockSize; }
};

View file

@ -0,0 +1,33 @@
#pragma once
#include "matrix.h"
#include "r_utility.h"
struct HWDrawInfo;
// Per-viewpoint uniform data uploaded to the GPU (see HWViewpointBuffer).
struct HWViewpointUniforms
{
	VSMatrix mProjectionMatrix;
	VSMatrix mViewMatrix;
	VSMatrix mNormalViewMatrix;		// derived from mViewMatrix by CalcDependencies()
	FVector4 mCameraPos;
	FVector4 mClipLine;
	float mGlobVis = 1.f;			// global visibility factor -- semantics defined by the shaders
	int mPalLightLevels = 0;
	int mViewHeight = 0;
	float mClipHeight = 0.f;
	float mClipHeightDirection = 0.f;	// 0 means no height clip plane is active
	int mShadowmapFilter = 1;		// NOTE(review): filter mode index, presumably -- confirm against shader code

	// Recomputes members derived from others; call after changing mViewMatrix.
	void CalcDependencies()
	{
		mNormalViewMatrix.computeNormalMatrix(mViewMatrix);
	}

	void SetDefaults(HWDrawInfo *drawInfo);
};