/*
 * Copyright (C) 2009, 2012 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef MacroAssemblerCodeRef_h
#define MacroAssemblerCodeRef_h

#include "Disassembler.h"
#include "ExecutableAllocator.h"
#include "LLIntData.h"
#include <wtf/DataLog.h>
#include <wtf/PassRefPtr.h>
#include <wtf/PrintStream.h>
#include <wtf/RefPtr.h>

// ASSERT_VALID_CODE_POINTER checks that ptr is a non-null pointer, and that it is a valid
// instruction address on the platform (for example, check any alignment requirements).
#if CPU(ARM_THUMB2) && ENABLE(JIT)
// ARM instructions must be 16-bit aligned. Thumb2 code pointers to be loaded into
// the processor are decorated with the bottom bit set, while traditional ARM has
// the lower bit clear. Since we don't know what kind of pointer we have, we check
// for both decorated and undecorated null.
#define ASSERT_VALID_CODE_POINTER(ptr) \
    ASSERT(reinterpret_cast<intptr_t>(ptr) & ~1)
#define ASSERT_VALID_CODE_OFFSET(offset) \
    ASSERT(!(offset & 1)) // Must be multiple of 2.
#else
#define ASSERT_VALID_CODE_POINTER(ptr) \
    ASSERT(ptr)
#define ASSERT_VALID_CODE_OFFSET(offset) // Anything goes!
#endif

#if CPU(X86) && OS(WINDOWS)
#define CALLING_CONVENTION_IS_STDCALL 1
#ifndef CDECL
#if COMPILER(MSVC)
#define CDECL __cdecl
#else
#define CDECL __attribute__ ((__cdecl))
#endif // COMPILER(MSVC)
#endif // CDECL
#else
#define CALLING_CONVENTION_IS_STDCALL 0
#endif

#if CPU(X86)
#define HAS_FASTCALL_CALLING_CONVENTION 1
#ifndef FASTCALL
#if COMPILER(MSVC)
#define FASTCALL __fastcall
#else
#define FASTCALL __attribute__ ((fastcall))
#endif // COMPILER(MSVC)
#endif // FASTCALL
#else
#define HAS_FASTCALL_CALLING_CONVENTION 0
#endif // CPU(X86)

namespace JSC {

// FunctionPtr:
//
// FunctionPtr should be used to wrap pointers to C/C++ functions in JSC
// (particularly, the stub functions).
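//
// A minimal usage sketch (illustrative only; 'exampleStub' is a hypothetical
// function, not one of JSC's real stubs): the templated constructors below accept
// an ordinary function pointer and store it as an untyped code address.
//
//     double exampleStub(int, int);
//     FunctionPtr stub(exampleStub);            // the two-argument constructor overload is deduced
//     void* target = stub.executableAddress();  // raw address, same as stub.value()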
class FunctionPtr {
public:
    FunctionPtr()
        : m_value(0)
    {
    }

    template<typename returnType>
    FunctionPtr(returnType(*value)()) : m_value((void*)value) { ASSERT_VALID_CODE_POINTER(m_value); }

    template<typename returnType, typename argType1>
    FunctionPtr(returnType(*value)(argType1)) : m_value((void*)value) { ASSERT_VALID_CODE_POINTER(m_value); }

    template<typename returnType, typename argType1, typename argType2>
    FunctionPtr(returnType(*value)(argType1, argType2)) : m_value((void*)value) { ASSERT_VALID_CODE_POINTER(m_value); }

    template<typename returnType, typename argType1, typename argType2, typename argType3>
    FunctionPtr(returnType(*value)(argType1, argType2, argType3)) : m_value((void*)value) { ASSERT_VALID_CODE_POINTER(m_value); }

    template<typename returnType, typename argType1, typename argType2, typename argType3, typename argType4>
    FunctionPtr(returnType(*value)(argType1, argType2, argType3, argType4)) : m_value((void*)value) { ASSERT_VALID_CODE_POINTER(m_value); }

    template<typename returnType, typename argType1, typename argType2, typename argType3, typename argType4, typename argType5>
    FunctionPtr(returnType(*value)(argType1, argType2, argType3, argType4, argType5)) : m_value((void*)value) { ASSERT_VALID_CODE_POINTER(m_value); }

    template<typename returnType, typename argType1, typename argType2, typename argType3, typename argType4, typename argType5, typename argType6>
    FunctionPtr(returnType(*value)(argType1, argType2, argType3, argType4, argType5, argType6)) : m_value((void*)value) { ASSERT_VALID_CODE_POINTER(m_value); }

// MSVC doesn't seem to treat functions with different calling conventions as
// different types; these methods are already defined for fastcall, below.
#if CALLING_CONVENTION_IS_STDCALL && !OS(WINDOWS)

    template<typename returnType>
    FunctionPtr(returnType (CDECL *value)()) : m_value((void*)value) { ASSERT_VALID_CODE_POINTER(m_value); }

    template<typename returnType, typename argType1>
    FunctionPtr(returnType (CDECL *value)(argType1)) : m_value((void*)value) { ASSERT_VALID_CODE_POINTER(m_value); }

    template<typename returnType, typename argType1, typename argType2>
    FunctionPtr(returnType (CDECL *value)(argType1, argType2)) : m_value((void*)value) { ASSERT_VALID_CODE_POINTER(m_value); }

    template<typename returnType, typename argType1, typename argType2, typename argType3>
    FunctionPtr(returnType (CDECL *value)(argType1, argType2, argType3)) : m_value((void*)value) { ASSERT_VALID_CODE_POINTER(m_value); }

    template<typename returnType, typename argType1, typename argType2, typename argType3, typename argType4>
    FunctionPtr(returnType (CDECL *value)(argType1, argType2, argType3, argType4)) : m_value((void*)value) { ASSERT_VALID_CODE_POINTER(m_value); }
#endif

#if HAS_FASTCALL_CALLING_CONVENTION

    template<typename returnType>
    FunctionPtr(returnType (FASTCALL *value)()) : m_value((void*)value) { ASSERT_VALID_CODE_POINTER(m_value); }

    template<typename returnType, typename argType1>
    FunctionPtr(returnType (FASTCALL *value)(argType1)) : m_value((void*)value) { ASSERT_VALID_CODE_POINTER(m_value); }

    template<typename returnType, typename argType1, typename argType2>
    FunctionPtr(returnType (FASTCALL *value)(argType1, argType2)) : m_value((void*)value) { ASSERT_VALID_CODE_POINTER(m_value); }

    template<typename returnType, typename argType1, typename argType2, typename argType3>
    FunctionPtr(returnType (FASTCALL *value)(argType1, argType2, argType3)) : m_value((void*)value) { ASSERT_VALID_CODE_POINTER(m_value); }

    template<typename returnType, typename argType1, typename argType2, typename argType3, typename argType4>
    FunctionPtr(returnType (FASTCALL *value)(argType1, argType2, argType3, argType4)) : m_value((void*)value) { ASSERT_VALID_CODE_POINTER(m_value); }
#endif

    template<typename FunctionType>
    explicit FunctionPtr(FunctionType* value)
        // Using a C-style cast here to avoid compiler error on RVCT:
        // Error: #694: reinterpret_cast cannot cast away const or other type qualifiers
        // (I guess on RVCT function pointers have a different constness to GCC/MSVC?)
        : m_value((void*)value)
    {
        ASSERT_VALID_CODE_POINTER(m_value);
    }

    void* value() const { return m_value; }
    void* executableAddress() const { return m_value; }

private:
    void* m_value;
};

// ReturnAddressPtr:
//
// ReturnAddressPtr should be used to wrap return addresses generated by processor
// 'call' instructions executed in JIT code. We use return addresses to look up
// exception and optimization information, and to repatch the call instruction
// that is the source of the return address.
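//
// For example (an illustrative sketch; 'returnPC' stands for a raw return address
// captured by the JIT's call machinery and is not an API defined in this header):
//
//     ReturnAddressPtr ra(returnPC);      // wrap the address the 'call' will return to
//     void* pc = ra.value();              // raw pointer used for lookups and repatching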
class ReturnAddressPtr {
public:
    ReturnAddressPtr()
        : m_value(0)
    {
    }

    explicit ReturnAddressPtr(void* value)
        : m_value(value)
    {
        ASSERT_VALID_CODE_POINTER(m_value);
    }

    explicit ReturnAddressPtr(FunctionPtr function)
        : m_value(function.value())
    {
        ASSERT_VALID_CODE_POINTER(m_value);
    }

    void* value() const { return m_value; }

    void dump(PrintStream& out) const { out.print(RawPointer(m_value)); }

private:
    void* m_value;
};

// MacroAssemblerCodePtr:
//
// MacroAssemblerCodePtr should be used to wrap pointers to JIT generated code.
class MacroAssemblerCodePtr {
public:
    MacroAssemblerCodePtr()
        : m_value(0)
    {
    }

    explicit MacroAssemblerCodePtr(void* value)
#if CPU(ARM_THUMB2)
        // Decorate the pointer as a thumb code pointer.
        : m_value(reinterpret_cast<char*>(value) + 1)
#else
        : m_value(value)
#endif
    {
        ASSERT_VALID_CODE_POINTER(m_value);
    }

    static MacroAssemblerCodePtr createFromExecutableAddress(void* value)
    {
        ASSERT_VALID_CODE_POINTER(value);
        MacroAssemblerCodePtr result;
        result.m_value = value;
        return result;
    }

    static MacroAssemblerCodePtr createLLIntCodePtr(OpcodeID codeId)
    {
        return createFromExecutableAddress(LLInt::getCodePtr(codeId));
    }

    explicit MacroAssemblerCodePtr(ReturnAddressPtr ra)
        : m_value(ra.value())
    {
        ASSERT_VALID_CODE_POINTER(m_value);
    }

    void* executableAddress() const { return m_value; }
#if CPU(ARM_THUMB2)
    // To use this pointer as a data address remove the decoration.
    void* dataLocation() const { ASSERT_VALID_CODE_POINTER(m_value); return reinterpret_cast<char*>(m_value) - 1; }
#else
    void* dataLocation() const { ASSERT_VALID_CODE_POINTER(m_value); return m_value; }
#endif

    explicit operator bool() const { return m_value; }

    bool operator==(const MacroAssemblerCodePtr& other) const { return m_value == other.m_value; }

    void dumpWithName(const char* name, PrintStream& out) const
    {
        if (!m_value) {
            out.print(name, "(null)");
            return;
        }
        if (executableAddress() == dataLocation()) {
            out.print(name, "(", RawPointer(executableAddress()), ")");
            return;
        }
        out.print(name, "(executable = ", RawPointer(executableAddress()), ", dataLocation = ", RawPointer(dataLocation()), ")");
    }

    void dump(PrintStream& out) const { dumpWithName("CodePtr", out); }

    enum EmptyValueTag { EmptyValue };
    enum DeletedValueTag { DeletedValue };

    MacroAssemblerCodePtr(EmptyValueTag)
        : m_value(emptyValue())
    {
    }

    MacroAssemblerCodePtr(DeletedValueTag)
        : m_value(deletedValue())
    {
    }

    bool isEmptyValue() const { return m_value == emptyValue(); }
    bool isDeletedValue() const { return m_value == deletedValue(); }

    unsigned hash() const { return PtrHash<void*>::hash(m_value); }

private:
    static void* emptyValue() { return bitwise_cast<void*>(static_cast<intptr_t>(1)); }
    static void* deletedValue() { return bitwise_cast<void*>(static_cast<intptr_t>(2)); }

    void* m_value;
};

struct MacroAssemblerCodePtrHash {
    static unsigned hash(const MacroAssemblerCodePtr& ptr) { return ptr.hash(); }
    static bool equal(const MacroAssemblerCodePtr& a, const MacroAssemblerCodePtr& b) { return a == b; }
    static const bool safeToCompareToEmptyOrDeleted = true;
};
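// A brief sketch of how the Thumb2 decoration above plays out in practice
// (illustrative only; 'codeStart' is a hypothetical pointer into JIT memory):
//
//     void* codeStart = ...;                 // start of a generated code block
//     MacroAssemblerCodePtr ptr(codeStart);  // on ARM_THUMB2, executableAddress() == codeStart + 1
//     void* patchAt = ptr.dataLocation();    // decoration stripped; equals codeStart on every platform
//
// MacroAssemblerCodePtrHash, together with the WTF trait specializations at the end
// of this file, lets MacroAssemblerCodePtr be used directly as a HashMap/HashSet key.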
// MacroAssemblerCodeRef:
//
// A reference to a section of JIT generated code. A CodeRef consists of a
// pointer to the code, and a ref pointer to the pool from within which it
// was allocated.
class MacroAssemblerCodeRef {
private:
    // This is private because it's dangerous enough that we want uses of it
    // to be easy to find - hence the static create method below.
    explicit MacroAssemblerCodeRef(MacroAssemblerCodePtr codePtr)
        : m_codePtr(codePtr)
    {
        ASSERT(m_codePtr);
    }

public:
    MacroAssemblerCodeRef()
    {
    }

    MacroAssemblerCodeRef(PassRefPtr<ExecutableMemoryHandle> executableMemory)
        : m_codePtr(executableMemory->start())
        , m_executableMemory(executableMemory)
    {
        ASSERT(m_executableMemory->isManaged());
        ASSERT(m_executableMemory->start());
        ASSERT(m_codePtr);
    }

    // Use this only when you know that the codePtr refers to code that is
    // already being kept alive through some other means. Typically this means
    // that codePtr is immortal.
    static MacroAssemblerCodeRef createSelfManagedCodeRef(MacroAssemblerCodePtr codePtr)
    {
        return MacroAssemblerCodeRef(codePtr);
    }

    // Helper for creating self-managed code refs from LLInt.
    static MacroAssemblerCodeRef createLLIntCodeRef(OpcodeID codeId)
    {
        return createSelfManagedCodeRef(MacroAssemblerCodePtr::createFromExecutableAddress(LLInt::getCodePtr(codeId)));
    }

    ExecutableMemoryHandle* executableMemory() const { return m_executableMemory.get(); }

    MacroAssemblerCodePtr code() const { return m_codePtr; }

    size_t size() const
    {
        if (!m_executableMemory)
            return 0;
        return m_executableMemory->sizeInBytes();
    }

    bool tryToDisassemble(const char* prefix) const
    {
        return JSC::tryToDisassemble(m_codePtr, size(), prefix, WTF::dataFile());
    }

    explicit operator bool() const { return !!m_codePtr; }

    void dump(PrintStream& out) const { m_codePtr.dumpWithName("CodeRef", out); }

private:
    MacroAssemblerCodePtr m_codePtr;
    RefPtr<ExecutableMemoryHandle> m_executableMemory;
};

} // namespace JSC

namespace WTF {

template<typename T> struct DefaultHash;
template<> struct DefaultHash<JSC::MacroAssemblerCodePtr> {
    typedef JSC::MacroAssemblerCodePtrHash Hash;
};

template<typename T> struct HashTraits;
template<> struct HashTraits<JSC::MacroAssemblerCodePtr> : public CustomHashTraits<JSC::MacroAssemblerCodePtr> { };

} // namespace WTF

#endif // MacroAssemblerCodeRef_h
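// Illustrative sketch of typical MacroAssemblerCodeRef usage (hypothetical setup;
// 'memory' stands for an ExecutableMemoryHandle obtained from the executable
// allocator when code was emitted, and is not defined in this header):
//
//     MacroAssemblerCodeRef ref(memory);         // keeps the underlying allocation alive
//     MacroAssemblerCodePtr entry = ref.code();  // entry point of the generated code
//     size_t bytes = ref.size();                 // 0 for self-managed refs
//     ref.tryToDisassemble("baseline: ");        // dumps via WTF::dataFile() when a disassembler is available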