--- old/src/share/vm/opto/library_call.cpp	2015-07-14 18:39:24.223577155 +0200
+++ new/src/share/vm/opto/library_call.cpp	2015-07-14 18:39:23.915577146 +0200
@@ -304,42 +304,76 @@
   bool inline_isCompileConstant();
 };
 
-
-//---------------------------make_vm_intrinsic----------------------------
-CallGenerator* Compile::make_vm_intrinsic(ciMethod* m, bool is_virtual) {
-  vmIntrinsics::ID id = m->intrinsic_id();
+bool Compile::intrinsic_does_virtual_dispatch_for(vmIntrinsics::ID id) {
   assert(id != vmIntrinsics::_none, "must be a VM intrinsic");
+  switch (id) {
+  case vmIntrinsics::_hashCode:
+  case vmIntrinsics::_clone:
+    return true;
+  default:
+    return false;
+  }
+}
+
+int Compile::intrinsic_predicates_needed_for(vmIntrinsics::ID id) {
+  assert(id != vmIntrinsics::_none, "must be a VM intrinsic");
+  switch (id) {
+  case vmIntrinsics::_cipherBlockChaining_encryptAESCrypt:
+  case vmIntrinsics::_cipherBlockChaining_decryptAESCrypt:
+    return 1;
+  case vmIntrinsics::_digestBase_implCompressMB:
+    return 3;
+  default:
+    return 0;
+  }
+}
 
-  ccstr disable_intr = NULL;
-
-  if ((DisableIntrinsic[0] != '\0'
-       && strstr(DisableIntrinsic, vmIntrinsics::name_at(id)) != NULL) ||
-      (method_has_option_value("DisableIntrinsic", disable_intr)
-       && strstr(disable_intr, vmIntrinsics::name_at(id)) != NULL)) {
-    // disabled by a user request on the command line:
-    // example: -XX:DisableIntrinsic=_hashCode,_getClass
-    return NULL;
+bool Compile::is_intrinsic_available_for(Method* method, Method* compilation_context, bool is_virtual) {
+  vmIntrinsics::ID id = method->intrinsic_id();
+  if (id < vmIntrinsics::FIRST_ID || id >= vmIntrinsics::LAST_COMPILER_INLINE) {
+    return false;
   }
 
-  if (!m->is_loaded()) {
-    // do not attempt to inline unloaded methods
-    return NULL;
+  // Check if the intrinsic corresponding to 'method' has been disabled on
+  // the command line by using the DisableIntrinsic flag (either globally
+  // or on a per-method level, see src/share/vm/compiler/abstractCompiler.hpp
+  // for details).
+  // Usually, the compilation context is the caller of the method 'method'.
+  // The only case in which, for a non-recursive method 'method', the
+  // compilation context is not the caller of 'method' but the method itself
+  // is java.lang.ref.Reference::get.
+  // For java.lang.ref.Reference::get, the intrinsic version is used
+  // instead of the C2-compiled version so that the value in the referent
+  // field can be registered by the G1 pre-barrier code. The intrinsified
+  // version of Reference::get also adds a memory barrier to prevent
+  // commoning reads from the referent field across safepoints, since the
+  // GC can change the referent field's value. See Compile::Compile()
+  // in src/share/vm/opto/compile.cpp for more details.
+  ccstr disable_intr = NULL;
+  if ((DisableIntrinsic[0] != '\0' && strstr(DisableIntrinsic, vmIntrinsics::name_at(id)) != NULL) ||
+      (compilation_context != NULL &&
+       CompilerOracle::has_option_value(compilation_context, "DisableIntrinsic", disable_intr) &&
+       strstr(disable_intr, vmIntrinsics::name_at(id)) != NULL)
+     ) {
+    return false;
  }
 
-  // Only a few intrinsics implement a virtual dispatch.
-  // They are expensive calls which are also frequently overridden.
+  // Only the Object.hashCode and Object.clone intrinsics also implement a
+  // virtual dispatch: both methods are expensive to call and are frequently
+  // overridden. All other intrinsics implement only a non-virtual
+  // dispatch.
   if (is_virtual) {
     switch (id) {
     case vmIntrinsics::_hashCode:
     case vmIntrinsics::_clone:
-      // OK, Object.hashCode and Object.clone intrinsics come in both flavors
       break;
     default:
-      return NULL;
+      return false;
     }
   }
 
-  // -XX:-InlineNatives disables nearly all intrinsics:
+  // -XX:-InlineNatives disables nearly all intrinsics except the ones listed in
+  // the following switch statement.
   if (!InlineNatives) {
     switch (id) {
     case vmIntrinsics::_indexOf:
@@ -354,237 +388,189 @@
     case vmIntrinsics::_loadFence:
     case vmIntrinsics::_storeFence:
     case vmIntrinsics::_fullFence:
-      break;  // InlineNatives does not control String.compareTo
     case vmIntrinsics::_Reference_get:
-      break;  // InlineNatives does not control Reference.get
+      break;
     default:
-      return NULL;
+      return false;
     }
   }
 
-  int predicates = 0;
-  bool does_virtual_dispatch = false;
-
   switch (id) {
   case vmIntrinsics::_compareTo:
-    if (!SpecialStringCompareTo) return NULL;
-    if (!Matcher::match_rule_supported(Op_StrComp)) return NULL;
+    if (!SpecialStringCompareTo) return false;
+    if (!Matcher::match_rule_supported(Op_StrComp)) return false;
    break;
  case vmIntrinsics::_indexOf:
-    if (!SpecialStringIndexOf) return NULL;
+    if (!SpecialStringIndexOf) return false;
    break;
  case vmIntrinsics::_equals:
-    if (!SpecialStringEquals) return NULL;
-    if (!Matcher::match_rule_supported(Op_StrEquals)) return NULL;
+    if (!SpecialStringEquals) return false;
+    if (!Matcher::match_rule_supported(Op_StrEquals)) return false;
    break;
  case vmIntrinsics::_equalsC:
-    if (!SpecialArraysEquals) return NULL;
-    if (!Matcher::match_rule_supported(Op_AryEq)) return NULL;
+    if (!SpecialArraysEquals) return false;
+    if (!Matcher::match_rule_supported(Op_AryEq)) return false;
    break;
  case vmIntrinsics::_arraycopy:
-    if (!InlineArrayCopy) return NULL;
+    if (!InlineArrayCopy) return false;
    break;
  case vmIntrinsics::_copyMemory:
-    if (StubRoutines::unsafe_arraycopy() == NULL) return NULL;
-    if (!InlineArrayCopy) return NULL;
+    if (!InlineArrayCopy) return false;
+    if (StubRoutines::unsafe_arraycopy() == NULL) return false;
    break;
  case vmIntrinsics::_hashCode:
-    if (!InlineObjectHash) return NULL;
-    does_virtual_dispatch = true;
+    if (!InlineObjectHash) return false;
    break;
  case vmIntrinsics::_clone:
-    does_virtual_dispatch = true;
  case vmIntrinsics::_copyOf:
  case vmIntrinsics::_copyOfRange:
-    if (!InlineObjectCopy) return NULL;
-    // These also use the arraycopy intrinsic mechanism:
-    if (!InlineArrayCopy) return NULL;
+    // These intrinsics use both the object-copy and the array-copy
+    // intrinsic mechanisms.
+    if (!InlineObjectCopy || !InlineArrayCopy) return false;
    break;
  case vmIntrinsics::_encodeISOArray:
-    if (!SpecialEncodeISOArray) return NULL;
-    if (!Matcher::match_rule_supported(Op_EncodeISOArray)) return NULL;
+    if (!SpecialEncodeISOArray) return false;
+    if (!Matcher::match_rule_supported(Op_EncodeISOArray)) return false;
    break;
  case vmIntrinsics::_checkIndex:
    // We do not intrinsify this. The optimizer does fine with it.
-    return NULL;
-
+    return false;
  case vmIntrinsics::_getCallerClass:
-    if (!InlineReflectionGetCallerClass) return NULL;
-    if (SystemDictionary::reflect_CallerSensitive_klass() == NULL) return NULL;
+    if (!InlineReflectionGetCallerClass ||
+        SystemDictionary::reflect_CallerSensitive_klass() == NULL) {
+      return false;
+    }
    break;
-
  case vmIntrinsics::_bitCount_i:
-    if (!Matcher::match_rule_supported(Op_PopCountI)) return NULL;
+    if (!Matcher::match_rule_supported(Op_PopCountI)) return false;
    break;
-
  case vmIntrinsics::_bitCount_l:
-    if (!Matcher::match_rule_supported(Op_PopCountL)) return NULL;
+    if (!Matcher::match_rule_supported(Op_PopCountL)) return false;
    break;
-
  case vmIntrinsics::_numberOfLeadingZeros_i:
-    if (!Matcher::match_rule_supported(Op_CountLeadingZerosI)) return NULL;
+    if (!Matcher::match_rule_supported(Op_CountLeadingZerosI)) return false;
    break;
-
  case vmIntrinsics::_numberOfLeadingZeros_l:
-    if (!Matcher::match_rule_supported(Op_CountLeadingZerosL)) return NULL;
+    if (!Matcher::match_rule_supported(Op_CountLeadingZerosL)) return false;
    break;
-
  case vmIntrinsics::_numberOfTrailingZeros_i:
-    if (!Matcher::match_rule_supported(Op_CountTrailingZerosI)) return NULL;
+    if (!Matcher::match_rule_supported(Op_CountTrailingZerosI)) return false;
    break;
-
  case vmIntrinsics::_numberOfTrailingZeros_l:
-    if (!Matcher::match_rule_supported(Op_CountTrailingZerosL)) return NULL;
+    if (!Matcher::match_rule_supported(Op_CountTrailingZerosL)) return false;
    break;
-
  case vmIntrinsics::_reverseBytes_c:
-    if (!Matcher::match_rule_supported(Op_ReverseBytesUS)) return NULL;
+    if (!Matcher::match_rule_supported(Op_ReverseBytesUS)) return false;
    break;
  case vmIntrinsics::_reverseBytes_s:
-    if (!Matcher::match_rule_supported(Op_ReverseBytesS)) return NULL;
+    if (!Matcher::match_rule_supported(Op_ReverseBytesS)) return false;
    break;
  case vmIntrinsics::_reverseBytes_i:
-    if (!Matcher::match_rule_supported(Op_ReverseBytesI)) return NULL;
+    if (!Matcher::match_rule_supported(Op_ReverseBytesI)) return false;
    break;
  case vmIntrinsics::_reverseBytes_l:
-    if (!Matcher::match_rule_supported(Op_ReverseBytesL)) return NULL;
+    if (!Matcher::match_rule_supported(Op_ReverseBytesL)) return false;
    break;
-
-  case vmIntrinsics::_Reference_get:
-    // Use the intrinsic version of Reference.get() so that the value in
-    // the referent field can be registered by the G1 pre-barrier code.
-    // Also add memory barrier to prevent commoning reads from this field
-    // across safepoint since GC can change it value.
-    break;
-
  case vmIntrinsics::_compareAndSwapObject:
 #ifdef _LP64
-    if (!UseCompressedOops && !Matcher::match_rule_supported(Op_CompareAndSwapP)) return NULL;
+    if (!UseCompressedOops && !Matcher::match_rule_supported(Op_CompareAndSwapP)) return false;
 #endif
-    break;
-
+    break;
  case vmIntrinsics::_compareAndSwapLong:
-    if (!Matcher::match_rule_supported(Op_CompareAndSwapL)) return NULL;
+    if (!Matcher::match_rule_supported(Op_CompareAndSwapL)) return false;
    break;
-
  case vmIntrinsics::_getAndAddInt:
-    if (!Matcher::match_rule_supported(Op_GetAndAddI)) return NULL;
+    if (!Matcher::match_rule_supported(Op_GetAndAddI)) return false;
    break;
-
  case vmIntrinsics::_getAndAddLong:
-    if (!Matcher::match_rule_supported(Op_GetAndAddL)) return NULL;
+    if (!Matcher::match_rule_supported(Op_GetAndAddL)) return false;
    break;
-
  case vmIntrinsics::_getAndSetInt:
-    if (!Matcher::match_rule_supported(Op_GetAndSetI)) return NULL;
+    if (!Matcher::match_rule_supported(Op_GetAndSetI)) return false;
    break;
-
  case vmIntrinsics::_getAndSetLong:
-    if (!Matcher::match_rule_supported(Op_GetAndSetL)) return NULL;
+    if (!Matcher::match_rule_supported(Op_GetAndSetL)) return false;
    break;
-
  case vmIntrinsics::_getAndSetObject:
 #ifdef _LP64
-    if (!UseCompressedOops && !Matcher::match_rule_supported(Op_GetAndSetP)) return NULL;
-    if (UseCompressedOops && !Matcher::match_rule_supported(Op_GetAndSetN)) return NULL;
+    if (!UseCompressedOops && !Matcher::match_rule_supported(Op_GetAndSetP)) return false;
+    if (UseCompressedOops && !Matcher::match_rule_supported(Op_GetAndSetN)) return false;
    break;
 #else
-    if (!Matcher::match_rule_supported(Op_GetAndSetP)) return NULL;
+    if (!Matcher::match_rule_supported(Op_GetAndSetP)) return false;
    break;
 #endif
-
  case vmIntrinsics::_aescrypt_encryptBlock:
  case vmIntrinsics::_aescrypt_decryptBlock:
-    if (!UseAESIntrinsics) return NULL;
+    if (!UseAESIntrinsics) return false;
    break;
-
  case vmIntrinsics::_multiplyToLen:
-    if (!UseMultiplyToLenIntrinsic) return NULL;
+    if (!UseMultiplyToLenIntrinsic) return false;
    break;
-
  case vmIntrinsics::_squareToLen:
-    if (!UseSquareToLenIntrinsic) return NULL;
+    if (!UseSquareToLenIntrinsic) return false;
    break;
-
  case vmIntrinsics::_mulAdd:
-    if (!UseMulAddIntrinsic) return NULL;
+    if (!UseMulAddIntrinsic) return false;
    break;
-
  case vmIntrinsics::_montgomeryMultiply:
-    if (!UseMontgomeryMultiplyIntrinsic) return NULL;
+    if (!UseMontgomeryMultiplyIntrinsic) return false;
    break;
  case vmIntrinsics::_montgomerySquare:
-    if (!UseMontgomerySquareIntrinsic) return NULL;
+    if (!UseMontgomerySquareIntrinsic) return false;
    break;
-
  case vmIntrinsics::_cipherBlockChaining_encryptAESCrypt:
  case vmIntrinsics::_cipherBlockChaining_decryptAESCrypt:
-    if (!UseAESIntrinsics) return NULL;
-    // these two require the predicated logic
-    predicates = 1;
+    if (!UseAESIntrinsics) return false;
    break;
-
  case vmIntrinsics::_sha_implCompress:
-    if (!UseSHA1Intrinsics) return NULL;
+    if (!UseSHA1Intrinsics) return false;
    break;
-
  case vmIntrinsics::_sha2_implCompress:
-    if (!UseSHA256Intrinsics) return NULL;
+    if (!UseSHA256Intrinsics) return false;
    break;
-
  case vmIntrinsics::_sha5_implCompress:
-    if (!UseSHA512Intrinsics) return NULL;
+    if (!UseSHA512Intrinsics) return false;
    break;
-
  case vmIntrinsics::_digestBase_implCompressMB:
-    if (!(UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics)) return NULL;
-    predicates = 3;
+    if (!(UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics)) return false;
    break;
-
  case vmIntrinsics::_ghash_processBlocks:
-    if (!UseGHASHIntrinsics) return NULL;
+    if (!UseGHASHIntrinsics) return false;
    break;
-
  case vmIntrinsics::_updateCRC32:
  case vmIntrinsics::_updateBytesCRC32:
  case vmIntrinsics::_updateByteBufferCRC32:
-    if (!UseCRC32Intrinsics) return NULL;
+    if (!UseCRC32Intrinsics) return false;
    break;
-
  case vmIntrinsics::_updateBytesCRC32C:
  case vmIntrinsics::_updateDirectByteBufferCRC32C:
-    if (!UseCRC32CIntrinsics) return NULL;
+    if (!UseCRC32CIntrinsics) return false;
    break;
-
  case vmIntrinsics::_incrementExactI:
  case vmIntrinsics::_addExactI:
-    if (!Matcher::match_rule_supported(Op_OverflowAddI) || !UseMathExactIntrinsics) return NULL;
+    if (!UseMathExactIntrinsics || !Matcher::match_rule_supported(Op_OverflowAddI)) return false;
    break;
  case vmIntrinsics::_incrementExactL:
  case vmIntrinsics::_addExactL:
-    if (!Matcher::match_rule_supported(Op_OverflowAddL) || !UseMathExactIntrinsics) return NULL;
+    if (!UseMathExactIntrinsics || !Matcher::match_rule_supported(Op_OverflowAddL)) return false;
    break;
  case vmIntrinsics::_decrementExactI:
  case vmIntrinsics::_subtractExactI:
-    if (!Matcher::match_rule_supported(Op_OverflowSubI) || !UseMathExactIntrinsics) return NULL;
+  case vmIntrinsics::_negateExactI:
+    if (!UseMathExactIntrinsics || !Matcher::match_rule_supported(Op_OverflowSubI)) return false;
    break;
  case vmIntrinsics::_decrementExactL:
  case vmIntrinsics::_subtractExactL:
-    if (!Matcher::match_rule_supported(Op_OverflowSubL) || !UseMathExactIntrinsics) return NULL;
-    break;
-  case vmIntrinsics::_negateExactI:
-    if (!Matcher::match_rule_supported(Op_OverflowSubI) || !UseMathExactIntrinsics) return NULL;
-    break;
  case vmIntrinsics::_negateExactL:
-    if (!Matcher::match_rule_supported(Op_OverflowSubL) || !UseMathExactIntrinsics) return NULL;
+    if (!UseMathExactIntrinsics || !Matcher::match_rule_supported(Op_OverflowSubL)) return false;
    break;
  case vmIntrinsics::_multiplyExactI:
-    if (!Matcher::match_rule_supported(Op_OverflowMulI) || !UseMathExactIntrinsics) return NULL;
+    if (!UseMathExactIntrinsics || !Matcher::match_rule_supported(Op_OverflowMulI)) return false;
    break;
  case vmIntrinsics::_multiplyExactL:
-    if (!Matcher::match_rule_supported(Op_OverflowMulL) || !UseMathExactIntrinsics) return NULL;
+    if (!UseMathExactIntrinsics || !Matcher::match_rule_supported(Op_OverflowMulL)) return false;
    break;
-
  case vmIntrinsics::_getShortUnaligned:
  case vmIntrinsics::_getCharUnaligned:
  case vmIntrinsics::_getIntUnaligned:
@@ -593,41 +579,72 @@
  case vmIntrinsics::_putCharUnaligned:
  case vmIntrinsics::_putIntUnaligned:
  case vmIntrinsics::_putLongUnaligned:
-    if (!UseUnalignedAccesses) return NULL;
+    if (!UseUnalignedAccesses) return false;
    break;
-
-  default:
-    assert(id <= vmIntrinsics::LAST_COMPILER_INLINE, "caller responsibility");
-    assert(id != vmIntrinsics::_Object_init && id != vmIntrinsics::_invoke, "enum out of order?");
+  default:
    break;
  }
 
+  Symbol* method_holder = method->method_holder()->name();
  // -XX:-InlineClassNatives disables natives from the Class class.
  // The flag applies to all reflective calls, notably Array.newArray
  // (visible to Java programmers as Array.newInstance).
-  if (m->holder()->name() == ciSymbol::java_lang_Class() ||
-      m->holder()->name() == ciSymbol::java_lang_reflect_Array()) {
-    if (!InlineClassNatives) return NULL;
+  if (method_holder == vmSymbols::java_lang_Class() ||
+      method_holder == vmSymbols::java_lang_reflect_Array()) {
+    if (!InlineClassNatives) return false;
  }
 
  // -XX:-InlineThreadNatives disables natives from the Thread class.
-  if (m->holder()->name() == ciSymbol::java_lang_Thread()) {
-    if (!InlineThreadNatives) return NULL;
+  if (method_holder == vmSymbols::java_lang_Thread()) {
+    if (!InlineThreadNatives) return false;
  }
 
  // -XX:-InlineMathNatives disables natives from the Math,Float and Double classes.
-  if (m->holder()->name() == ciSymbol::java_lang_Math() ||
-      m->holder()->name() == ciSymbol::java_lang_Float() ||
-      m->holder()->name() == ciSymbol::java_lang_Double()) {
-    if (!InlineMathNatives) return NULL;
+  if (method_holder == vmSymbols::java_lang_Math() ||
+      method_holder == vmSymbols::java_lang_Float() ||
+      method_holder == vmSymbols::java_lang_Double()) {
+    if (!InlineMathNatives) return false;
  }
 
  // -XX:-InlineUnsafeOps disables natives from the Unsafe class.
-  if (m->holder()->name() == ciSymbol::sun_misc_Unsafe()) {
-    if (!InlineUnsafeOps) return NULL;
+  if (method_holder == vmSymbols::sun_misc_Unsafe()) {
+    if (!InlineUnsafeOps) return false;
+  }
+
+  return true;
+}
+
+//---------------------------make_vm_intrinsic----------------------------
+CallGenerator* Compile::make_vm_intrinsic(ciMethod* m, bool is_virtual) {
+  vmIntrinsics::ID id = m->intrinsic_id();
+  assert(id != vmIntrinsics::_none, "must be a VM intrinsic");
+
+  if (!m->is_loaded()) {
+    // Do not attempt to inline unloaded methods.
+    return NULL;
+  }
+
+  bool is_available = false;
+  {
+    // To call is_intrinsic_available_for() we need to transition to
+    // the '_thread_in_vm' state because is_intrinsic_available_for()
+    // accesses VM-internal data.
+    VM_ENTRY_MARK;
+    methodHandle mh(THREAD, m->get_Method());
+    methodHandle ct(THREAD, method()->get_Method());
+    is_available = is_intrinsic_available_for(mh(), ct(), is_virtual);
  }
 
-  return new LibraryIntrinsic(m, is_virtual, predicates, does_virtual_dispatch, (vmIntrinsics::ID) id);
+  if (is_available) {
+    assert(id <= vmIntrinsics::LAST_COMPILER_INLINE, "caller responsibility");
+    assert(id != vmIntrinsics::_Object_init && id != vmIntrinsics::_invoke, "enum out of order?");
+    return new LibraryIntrinsic(m, is_virtual,
+                                intrinsic_predicates_needed_for(id),
+                                intrinsic_does_virtual_dispatch_for(id),
+                                (vmIntrinsics::ID) id);
+  } else {
+    return NULL;
+  }
}

//----------------------register_library_intrinsics-----------------------
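The net effect of the patch is to separate the availability decision from call-generator construction: is_intrinsic_available_for() is a side-effect-free predicate over a Method* and the relevant -XX flags, so it can also be reached from outside make_vm_intrinsic() (for example through a VM_ENTRY_MARK'd transition, as shown above). The following standalone C++ sketch illustrates only that shape; the IntrinsicId enum, the k* flag variables, and the function names are simplified, hypothetical stand-ins for vmIntrinsics::ID, LibraryIntrinsic, and the real -XX flags, not HotSpot code.

// intrinsic_gate.cpp -- illustrative sketch of the availability/construction
// split introduced by the patch above; all names are hypothetical stand-ins.
#include <cstdio>

enum IntrinsicId { kHashCode, kClone, kCompareTo };

// Stand-ins for the -XX:+/-InlineObjectHash, -XX:+/-InlineObjectCopy, and
// -XX:+/-SpecialStringCompareTo flags.
static bool kInlineObjectHash       = true;
static bool kInlineObjectCopy       = true;
static bool kSpecialStringCompareTo = true;

// Mirrors intrinsic_does_virtual_dispatch_for(): only hashCode and clone
// come in a virtual-dispatch flavor.
static bool does_virtual_dispatch(IntrinsicId id) {
  switch (id) {
  case kHashCode:
  case kClone:
    return true;
  default:
    return false;
  }
}

// Mirrors is_intrinsic_available_for(): a side-effect-free predicate that
// can be queried without constructing a call generator.
static bool is_intrinsic_available(IntrinsicId id, bool is_virtual) {
  // A virtually dispatched call can only be intrinsified if the intrinsic
  // implements virtual dispatch.
  if (is_virtual && !does_virtual_dispatch(id)) return false;
  switch (id) {
  case kHashCode:  return kInlineObjectHash;
  case kClone:     return kInlineObjectCopy;
  case kCompareTo: return kSpecialStringCompareTo;
  default:         return false;
  }
}

int main() {
  // make_vm_intrinsic() now follows the same pattern: check availability
  // first, and construct the intrinsic object only if the check passes.
  std::printf("hashCode,  virtual: %s\n",
              is_intrinsic_available(kHashCode, true) ? "available" : "unavailable");
  std::printf("compareTo, virtual: %s\n",
              is_intrinsic_available(kCompareTo, true) ? "available" : "unavailable");
  return 0;
}

With the default flag values the first query succeeds and the second fails, because String.compareTo has no virtual-dispatch flavor; flipping kInlineObjectHash to false makes the first query fail as well, which is the behavior the DisableIntrinsic and Inline* flag checks above implement for the real intrinsics.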