  bool inline_unsafe_prefetch(bool is_native_ptr, bool is_store, bool is_static);
  bool inline_unsafe_allocate();
  bool inline_unsafe_copyMemory();
  bool inline_native_currentThread();
#ifdef TRACE_HAVE_INTRINSICS
  bool inline_native_classID();
  bool inline_native_threadID();
#endif
  bool inline_native_time_funcs(address method, const char* funcName);
  bool inline_native_isInterrupted();
  bool inline_native_Class_query(vmIntrinsics::ID id);
  bool inline_native_subtype_check();

  bool inline_native_newArray();
  bool inline_native_getLength();
  bool inline_array_copyOf(bool is_copyOfRange);
  bool inline_array_equals();
  void copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array, bool card_mark);
  bool inline_native_clone(bool is_virtual);
  bool inline_native_Reflection_getCallerClass();
  // Helper function for inlining native object hash method
  bool inline_native_hashcode(bool is_virtual, bool is_static);
  bool inline_native_getClass();

  // Helper functions for inlining arraycopy
  bool inline_arraycopy();
  void generate_arraycopy(const TypePtr* adr_type,
                          BasicType basic_elem_type,
                          Node* src,  Node* src_offset,
                          Node* dest, Node* dest_offset,
                          Node* copy_length,
                          bool disjoint_bases = false,
                          bool length_never_negative = false,
                          RegionNode* slow_region = NULL);
  AllocateArrayNode* tightly_coupled_allocation(Node* ptr,
                                                RegionNode* slow_region);
  void generate_clear_array(const TypePtr* adr_type,
                            Node* dest,
                            BasicType basic_elem_type,
                            Node* slice_off,
// ...

    if (!InlineObjectHash)  return NULL;
    break;
  case vmIntrinsics::_clone:
  case vmIntrinsics::_copyOf:
  case vmIntrinsics::_copyOfRange:
    if (!InlineObjectCopy)  return NULL;
    // These also use the arraycopy intrinsic mechanism:
    if (!InlineArrayCopy)  return NULL;
    break;
  case vmIntrinsics::_encodeISOArray:
    if (!SpecialEncodeISOArray)  return NULL;
    if (!Matcher::match_rule_supported(Op_EncodeISOArray))  return NULL;
    break;
  case vmIntrinsics::_checkIndex:
    // We do not intrinsify this. The optimizer does fine with it.
    return NULL;

  case vmIntrinsics::_getCallerClass:
    if (!UseNewReflection)  return NULL;
    if (!InlineReflectionGetCallerClass)  return NULL;
    if (SystemDictionary::reflect_CallerSensitive_klass() == NULL)  return NULL;
    break;

  case vmIntrinsics::_bitCount_i:
    if (!Matcher::match_rule_supported(Op_PopCountI))  return NULL;
    break;

  case vmIntrinsics::_bitCount_l:
    if (!Matcher::match_rule_supported(Op_PopCountL))  return NULL;
    break;

  case vmIntrinsics::_numberOfLeadingZeros_i:
    if (!Matcher::match_rule_supported(Op_CountLeadingZerosI))  return NULL;
    break;

  case vmIntrinsics::_numberOfLeadingZeros_l:
    if (!Matcher::match_rule_supported(Op_CountLeadingZerosL))  return NULL;
    break;

  case vmIntrinsics::_numberOfTrailingZeros_i:
    if (!Matcher::match_rule_supported(Op_CountTrailingZerosI))  return NULL;
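
    // Each Matcher::match_rule_supported(Op_*) test above asks whether the
    // current platform actually implements a match rule for that ideal
    // opcode; when it does not, returning NULL rejects the intrinsic and
    // the plain Java implementation of the method is used instead.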
// ...

  // Return the combined state.
  set_i_o( _gvn.transform(result_io) );
  set_all_memory( _gvn.transform(result_mem) );

  set_result(result_reg, result_val);
  return true;
}

//---------------------------inline_native_getClass----------------------------
// public final native Class<?> java.lang.Object.getClass();
//
// Build special case code for calls to getClass on an object.
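//
// Roughly: the receiver's klass pointer is read from the object header
// (load_object_klass), and the klass's java_mirror field -- the
// java.lang.Class instance -- is then loaded from it
// (load_mirror_from_klass), so getClass() collapses into two dependent
// loads with no runtime call.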
bool LibraryCallKit::inline_native_getClass() {
  Node* obj = null_check_receiver();
  if (stopped())  return true;
  set_result(load_mirror_from_klass(load_object_klass(obj)));
  return true;
}

//-----------------inline_native_Reflection_getCallerClass---------------------
// public static native Class<?> sun.reflect.Reflection.getCallerClass();
//
// In the presence of deep enough inlining, getCallerClass() becomes a no-op.
//
// NOTE: This code must perform the same logic as JVM_GetCallerClass
// in that it must skip particular security frames and check for
// caller-sensitive methods.
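//
// For example, given an illustrative, fully inlined call chain (frame 0 is
// the innermost frame and is not part of the JVM state seen here):
//
//   0: Reflection.getCallerClass()  -- the intrinsic itself
//   1: MyUtil.whoCalledMe()         -- must be @CallerSensitive
//   2: Method.invoke(...)           -- ignored by the security stack walk
//   3: App.run()                    -- holder of this frame is the result
//
// (MyUtil and App are hypothetical names.)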
bool LibraryCallKit::inline_native_Reflection_getCallerClass() {
#ifndef PRODUCT
  if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
    tty->print_cr("Attempting to inline sun.reflect.Reflection.getCallerClass");
  }
#endif

  if (!jvms()->has_method()) {
#ifndef PRODUCT
    if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
      tty->print_cr("  Bailing out because intrinsic was inlined at top level");
    }
#endif
    return false;
  }
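  // (jvms()->has_method() is false only when the intrinsic is compiled as
  // the root of the compilation, in which case there is no caller JVM
  // state to walk.)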

  // Walk back up the JVM state to find the caller at the required
  // depth.
  JVMState* caller_jvms = jvms();

  // Cf. JVM_GetCallerClass
  // NOTE: Start the loop at depth 1 because the current JVM state does
  // not include the Reflection.getCallerClass() frame.
  for (int n = 1; caller_jvms != NULL; caller_jvms = caller_jvms->caller(), n++) {
    ciMethod* m = caller_jvms->method();
    switch (n) {
    case 0:
      fatal("current JVM state does not include the Reflection.getCallerClass frame");
      break;
    case 1:
      // Frame 0 and 1 must be caller sensitive (see JVM_GetCallerClass).
      if (!m->caller_sensitive()) {
#ifndef PRODUCT
        if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
          tty->print_cr("  Bailing out: CallerSensitive annotation expected at frame %d", n);
        }
#endif
        return false;  // bail-out; let JVM_GetCallerClass do the work
      }
      break;
    default:
      if (!m->is_ignored_by_security_stack_walk()) {
        // We have reached the desired frame; return the holder class.
        // Acquire method holder as java.lang.Class and push as constant.
        ciInstanceKlass* caller_klass = caller_jvms->method()->holder();
        ciInstance* caller_mirror = caller_klass->java_mirror();
        set_result(makecon(TypeInstPtr::make(caller_mirror)));

#ifndef PRODUCT
        if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
          tty->print_cr("  Succeeded: caller = %d) %s.%s, JVMS depth = %d", n, caller_klass->name()->as_utf8(), caller_jvms->method()->name()->as_utf8(), jvms()->depth());
          tty->print_cr("  JVM state at this point:");
          for (int i = jvms()->depth(), n = 1; i >= 1; i--, n++) {
            ciMethod* m = jvms()->of_depth(i)->method();
            tty->print_cr("   %d) %s.%s", n, m->holder()->name()->as_utf8(), m->name()->as_utf8());
          }
        }
#endif
        return true;
      }
      break;
    }
  }

#ifndef PRODUCT
  if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
    tty->print_cr("  Bailing out because caller depth exceeded inlining depth = %d", jvms()->depth());
    tty->print_cr("  JVM state at this point:");
    for (int i = jvms()->depth(), n = 1; i >= 1; i--, n++) {
      ciMethod* m = jvms()->of_depth(i)->method();
      tty->print_cr("   %d) %s.%s", n, m->holder()->name()->as_utf8(), m->name()->as_utf8());
    }
  }
#endif

  return false;  // bail-out; let JVM_GetCallerClass do the work
}
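
// For illustration, in a fully inlined chain such as (names hypothetical)
//
//   @CallerSensitive
//   public static Class<?> whoCalledMe() {
//     return Reflection.getCallerClass();
//   }
//
// a call to whoCalledMe() from App.run() constant-folds here to App.class,
// with no stack walk at runtime; any bail-out above instead leaves the
// native call in place and lets JVM_GetCallerClass do the work.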

bool LibraryCallKit::inline_fp_conversions(vmIntrinsics::ID id) {
  Node* arg = argument(0);
  Node* result;

  switch (id) {
  case vmIntrinsics::_floatToRawIntBits:    result = new (C) MoveF2INode(arg);  break;
  case vmIntrinsics::_intBitsToFloat:       result = new (C) MoveI2FNode(arg);  break;
  case vmIntrinsics::_doubleToRawLongBits:  result = new (C) MoveD2LNode(arg);  break;
  case vmIntrinsics::_longBitsToDouble:     result = new (C) MoveL2DNode(arg);  break;
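
  // The raw conversions and the bits-to-FP conversions are pure bit-for-bit
  // moves between register files, so a single Move node suffices. The
  // checked doubleToLongBits below must additionally collapse every NaN
  // input to the single canonical NaN bit pattern required by the Java spec.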

  case vmIntrinsics::_doubleToLongBits: {
    // two paths (plus control) merge in a wood
    RegionNode *r = new (C) RegionNode(3);
    Node *phi = new (C) PhiNode(r, TypeLong::LONG);

    Node *cmpisnan = _gvn.transform(new (C) CmpDNode(arg, arg));
    // Build the boolean node
    Node *bolisnan = _gvn.transform(new (C) BoolNode(cmpisnan, BoolTest::ne));
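
    // CmpD(arg, arg) with BoolTest::ne is the classic NaN test: a double
    // compares not-equal to itself exactly when it is NaN. The region and
    // phi above are set up to merge the two outcomes into one long result:
    // the canonical NaN bits (0x7ff8000000000000L) on the NaN path, and a
    // raw MoveD2L of the argument on the other.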