183 }
  // Guard that tests whether 'kls' is an object-array (objArray) klass;
  // the failing path (if any) is added to 'region'.  Thin wrapper over
  // generate_array_guard_common (see its declaration below for the
  // obj_array/not_array parameter meanings).
  Node* generate_objArray_guard(Node* kls, RegionNode* region) {
    return generate_array_guard_common(kls, region, /*obj_array=*/true, /*not_array=*/false);
  }
  // Guard for the complementary case of generate_objArray_guard: tests
  // 'kls' against the non-objArray case (note: passes not_array=true in
  // addition to obj_array=true — exact guard polarity is defined by
  // generate_array_guard_common).
  Node* generate_non_objArray_guard(Node* kls, RegionNode* region) {
    return generate_array_guard_common(kls, region, /*obj_array=*/true, /*not_array=*/true);
  }
190 Node* generate_array_guard_common(Node* kls, RegionNode* region,
191 bool obj_array, bool not_array);
192 Node* generate_virtual_guard(Node* obj_klass, RegionNode* slow_region);
193 CallJavaNode* generate_method_call(vmIntrinsics::ID method_id,
194 bool is_virtual = false, bool is_static = false);
  // Convenience wrapper: emit a static (non-virtual) call to the method
  // backing 'method_id'.
  CallJavaNode* generate_method_call_static(vmIntrinsics::ID method_id) {
    return generate_method_call(method_id, /*is_virtual=*/false, /*is_static=*/true);
  }
  // Convenience wrapper: emit a virtual call to the method backing
  // 'method_id'.
  CallJavaNode* generate_method_call_virtual(vmIntrinsics::ID method_id) {
    return generate_method_call(method_id, /*is_virtual=*/true, /*is_static=*/false);
  }
201 Node * load_field_from_object(Node * fromObj, const char * fieldName, const char * fieldTypeString, bool is_exact, bool is_static, ciInstanceKlass * fromKls);
202
203 Node* make_string_method_node(int opcode, Node* str1_start, Node* cnt1, Node* str2_start, Node* cnt2);
204 Node* make_string_method_node(int opcode, Node* str1, Node* str2);
205 bool inline_string_compareTo();
206 bool inline_string_indexOf();
207 Node* string_indexOf(Node* string_object, ciTypeArray* target_array, jint offset, jint cache_i, jint md2_i);
208 bool inline_string_equals();
209 Node* round_double_node(Node* n);
210 bool runtime_math(const TypeFunc* call_type, address funcAddr, const char* funcName);
211 bool inline_math_native(vmIntrinsics::ID id);
212 bool inline_trig(vmIntrinsics::ID id);
213 bool inline_math(vmIntrinsics::ID id);
214 template <typename OverflowOp>
215 bool inline_math_overflow(Node* arg1, Node* arg2);
216 void inline_math_mathExact(Node* math, Node* test);
217 bool inline_math_addExactI(bool is_increment);
218 bool inline_math_addExactL(bool is_increment);
219 bool inline_math_multiplyExactI();
220 bool inline_math_multiplyExactL();
221 bool inline_math_negateExactI();
222 bool inline_math_negateExactL();
223 bool inline_math_subtractExactI(bool is_decrement);
224 bool inline_math_subtractExactL(bool is_decrement);
225 bool inline_pow();
226 Node* finish_pow_exp(Node* result, Node* x, Node* y, const TypeFunc* call_type, address funcAddr, const char* funcName);
227 bool inline_min_max(vmIntrinsics::ID id);
228 bool inline_notify(vmIntrinsics::ID id);
234 // Generates the guards that check whether the result of
235 // Unsafe.getObject should be recorded in an SATB log buffer.
236 void insert_pre_barrier(Node* base_oop, Node* offset, Node* pre_val, bool need_mem_bar);
237 bool inline_unsafe_access(bool is_native_ptr, bool is_store, BasicType type, bool is_volatile);
238 static bool klass_needs_init_guard(Node* kls);
239 bool inline_unsafe_allocate();
240 bool inline_unsafe_copyMemory();
241 bool inline_native_currentThread();
242 #ifdef TRACE_HAVE_INTRINSICS
243 bool inline_native_classID();
244 bool inline_native_threadID();
245 #endif
246 bool inline_native_time_funcs(address method, const char* funcName);
247 bool inline_native_isInterrupted();
248 bool inline_native_Class_query(vmIntrinsics::ID id);
249 bool inline_native_subtype_check();
250
251 bool inline_native_newArray();
252 bool inline_native_getLength();
253 bool inline_array_copyOf(bool is_copyOfRange);
254 bool inline_array_equals();
255 void copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array, bool card_mark);
256 bool inline_native_clone(bool is_virtual);
257 bool inline_native_Reflection_getCallerClass();
258 // Helper function for inlining native object hash method
259 bool inline_native_hashcode(bool is_virtual, bool is_static);
260 bool inline_native_getClass();
261
262 // Helper functions for inlining arraycopy
263 bool inline_arraycopy();
264 AllocateArrayNode* tightly_coupled_allocation(Node* ptr,
265 RegionNode* slow_region);
266 JVMState* arraycopy_restore_alloc_state(AllocateArrayNode* alloc, int& saved_reexecute_sp);
267 void arraycopy_move_allocation_here(AllocateArrayNode* alloc, Node* dest, JVMState* saved_jvms, int saved_reexecute_sp);
268
269 typedef enum { LS_xadd, LS_xchg, LS_cmpxchg } LoadStoreKind;
270 bool inline_unsafe_load_store(BasicType type, LoadStoreKind kind);
271 bool inline_unsafe_ordered_store(BasicType type);
272 bool inline_unsafe_fence(vmIntrinsics::ID id);
273 bool inline_fp_conversions(vmIntrinsics::ID id);
274 bool inline_number_methods(vmIntrinsics::ID id);
281 Node* get_original_key_start_from_aescrypt_object(Node* aescrypt_object);
282 bool inline_ghash_processBlocks();
283 bool inline_sha_implCompress(vmIntrinsics::ID id);
284 bool inline_digestBase_implCompressMB(int predicate);
285 bool inline_sha_implCompressMB(Node* digestBaseObj, ciInstanceKlass* instklass_SHA,
286 bool long_state, address stubAddr, const char *stubName,
287 Node* src_start, Node* ofs, Node* limit);
288 Node* get_state_from_sha_object(Node *sha_object);
289 Node* get_state_from_sha5_object(Node *sha_object);
290 Node* inline_digestBase_implCompressMB_predicate(int predicate);
291 bool inline_encodeISOArray();
292 bool inline_updateCRC32();
293 bool inline_updateBytesCRC32();
294 bool inline_updateByteBufferCRC32();
295 Node* get_table_from_crc32c_class(ciInstanceKlass *crc32c_class);
296 bool inline_updateBytesCRC32C();
297 bool inline_updateDirectByteBufferCRC32C();
298 bool inline_updateBytesAdler32();
299 bool inline_updateByteBufferAdler32();
300 bool inline_multiplyToLen();
301 bool inline_squareToLen();
302 bool inline_mulAdd();
303 bool inline_montgomeryMultiply();
304 bool inline_montgomerySquare();
305
306 bool inline_profileBoolean();
307 bool inline_isCompileConstant();
308 };
309
310 //---------------------------make_vm_intrinsic----------------------------
311 CallGenerator* Compile::make_vm_intrinsic(ciMethod* m, bool is_virtual) {
312 vmIntrinsics::ID id = m->intrinsic_id();
313 assert(id != vmIntrinsics::_none, "must be a VM intrinsic");
314
315 if (!m->is_loaded()) {
316 // Do not attempt to inline unloaded methods.
317 return NULL;
318 }
319
320 C2Compiler* compiler = (C2Compiler*)CompileBroker::compiler(CompLevel_full_optimization);
440 // The intrinsic bailed out
441 if (C->print_intrinsics() || C->print_inlining()) {
442 if (jvms->has_method()) {
443 // Not a root compile.
444 const char* msg = "failed to generate predicate for intrinsic";
445 C->print_inlining(kit.callee(), jvms->depth() - 1, bci, msg);
446 } else {
447 // Root compile
448 C->print_inlining_stream()->print("Did not generate predicate for intrinsic %s%s at bci:%d in",
449 vmIntrinsics::name_at(intrinsic_id()),
450 (is_virtual() ? " (virtual)" : ""), bci);
451 }
452 }
453 C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_failed);
454 return NULL;
455 }
456
// Central dispatcher: expand the intrinsic identified by intrinsic_id()
// directly into the current ideal graph.  Returns true if the intrinsic
// was handled (inlined, or deliberately consumed), false to fall back to
// an ordinary call.  'predicate' selects among multiple predicated
// versions for intrinsics that have them (e.g. digestBase_implCompressMB).
bool LibraryCallKit::try_to_inline(int predicate) {
  // Handle symbolic names for otherwise undistinguished boolean switches:
  const bool is_store = true;
  const bool is_native_ptr = true;
  const bool is_static = true;
  const bool is_volatile = true;

  if (!jvms()->has_method()) {
    // Root JVMState has a null method.
    assert(map()->memory()->Opcode() == Op_Parm, "");
    // Insert the memory aliasing node
    set_all_memory(reset_memory());
  }
  assert(merged_memory(), "");


  switch (intrinsic_id()) {
  case vmIntrinsics::_hashCode: return inline_native_hashcode(intrinsic()->is_virtual(), !is_static);
  case vmIntrinsics::_identityHashCode: return inline_native_hashcode(/*!virtual*/ false, is_static);
  case vmIntrinsics::_getClass: return inline_native_getClass();

  case vmIntrinsics::_dsin:
  case vmIntrinsics::_dcos:
    if (InlineNotify) {
      return inline_notify(intrinsic_id());
    }
    return false;

  // Math.*Exact family: each expands to an overflow-checked arithmetic node.
  case vmIntrinsics::_addExactI: return inline_math_addExactI(false /* add */);
  case vmIntrinsics::_addExactL: return inline_math_addExactL(false /* add */);
  case vmIntrinsics::_decrementExactI: return inline_math_subtractExactI(true /* decrement */);
  case vmIntrinsics::_decrementExactL: return inline_math_subtractExactL(true /* decrement */);
  case vmIntrinsics::_incrementExactI: return inline_math_addExactI(true /* increment */);
  case vmIntrinsics::_incrementExactL: return inline_math_addExactL(true /* increment */);
  case vmIntrinsics::_multiplyExactI: return inline_math_multiplyExactI();
  case vmIntrinsics::_multiplyExactL: return inline_math_multiplyExactL();
  case vmIntrinsics::_negateExactI: return inline_math_negateExactI();
  case vmIntrinsics::_negateExactL: return inline_math_negateExactL();
  case vmIntrinsics::_subtractExactI: return inline_math_subtractExactI(false /* subtract */);
  case vmIntrinsics::_subtractExactL: return inline_math_subtractExactL(false /* subtract */);

  case vmIntrinsics::_arraycopy: return inline_arraycopy();

  // String intrinsics.
  case vmIntrinsics::_compareTo: return inline_string_compareTo();
  case vmIntrinsics::_indexOf: return inline_string_indexOf();
  case vmIntrinsics::_equals: return inline_string_equals();

  // sun.misc.Unsafe heap-relative accessors; the symbolic bools above keep
  // these one-liners readable (e.g. !is_store == load, !is_volatile == plain).
  case vmIntrinsics::_getObject: return inline_unsafe_access(!is_native_ptr, !is_store, T_OBJECT, !is_volatile);
  case vmIntrinsics::_getBoolean: return inline_unsafe_access(!is_native_ptr, !is_store, T_BOOLEAN, !is_volatile);
  case vmIntrinsics::_getByte: return inline_unsafe_access(!is_native_ptr, !is_store, T_BYTE, !is_volatile);
  case vmIntrinsics::_getShort: return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT, !is_volatile);
  case vmIntrinsics::_getChar: return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR, !is_volatile);
  case vmIntrinsics::_getInt: return inline_unsafe_access(!is_native_ptr, !is_store, T_INT, !is_volatile);
  case vmIntrinsics::_getLong: return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG, !is_volatile);
  case vmIntrinsics::_getFloat: return inline_unsafe_access(!is_native_ptr, !is_store, T_FLOAT, !is_volatile);
  case vmIntrinsics::_getDouble: return inline_unsafe_access(!is_native_ptr, !is_store, T_DOUBLE, !is_volatile);
  case vmIntrinsics::_putObject: return inline_unsafe_access(!is_native_ptr, is_store, T_OBJECT, !is_volatile);
  case vmIntrinsics::_putBoolean: return inline_unsafe_access(!is_native_ptr, is_store, T_BOOLEAN, !is_volatile);
  case vmIntrinsics::_putByte: return inline_unsafe_access(!is_native_ptr, is_store, T_BYTE, !is_volatile);
  case vmIntrinsics::_putShort: return inline_unsafe_access(!is_native_ptr, is_store, T_SHORT, !is_volatile);
  case vmIntrinsics::_putChar: return inline_unsafe_access(!is_native_ptr, is_store, T_CHAR, !is_volatile);
  case vmIntrinsics::_putInt: return inline_unsafe_access(!is_native_ptr, is_store, T_INT, !is_volatile);
  case vmIntrinsics::_putLong: return inline_unsafe_access(!is_native_ptr, is_store, T_LONG, !is_volatile);
  case vmIntrinsics::_putFloat: return inline_unsafe_access(!is_native_ptr, is_store, T_FLOAT, !is_volatile);
  case vmIntrinsics::_putDouble: return inline_unsafe_access(!is_native_ptr, is_store, T_DOUBLE, !is_volatile);

  case vmIntrinsics::_loadFence:
  case vmIntrinsics::_storeFence:
  case vmIntrinsics::_fullFence: return inline_unsafe_fence(intrinsic_id());

  case vmIntrinsics::_currentThread: return inline_native_currentThread();
  case vmIntrinsics::_isInterrupted: return inline_native_isInterrupted();

#ifdef TRACE_HAVE_INTRINSICS
  case vmIntrinsics::_classID: return inline_native_classID();
  case vmIntrinsics::_threadID: return inline_native_threadID();
  case vmIntrinsics::_counterTime: return inline_native_time_funcs(CAST_FROM_FN_PTR(address, TRACE_TIME_METHOD), "counterTime");
#endif
  case vmIntrinsics::_currentTimeMillis: return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeMillis), "currentTimeMillis");
  case vmIntrinsics::_nanoTime: return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeNanos), "nanoTime");
  case vmIntrinsics::_allocateInstance: return inline_unsafe_allocate();
  case vmIntrinsics::_copyMemory: return inline_unsafe_copyMemory();
  case vmIntrinsics::_newArray: return inline_native_newArray();
  case vmIntrinsics::_getLength: return inline_native_getLength();
  case vmIntrinsics::_copyOf: return inline_array_copyOf(false);
  case vmIntrinsics::_copyOfRange: return inline_array_copyOf(true);
  case vmIntrinsics::_equalsC: return inline_array_equals();
  case vmIntrinsics::_clone: return inline_native_clone(intrinsic()->is_virtual());

  case vmIntrinsics::_isAssignableFrom: return inline_native_subtype_check();

  // java.lang.Class reflective queries all funnel through one helper.
  case vmIntrinsics::_isInstance:
  case vmIntrinsics::_getModifiers:
  case vmIntrinsics::_isInterface:
  case vmIntrinsics::_isArray:
  case vmIntrinsics::_isPrimitive:
  case vmIntrinsics::_getSuperclass:
  case vmIntrinsics::_getClassAccessFlags: return inline_native_Class_query(intrinsic_id());

  // Raw bit-pattern <-> floating point reinterpretations.
  case vmIntrinsics::_floatToRawIntBits:
  case vmIntrinsics::_floatToIntBits:
  case vmIntrinsics::_intBitsToFloat:
  case vmIntrinsics::_doubleToRawLongBits:
  case vmIntrinsics::_doubleToLongBits:
  case vmIntrinsics::_longBitsToDouble: return inline_fp_conversions(intrinsic_id());

  case vmIntrinsics::_numberOfLeadingZeros_i:
    return inline_digestBase_implCompressMB(predicate);

  case vmIntrinsics::_multiplyToLen:
    return inline_multiplyToLen();

  case vmIntrinsics::_squareToLen:
    return inline_squareToLen();

  case vmIntrinsics::_mulAdd:
    return inline_mulAdd();

  case vmIntrinsics::_montgomeryMultiply:
    return inline_montgomeryMultiply();
  case vmIntrinsics::_montgomerySquare:
    return inline_montgomerySquare();

  case vmIntrinsics::_ghash_processBlocks:
    return inline_ghash_processBlocks();

  case vmIntrinsics::_encodeISOArray:
    return inline_encodeISOArray();

  case vmIntrinsics::_updateCRC32:
    return inline_updateCRC32();
  case vmIntrinsics::_updateBytesCRC32:
    return inline_updateBytesCRC32();
  case vmIntrinsics::_updateByteBufferCRC32:
    return inline_updateByteBufferCRC32();

  case vmIntrinsics::_updateBytesCRC32C:
    return inline_updateBytesCRC32C();
  case vmIntrinsics::_updateDirectByteBufferCRC32C:
    return inline_updateDirectByteBufferCRC32C();

  case vmIntrinsics::_updateBytesAdler32:
    return inline_updateBytesAdler32();
  case vmIntrinsics::_updateByteBufferAdler32:
    return inline_updateByteBufferAdler32();

  case vmIntrinsics::_profileBoolean:
    return inline_profileBoolean();
  case vmIntrinsics::_isCompileConstant:
    return inline_isCompileConstant();

  default:
    // If you get here, it may be that someone has added a new intrinsic
    // to the list in vmSymbols.hpp without implementing it here.
#ifndef PRODUCT
    if ((PrintMiscellaneous && (Verbose || WizardMode)) || PrintOpto) {
      tty->print_cr("*** Warning: Unimplemented intrinsic %s(%d)",
                    vmIntrinsics::name_at(intrinsic_id()), intrinsic_id());
    }
#endif
    return false;
  }
}
725
726 Node* LibraryCallKit::try_to_predicate(int predicate) {
727 if (!jvms()->has_method()) {
728 // Root JVMState has a null method.
729 assert(map()->memory()->Opcode() == Op_Parm, "");
730 // Insert the memory aliasing node
731 set_all_memory(reset_memory());
732 }
858 Node* cmp_lt = _gvn.transform(new CmpUNode(array_length, last));
859 Node* bol_lt = _gvn.transform(new BoolNode(cmp_lt, BoolTest::lt));
860 Node* is_over = generate_guard(bol_lt, region, PROB_MIN);
861 return is_over;
862 }
863
864
865 //--------------------------generate_current_thread--------------------
866 Node* LibraryCallKit::generate_current_thread(Node* &tls_output) {
867 ciKlass* thread_klass = env()->Thread_klass();
868 const Type* thread_type = TypeOopPtr::make_from_klass(thread_klass)->cast_to_ptr_type(TypePtr::NotNull);
869 Node* thread = _gvn.transform(new ThreadLocalNode());
870 Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::threadObj_offset()));
871 Node* threadObj = make_load(NULL, p, thread_type, T_OBJECT, MemNode::unordered);
872 tls_output = thread;
873 return threadObj;
874 }
875
876
877 //------------------------------make_string_method_node------------------------
878 // Helper method for String intrinsic functions. This version is called
879 // with str1 and str2 pointing to String object nodes.
880 //
881 Node* LibraryCallKit::make_string_method_node(int opcode, Node* str1, Node* str2) {
882 Node* no_ctrl = NULL;
883
884 // Get start addr of string
885 Node* str1_value = load_String_value(no_ctrl, str1);
886 Node* str1_offset = load_String_offset(no_ctrl, str1);
887 Node* str1_start = array_element_address(str1_value, str1_offset, T_CHAR);
888
889 // Get length of string 1
890 Node* str1_len = load_String_length(no_ctrl, str1);
891
892 Node* str2_value = load_String_value(no_ctrl, str2);
893 Node* str2_offset = load_String_offset(no_ctrl, str2);
894 Node* str2_start = array_element_address(str2_value, str2_offset, T_CHAR);
895
896 Node* str2_len = NULL;
897 Node* result = NULL;
898
899 switch (opcode) {
900 case Op_StrIndexOf:
901 // Get length of string 2
902 str2_len = load_String_length(no_ctrl, str2);
903
904 result = new StrIndexOfNode(control(), memory(TypeAryPtr::CHARS),
905 str1_start, str1_len, str2_start, str2_len);
906 break;
907 case Op_StrComp:
908 // Get length of string 2
909 str2_len = load_String_length(no_ctrl, str2);
910
911 result = new StrCompNode(control(), memory(TypeAryPtr::CHARS),
912 str1_start, str1_len, str2_start, str2_len);
913 break;
914 case Op_StrEquals:
915 result = new StrEqualsNode(control(), memory(TypeAryPtr::CHARS),
916 str1_start, str2_start, str1_len);
917 break;
918 default:
919 ShouldNotReachHere();
920 return NULL;
921 }
922
923 // All these intrinsics have checks.
924 C->set_has_split_ifs(true); // Has chance for split-if optimization
925
926 return _gvn.transform(result);
927 }
928
929 // Helper method for String intrinsic functions. This version is called
930 // with str1 and str2 pointing to char[] nodes, with cnt1 and cnt2 pointing
// to Int nodes containing the lengths of str1 and str2.
932 //
933 Node* LibraryCallKit::make_string_method_node(int opcode, Node* str1_start, Node* cnt1, Node* str2_start, Node* cnt2) {
934 Node* result = NULL;
935 switch (opcode) {
936 case Op_StrIndexOf:
937 result = new StrIndexOfNode(control(), memory(TypeAryPtr::CHARS),
938 str1_start, cnt1, str2_start, cnt2);
939 break;
940 case Op_StrComp:
941 result = new StrCompNode(control(), memory(TypeAryPtr::CHARS),
942 str1_start, cnt1, str2_start, cnt2);
943 break;
944 case Op_StrEquals:
945 result = new StrEqualsNode(control(), memory(TypeAryPtr::CHARS),
946 str1_start, str2_start, cnt1);
947 break;
948 default:
949 ShouldNotReachHere();
950 return NULL;
951 }
952
953 // All these intrinsics have checks.
954 C->set_has_split_ifs(true); // Has chance for split-if optimization
955
956 return _gvn.transform(result);
957 }
958
959 //------------------------------inline_string_compareTo------------------------
960 // public int java.lang.String.compareTo(String anotherString);
961 bool LibraryCallKit::inline_string_compareTo() {
962 Node* receiver = null_check(argument(0));
963 Node* arg = null_check(argument(1));
964 if (stopped()) {
965 return true;
966 }
967 set_result(make_string_method_node(Op_StrComp, receiver, arg));
968 return true;
969 }
970
971 //------------------------------inline_string_equals------------------------
// Intrinsic for java.lang.String.equals(Object).  Builds a 4-way merge:
//   slot 2: reference-equal fast path            -> true
//   slot 3: argument fails 'instanceof String'   -> false
//   slot 4: character counts differ              -> false
//   slot 1: fall-through, full StrEquals compare -> node result
bool LibraryCallKit::inline_string_equals() {
  Node* receiver = null_check_receiver();
  // NOTE: Do not null check argument for String.equals() because spec
  // allows to specify NULL as argument.
  Node* argument = this->argument(1);
  if (stopped()) {
    return true;
  }

  // paths (plus control) merge
  RegionNode* region = new RegionNode(5);
  Node* phi = new PhiNode(region, TypeInt::BOOL);

  // does source == target string?
  Node* cmp = _gvn.transform(new CmpPNode(receiver, argument));
  Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::eq));

  Node* if_eq = generate_slow_guard(bol, NULL);
  if (if_eq != NULL) {
    // receiver == argument
    phi->init_req(2, intcon(1));
    region->init_req(2, if_eq);
  }

  // get String klass for instanceOf
  ciInstanceKlass* klass = env()->String_klass();

  if (!stopped()) {
    // Reject any argument that is not itself a String.
    Node* inst = gen_instanceof(argument, makecon(TypeKlassPtr::make(klass)));
    Node* cmp = _gvn.transform(new CmpINode(inst, intcon(1)));
    Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::ne));

    Node* inst_false = generate_guard(bol, NULL, PROB_MIN);
    //instanceOf == true, fallthrough

    if (inst_false != NULL) {
      phi->init_req(3, intcon(0));
      region->init_req(3, inst_false);
    }
  }

  if (!stopped()) {
    const TypeOopPtr* string_type = TypeOopPtr::make_from_klass(klass);

    // Properly cast the argument to String
    argument = _gvn.transform(new CheckCastPPNode(control(), argument, string_type));
    // This path is taken only when argument's type is String:NotNull.
    argument = cast_not_null(argument, false);

    Node* no_ctrl = NULL;

    // Get start addr of receiver
    Node* receiver_val = load_String_value(no_ctrl, receiver);
    Node* receiver_offset = load_String_offset(no_ctrl, receiver);
    Node* receiver_start = array_element_address(receiver_val, receiver_offset, T_CHAR);

    // Get length of receiver
    Node* receiver_cnt = load_String_length(no_ctrl, receiver);

    // Get start addr of argument
    Node* argument_val = load_String_value(no_ctrl, argument);
    Node* argument_offset = load_String_offset(no_ctrl, argument);
    Node* argument_start = array_element_address(argument_val, argument_offset, T_CHAR);

    // Get length of argument
    Node* argument_cnt = load_String_length(no_ctrl, argument);

    // Check for receiver count != argument count
    Node* cmp = _gvn.transform(new CmpINode(receiver_cnt, argument_cnt));
    Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::ne));
    Node* if_ne = generate_slow_guard(bol, NULL);
    if (if_ne != NULL) {
      phi->init_req(4, intcon(0));
      region->init_req(4, if_ne);
    }

    // Check for count == 0 is done by assembler code for StrEquals.

    if (!stopped()) {
      Node* equals = make_string_method_node(Op_StrEquals, receiver_start, receiver_cnt, argument_start, argument_cnt);
      phi->init_req(1, equals);
      region->init_req(1, control());
    }
  }

  // post merge
  set_control(_gvn.transform(region));
  record_for_igvn(region);

  set_result(_gvn.transform(phi));
  return true;
}
1064
1065 //------------------------------inline_array_equals----------------------------
1066 bool LibraryCallKit::inline_array_equals() {
1067 Node* arg1 = argument(0);
1068 Node* arg2 = argument(1);
1069 set_result(_gvn.transform(new AryEqNode(control(), memory(TypeAryPtr::CHARS), arg1, arg2)));
1070 return true;
1071 }
1072
1073 // Java version of String.indexOf(constant string)
1074 // class StringDecl {
1075 // StringDecl(char[] ca) {
1076 // offset = 0;
1077 // count = ca.length;
1078 // value = ca;
1079 // }
1080 // int offset;
1081 // int count;
1082 // char[] value;
1083 // }
1084 //
1085 // static int string_indexOf_J(StringDecl string_object, char[] target_object,
1086 // int targetOffset, int cache_i, int md2) {
1087 // int cache = cache_i;
1088 // int sourceOffset = string_object.offset;
1089 // int sourceCount = string_object.count;
1090 // int targetCount = target_object.length;
1091 //
1092 // int targetCountLess1 = targetCount - 1;
1093 // int sourceEnd = sourceOffset + sourceCount - targetCountLess1;
1094 //
1095 // char[] source = string_object.value;
1096 // char[] target = target_object;
1097 // int lastChar = target[targetCountLess1];
1098 //
1099 // outer_loop:
1100 // for (int i = sourceOffset; i < sourceEnd; ) {
1101 // int src = source[i + targetCountLess1];
1102 // if (src == lastChar) {
1103 // // With random strings and a 4-character alphabet,
1104 // // reverse matching at this point sets up 0.8% fewer
1105 // // frames, but (paradoxically) makes 0.3% more probes.
1106 // // Since those probes are nearer the lastChar probe,
// //     there may be a net D$ win with reverse matching.
1108 // // But, reversing loop inhibits unroll of inner loop
1109 // // for unknown reason. So, does running outer loop from
1110 // // (sourceOffset - targetCountLess1) to (sourceOffset + sourceCount)
1111 // for (int j = 0; j < targetCountLess1; j++) {
1112 // if (target[targetOffset + j] != source[i+j]) {
1113 // if ((cache & (1 << source[i+j])) == 0) {
1114 // if (md2 < j+1) {
1115 // i += j+1;
1116 // continue outer_loop;
1117 // }
1118 // }
1119 // i += md2;
1120 // continue outer_loop;
1121 // }
1122 // }
1123 // return i - sourceOffset;
1124 // }
1125 // if ((cache & (1 << src)) == 0) {
1126 // i += targetCountLess1;
1127 // } // using "i += targetCount;" and an "else i++;" causes a jump to jump.
1128 // i++;
1129 // }
1130 // return -1;
1131 // }
1132
1133 //------------------------------string_indexOf------------------------
// Emit the generic (non-SSE4.2) indexOf search loop via IdealKit, for a
// compile-time-constant pattern.  The algorithm mirrors the commented
// Java reference implementation above: 'cache_i' is a bitmask over
// pattern characters used to decide skip distances, and 'md2_i' is the
// precomputed shift when the last pattern character matches but an
// earlier one does not.
Node* LibraryCallKit::string_indexOf(Node* string_object, ciTypeArray* target_array, jint targetOffset_i,
                                     jint cache_i, jint md2_i) {

  Node* no_ctrl = NULL;
  float likely = PROB_LIKELY(0.9);
  float unlikely = PROB_UNLIKELY(0.9);

  const int nargs = 0; // no arguments to push back for uncommon trap in predicate

  // Receiver String fields: backing char[], start offset, character count.
  Node* source = load_String_value(no_ctrl, string_object);
  Node* sourceOffset = load_String_offset(no_ctrl, string_object);
  Node* sourceCount = load_String_length(no_ctrl, string_object);

  // The pattern array is a constant oop; embed it directly.
  Node* target = _gvn.transform( makecon(TypeOopPtr::make_from_constant(target_array, true)));
  jint target_length = target_array->length();
  const TypeAry* target_array_type = TypeAry::make(TypeInt::CHAR, TypeInt::make(0, target_length, Type::WidenMin));
  const TypeAryPtr* target_type = TypeAryPtr::make(TypePtr::BotPTR, target_array_type, target_array->klass(), true, Type::OffsetBot);

  // String.value field is known to be @Stable.
  if (UseImplicitStableValues) {
    target = cast_array_to_stable(target, target_type);
  }

  IdealKit kit(this, false, true);
#define __ kit.
  Node* zero             = __ ConI(0);
  Node* one              = __ ConI(1);
  Node* cache            = __ ConI(cache_i);
  Node* md2              = __ ConI(md2_i);
  Node* lastChar         = __ ConI(target_array->char_at(target_length - 1));
  Node* targetCountLess1 = __ ConI(target_length - 1);
  Node* targetOffset     = __ ConI(targetOffset_i);
  Node* sourceEnd        = __ SubI(__ AddI(sourceOffset, sourceCount), targetCountLess1);

  IdealVariable rtn(kit), i(kit), j(kit); __ declarations_done();
  Node* outer_loop = __ make_label(2 /* goto */);
  Node* return_    = __ make_label(1);

  __ set(rtn,__ ConI(-1));  // default result: not found
  __ loop(this, nargs, i, sourceOffset, BoolTest::lt, sourceEnd); {
       Node* i2 = __ AddI(__ value(i), targetCountLess1);
       // pin to prohibit loading of "next iteration" value which may SEGV (rare)
       Node* src = load_array_element(__ ctrl(), source, i2, TypeAryPtr::CHARS);
       // Compare the source char aligned with the pattern's last char first.
       __ if_then(src, BoolTest::eq, lastChar, unlikely); {
         // Last char matched; verify the remaining targetCountLess1 chars.
         __ loop(this, nargs, j, zero, BoolTest::lt, targetCountLess1); {
              Node* tpj = __ AddI(targetOffset, __ value(j));
              Node* targ = load_array_element(no_ctrl, target, tpj, target_type);
              Node* ipj  = __ AddI(__ value(i), __ value(j));
              Node* src2 = load_array_element(no_ctrl, source, ipj, TypeAryPtr::CHARS);
              __ if_then(targ, BoolTest::ne, src2); {
                // Mismatch: use the cache bitmask / md2 shift to advance i.
                __ if_then(__ AndI(cache, __ LShiftI(one, src2)), BoolTest::eq, zero); {
                  __ if_then(md2, BoolTest::lt, __ AddI(__ value(j), one)); {
                    __ increment(i, __ AddI(__ value(j), one));
                    __ goto_(outer_loop);
                  } __ end_if(); __ dead(j);
                }__ end_if(); __ dead(j);
                __ increment(i, md2);
                __ goto_(outer_loop);
              }__ end_if();
              __ increment(j, one);
         }__ end_loop(); __ dead(j);
         // Full match: report the index relative to the string start.
         __ set(rtn, __ SubI(__ value(i), sourceOffset)); __ dead(i);
         __ goto_(return_);
       }__ end_if();
       // Last char mismatched: skip by the pattern length if 'src' cannot
       // occur anywhere in the pattern (per the cache bitmask).
       __ if_then(__ AndI(cache, __ LShiftI(one, src)), BoolTest::eq, zero, likely); {
         __ increment(i, targetCountLess1);
       }__ end_if();
       __ increment(i, one);
       __ bind(outer_loop);
  }__ end_loop(); __ dead(i);
  __ bind(return_);

  // Final sync IdealKit and GraphKit.
  final_sync(kit);
  Node* result = __ value(rtn);
#undef __
  C->set_has_loops(true);
  return result;
}
1213
1214 //------------------------------inline_string_indexOf------------------------
// Intrinsic for java.lang.String.indexOf(String).  Two strategies:
//   1) If the platform has a StrIndexOf match rule and SSE4.2 is enabled,
//      emit a StrIndexOf node guarded by count checks.
//   2) Otherwise, only intrinsify a *constant* pattern string, expanding
//      the generic search loop via string_indexOf() with precomputed
//      'cache' and 'md2' tables (see the Java reference version above).
// Returns true when the call was intrinsified (or consumed by a deopt).
bool LibraryCallKit::inline_string_indexOf() {
  Node* receiver = argument(0);
  Node* arg      = argument(1);

  Node* result;
  if (Matcher::has_match_rule(Op_StrIndexOf) &&
      UseSSE42Intrinsics) {
    // Generate SSE4.2 version of indexOf
    // We currently only have match rules that use SSE4.2

    receiver = null_check(receiver);
    arg = null_check(arg);
    if (stopped()) {
      return true;
    }

    // Make the merge point:
    //   slot 2: substring longer than source -> -1
    //   slot 3: empty substring              -> 0
    //   slot 1: fall-through StrIndexOf node
    RegionNode* result_rgn = new RegionNode(4);
    Node* result_phi = new PhiNode(result_rgn, TypeInt::INT);
    Node* no_ctrl  = NULL;

    // Get start addr of source string
    Node* source = load_String_value(no_ctrl, receiver);
    Node* source_offset = load_String_offset(no_ctrl, receiver);
    Node* source_start = array_element_address(source, source_offset, T_CHAR);

    // Get length of source string
    Node* source_cnt  = load_String_length(no_ctrl, receiver);

    // Get start addr of substring
    Node* substr = load_String_value(no_ctrl, arg);
    Node* substr_offset = load_String_offset(no_ctrl, arg);
    Node* substr_start = array_element_address(substr, substr_offset, T_CHAR);

    // Get length of substring
    Node* substr_cnt  = load_String_length(no_ctrl, arg);

    // Check for substr count > string count
    Node* cmp = _gvn.transform(new CmpINode(substr_cnt, source_cnt));
    Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::gt));
    Node* if_gt = generate_slow_guard(bol, NULL);
    if (if_gt != NULL) {
      result_phi->init_req(2, intcon(-1));
      result_rgn->init_req(2, if_gt);
    }

    if (!stopped()) {
      // Check for substr count == 0
      cmp = _gvn.transform(new CmpINode(substr_cnt, intcon(0)));
      bol = _gvn.transform(new BoolNode(cmp, BoolTest::eq));
      Node* if_zero = generate_slow_guard(bol, NULL);
      if (if_zero != NULL) {
        result_phi->init_req(3, intcon(0));
        result_rgn->init_req(3, if_zero);
      }
    }

    if (!stopped()) {
      result = make_string_method_node(Op_StrIndexOf, source_start, source_cnt, substr_start, substr_cnt);
      result_phi->init_req(1, result);
      result_rgn->init_req(1, control());
    }
    set_control(_gvn.transform(result_rgn));
    record_for_igvn(result_rgn);
    result = _gvn.transform(result_phi);

  } else { // Use LibraryCallKit::string_indexOf
    // don't intrinsify if argument isn't a constant string.
    if (!arg->is_Con()) {
     return false;
    }
    const TypeOopPtr* str_type = _gvn.type(arg)->isa_oopptr();
    if (str_type == NULL) {
      return false;
    }
    ciInstanceKlass* klass = env()->String_klass();
    ciObject* str_const = str_type->const_oop();
    if (str_const == NULL || str_const->klass() != klass) {
      return false;
    }
    ciInstance* str = str_const->as_instance();
    assert(str != NULL, "must be instance");

    // Pull the constant pattern's value/offset/count fields at compile time.
    ciObject* v = str->field_value_by_offset(java_lang_String::value_offset_in_bytes()).as_object();
    ciTypeArray* pat = v->as_type_array(); // pattern (argument) character array

    int o;
    int c;
    if (java_lang_String::has_offset_field()) {
      o = str->field_value_by_offset(java_lang_String::offset_offset_in_bytes()).as_int();
      c = str->field_value_by_offset(java_lang_String::count_offset_in_bytes()).as_int();
    } else {
      o = 0;
      c = pat->length();
    }

    // constant strings have no offset and count == length which
    // simplifies the resulting code somewhat so let's optimize for that.
    if (o != 0 || c != pat->length()) {
      return false;
    }

    receiver = null_check(receiver, T_OBJECT);
    // NOTE: No null check on the argument is needed since it's a constant String oop.
    if (stopped()) {
      return true;
    }

    // The null string as a pattern always returns 0 (match at beginning of string)
    if (c == 0) {
      set_result(intcon(0));
      return true;
    }

    // Generate default indexOf
    jchar lastChar = pat->char_at(o + (c - 1));
    // 'cache' is a bitmask over the low bits of the pattern's characters
    // (sans the last one); used by the search loop to decide skips.
    int cache = 0;
    int i;
    for (i = 0; i < c - 1; i++) {
      assert(i < pat->length(), "out of range");
      cache |= (1 << (pat->char_at(o + i) & (sizeof(cache) * BitsPerByte - 1)));
    }

    // 'md2' is the distance from the last occurrence of lastChar (within
    // the first c-1 chars) to the end of the pattern.
    int md2 = c;
    for (i = 0; i < c - 1; i++) {
      assert(i < pat->length(), "out of range");
      if (pat->char_at(o + i) == lastChar) {
        md2 = (c - 1) - i;
      }
    }

    result = string_indexOf(receiver, pat, o, cache, md2);
  }
  set_result(result);
  return true;
}
1351
1352 //--------------------------round_double_node--------------------------------
1353 // Round a double node if necessary.
1354 Node* LibraryCallKit::round_double_node(Node* n) {
1355 if (Matcher::strict_fp_requires_explicit_rounding && UseSSE <= 1)
1356 n = _gvn.transform(new RoundDoubleNode(0, n));
1357 return n;
1358 }
1359
1360 //------------------------------inline_math-----------------------------------
1361 // public static double Math.abs(double)
1362 // public static double Math.sqrt(double)
1363 // public static double Math.log(double)
1364 // public static double Math.log10(double)
1365 bool LibraryCallKit::inline_math(vmIntrinsics::ID id) {
1366 Node* arg = round_double_node(argument(0));
1367 Node* n;
1368 switch (id) {
4170 r->init_req(2, iffalse);
4171
4172 // Post merge
4173 set_control(_gvn.transform(r));
4174 record_for_igvn(r);
4175
4176 C->set_has_split_ifs(true); // Has chance for split-if optimization
4177 result = phi;
4178 assert(result->bottom_type()->isa_int(), "must be");
4179 break;
4180 }
4181
4182 default:
4183 fatal_unexpected_iid(id);
4184 break;
4185 }
4186 set_result(_gvn.transform(result));
4187 return true;
4188 }
4189
4190 #ifdef _LP64
4191 #define XTOP ,top() /*additional argument*/
4192 #else //_LP64
4193 #define XTOP /*no additional argument*/
4194 #endif //_LP64
4195
4196 //----------------------inline_unsafe_copyMemory-------------------------
4197 // public native void sun.misc.Unsafe.copyMemory(Object srcBase, long srcOffset, Object destBase, long destOffset, long bytes);
4198 bool LibraryCallKit::inline_unsafe_copyMemory() {
4199 if (callee()->is_static()) return false; // caller must have the capability!
4200 null_check_receiver(); // null-check receiver
4201 if (stopped()) return true;
4202
4203 C->set_has_unsafe_access(true); // Mark eventual nmethod as "unsafe".
4204
4205 Node* src_ptr = argument(1); // type: oop
4206 Node* src_off = ConvL2X(argument(2)); // type: long
4207 Node* dst_ptr = argument(4); // type: oop
4208 Node* dst_off = ConvL2X(argument(5)); // type: long
4209 Node* size = ConvL2X(argument(7)); // type: long
4210
4211 assert(Unsafe_field_offset_to_byte_offset(11) == 11,
4212 "fieldOffset must be byte-scaled");
4213
4214 Node* src = make_unsafe_address(src_ptr, src_off);
4215 Node* dst = make_unsafe_address(dst_ptr, dst_off);
4984 // no receiver since it is static method
4985 Node *src = argument(0);
4986 Node *src_offset = argument(1);
4987 Node *dst = argument(2);
4988 Node *dst_offset = argument(3);
4989 Node *length = argument(4);
4990
4991 const Type* src_type = src->Value(&_gvn);
4992 const Type* dst_type = dst->Value(&_gvn);
4993 const TypeAryPtr* top_src = src_type->isa_aryptr();
4994 const TypeAryPtr* top_dest = dst_type->isa_aryptr();
4995 if (top_src == NULL || top_src->klass() == NULL ||
4996 top_dest == NULL || top_dest->klass() == NULL) {
4997 // failed array check
4998 return false;
4999 }
5000
5001 // Figure out the size and type of the elements we will be copying.
5002 BasicType src_elem = src_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5003 BasicType dst_elem = dst_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5004 if (src_elem != T_CHAR || dst_elem != T_BYTE) {
5005 return false;
5006 }
5007 Node* src_start = array_element_address(src, src_offset, src_elem);
5008 Node* dst_start = array_element_address(dst, dst_offset, dst_elem);
5009 // 'src_start' points to src array + scaled offset
5010 // 'dst_start' points to dst array + scaled offset
5011
5012 const TypeAryPtr* mtype = TypeAryPtr::BYTES;
5013 Node* enc = new EncodeISOArrayNode(control(), memory(mtype), src_start, dst_start, length);
5014 enc = _gvn.transform(enc);
5015 Node* res_mem = _gvn.transform(new SCMemProjNode(enc));
5016 set_memory(res_mem, mtype);
5017 set_result(enc);
5018 return true;
5019 }
5020
5021 //-------------inline_multiplyToLen-----------------------------------
// Expands the multiplyToLen intrinsic into a leaf call to the platform
// stub (StubRoutines::multiplyToLen()). Bails out (returns false) when
// the stub was not generated for this platform; returns true with the
// result array 'z' set as the intrinsic result otherwise.
bool LibraryCallKit::inline_multiplyToLen() {
  assert(UseMultiplyToLenIntrinsic, "not implemented on this platform");

  address stubAddr = StubRoutines::multiplyToLen();
  if (stubAddr == NULL) {
    return false; // Intrinsic's stub is not implemented on this platform
    // NOTE(review): lines elided in this excerpt — presumably the argument
    // and type checks plus the IdealKit section defining klass, stubName,
    // x_start/xlen, y_start/ylen and z. Verify against the full file.
    _gvn.set_type(z, TypeOopPtr::make_from_klass(klass));
    // Final sync IdealKit and GraphKit.
    final_sync(ideal);
#undef __

    Node* z_start = array_element_address(z, intcon(0), T_INT);

    // Leaf (no-FP) runtime call into the platform multiplyToLen stub.
    Node* call = make_runtime_call(RC_LEAF|RC_NO_FP,
                                   OptoRuntime::multiplyToLen_Type(),
                                   stubAddr, stubName, TypePtr::BOTTOM,
                                   x_start, xlen, y_start, ylen, z_start, zlen);
  } // original reexecute is set back here

  C->set_has_split_ifs(true); // Has chance for split-if optimization
  set_result(z);
  return true;
}
5122
5123 //-------------inline_squareToLen------------------------------------
// Expands the implSquareToLen intrinsic into a leaf call to the platform
// stub (StubRoutines::squareToLen()). Returns false when the stub is
// missing or the argument arrays fail the type checks; returns true with
// the result array 'z' as the intrinsic result otherwise.
bool LibraryCallKit::inline_squareToLen() {
  // NOTE(review): "implementated" typo in the assert message below.
  assert(UseSquareToLenIntrinsic, "not implementated on this platform");

  address stubAddr = StubRoutines::squareToLen();
  if (stubAddr == NULL) {
    return false; // Intrinsic's stub is not implemented on this platform
  }
  const char* stubName = "squareToLen";

  assert(callee()->signature()->size() == 4, "implSquareToLen has 4 parameters");

  Node* x = argument(0);
  Node* len = argument(1);
  Node* z = argument(2);
  Node* zlen = argument(3);

  // Both arguments must be arrays with known klasses.
  const Type* x_type = x->Value(&_gvn);
  const Type* z_type = z->Value(&_gvn);
  const TypeAryPtr* top_x = x_type->isa_aryptr();
  const TypeAryPtr* top_z = z_type->isa_aryptr();
  if (top_x == NULL || top_x->klass() == NULL ||
      top_z == NULL || top_z->klass() == NULL) {
  // NOTE(review): lines elided in this excerpt — presumably the failed-check
  // return and the extraction of x_elem. Verify against the full file.
  BasicType z_elem = z_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
  if (x_elem != T_INT || z_elem != T_INT) {
    return false;
  }


  Node* x_start = array_element_address(x, intcon(0), x_elem);
  Node* z_start = array_element_address(z, intcon(0), z_elem);

  // Leaf (no-FP) runtime call into the platform squareToLen stub.
  Node* call = make_runtime_call(RC_LEAF|RC_NO_FP,
                                 OptoRuntime::squareToLen_Type(),
                                 stubAddr, stubName, TypePtr::BOTTOM,
                                 x_start, len, z_start, zlen);

  set_result(z);
  return true;
}
5168
5169 //-------------inline_mulAdd------------------------------------------
5170 bool LibraryCallKit::inline_mulAdd() {
5171 assert(UseMulAddIntrinsic, "not implementated on this platform");
5172
5173 address stubAddr = StubRoutines::mulAdd();
5174 if (stubAddr == NULL) {
5175 return false; // Intrinsic's stub is not implemented on this platform
5176 }
5177 const char* stubName = "mulAdd";
5178
5179 assert(callee()->signature()->size() == 5, "mulAdd has 5 parameters");
5180
5181 Node* out = argument(0);
5182 Node* in = argument(1);
5183 Node* offset = argument(2);
5184 Node* len = argument(3);
5185 Node* k = argument(4);
5186
5187 const Type* out_type = out->Value(&_gvn);
5188 const Type* in_type = in->Value(&_gvn);
5189 const TypeAryPtr* top_out = out_type->isa_aryptr();
5190 const TypeAryPtr* top_in = in_type->isa_aryptr();
5191 if (top_out == NULL || top_out->klass() == NULL ||
|
183 }
  // Thin wrapper: array guard with obj_array=true, not_array=false.
  Node* generate_objArray_guard(Node* kls, RegionNode* region) {
    return generate_array_guard_common(kls, region, true, false);
  }
  // Thin wrapper: array guard with obj_array=true, not_array=true.
  Node* generate_non_objArray_guard(Node* kls, RegionNode* region) {
    return generate_array_guard_common(kls, region, true, true);
  }
190 Node* generate_array_guard_common(Node* kls, RegionNode* region,
191 bool obj_array, bool not_array);
192 Node* generate_virtual_guard(Node* obj_klass, RegionNode* slow_region);
193 CallJavaNode* generate_method_call(vmIntrinsics::ID method_id,
194 bool is_virtual = false, bool is_static = false);
  // Thin wrapper: method call with is_virtual=false, is_static=true.
  CallJavaNode* generate_method_call_static(vmIntrinsics::ID method_id) {
    return generate_method_call(method_id, false, true);
  }
  // Thin wrapper: method call with is_virtual=true, is_static=false.
  CallJavaNode* generate_method_call_virtual(vmIntrinsics::ID method_id) {
    return generate_method_call(method_id, true, false);
  }
201 Node * load_field_from_object(Node * fromObj, const char * fieldName, const char * fieldTypeString, bool is_exact, bool is_static, ciInstanceKlass * fromKls);
202
203 Node* make_string_method_node(int opcode, Node* str1_start, Node* cnt1, Node* str2_start, Node* cnt2, StrIntrinsicNode::ArgEnc ae);
204 bool inline_string_compareTo(StrIntrinsicNode::ArgEnc ae);
205 bool inline_string_indexOf(StrIntrinsicNode::ArgEnc ae);
206 bool inline_string_indexOfI(StrIntrinsicNode::ArgEnc ae);
207 bool inline_string_indexOfChar();
208 bool inline_string_equals(StrIntrinsicNode::ArgEnc ae);
209 bool inline_string_toBytesU();
210 bool inline_string_getCharsU();
211 bool inline_string_copy(bool compress);
212 bool inline_string_char_access(bool is_store);
213 Node* round_double_node(Node* n);
214 bool runtime_math(const TypeFunc* call_type, address funcAddr, const char* funcName);
215 bool inline_math_native(vmIntrinsics::ID id);
216 bool inline_trig(vmIntrinsics::ID id);
217 bool inline_math(vmIntrinsics::ID id);
218 template <typename OverflowOp>
219 bool inline_math_overflow(Node* arg1, Node* arg2);
220 void inline_math_mathExact(Node* math, Node* test);
221 bool inline_math_addExactI(bool is_increment);
222 bool inline_math_addExactL(bool is_increment);
223 bool inline_math_multiplyExactI();
224 bool inline_math_multiplyExactL();
225 bool inline_math_negateExactI();
226 bool inline_math_negateExactL();
227 bool inline_math_subtractExactI(bool is_decrement);
228 bool inline_math_subtractExactL(bool is_decrement);
229 bool inline_pow();
230 Node* finish_pow_exp(Node* result, Node* x, Node* y, const TypeFunc* call_type, address funcAddr, const char* funcName);
231 bool inline_min_max(vmIntrinsics::ID id);
232 bool inline_notify(vmIntrinsics::ID id);
238 // Generates the guards that check whether the result of
239 // Unsafe.getObject should be recorded in an SATB log buffer.
240 void insert_pre_barrier(Node* base_oop, Node* offset, Node* pre_val, bool need_mem_bar);
241 bool inline_unsafe_access(bool is_native_ptr, bool is_store, BasicType type, bool is_volatile);
242 static bool klass_needs_init_guard(Node* kls);
243 bool inline_unsafe_allocate();
244 bool inline_unsafe_copyMemory();
245 bool inline_native_currentThread();
246 #ifdef TRACE_HAVE_INTRINSICS
247 bool inline_native_classID();
248 bool inline_native_threadID();
249 #endif
250 bool inline_native_time_funcs(address method, const char* funcName);
251 bool inline_native_isInterrupted();
252 bool inline_native_Class_query(vmIntrinsics::ID id);
253 bool inline_native_subtype_check();
254
255 bool inline_native_newArray();
256 bool inline_native_getLength();
257 bool inline_array_copyOf(bool is_copyOfRange);
258 bool inline_array_equals(StrIntrinsicNode::ArgEnc ae);
259 void copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array, bool card_mark);
260 bool inline_native_clone(bool is_virtual);
261 bool inline_native_Reflection_getCallerClass();
262 // Helper function for inlining native object hash method
263 bool inline_native_hashcode(bool is_virtual, bool is_static);
264 bool inline_native_getClass();
265
266 // Helper functions for inlining arraycopy
267 bool inline_arraycopy();
268 AllocateArrayNode* tightly_coupled_allocation(Node* ptr,
269 RegionNode* slow_region);
270 JVMState* arraycopy_restore_alloc_state(AllocateArrayNode* alloc, int& saved_reexecute_sp);
271 void arraycopy_move_allocation_here(AllocateArrayNode* alloc, Node* dest, JVMState* saved_jvms, int saved_reexecute_sp);
272
273 typedef enum { LS_xadd, LS_xchg, LS_cmpxchg } LoadStoreKind;
274 bool inline_unsafe_load_store(BasicType type, LoadStoreKind kind);
275 bool inline_unsafe_ordered_store(BasicType type);
276 bool inline_unsafe_fence(vmIntrinsics::ID id);
277 bool inline_fp_conversions(vmIntrinsics::ID id);
278 bool inline_number_methods(vmIntrinsics::ID id);
285 Node* get_original_key_start_from_aescrypt_object(Node* aescrypt_object);
286 bool inline_ghash_processBlocks();
287 bool inline_sha_implCompress(vmIntrinsics::ID id);
288 bool inline_digestBase_implCompressMB(int predicate);
289 bool inline_sha_implCompressMB(Node* digestBaseObj, ciInstanceKlass* instklass_SHA,
290 bool long_state, address stubAddr, const char *stubName,
291 Node* src_start, Node* ofs, Node* limit);
292 Node* get_state_from_sha_object(Node *sha_object);
293 Node* get_state_from_sha5_object(Node *sha_object);
294 Node* inline_digestBase_implCompressMB_predicate(int predicate);
295 bool inline_encodeISOArray();
296 bool inline_updateCRC32();
297 bool inline_updateBytesCRC32();
298 bool inline_updateByteBufferCRC32();
299 Node* get_table_from_crc32c_class(ciInstanceKlass *crc32c_class);
300 bool inline_updateBytesCRC32C();
301 bool inline_updateDirectByteBufferCRC32C();
302 bool inline_updateBytesAdler32();
303 bool inline_updateByteBufferAdler32();
304 bool inline_multiplyToLen();
305 bool inline_hasNegatives();
306 bool inline_squareToLen();
307 bool inline_mulAdd();
308 bool inline_montgomeryMultiply();
309 bool inline_montgomerySquare();
310
311 bool inline_profileBoolean();
312 bool inline_isCompileConstant();
313 };
314
315 //---------------------------make_vm_intrinsic----------------------------
316 CallGenerator* Compile::make_vm_intrinsic(ciMethod* m, bool is_virtual) {
317 vmIntrinsics::ID id = m->intrinsic_id();
318 assert(id != vmIntrinsics::_none, "must be a VM intrinsic");
319
320 if (!m->is_loaded()) {
321 // Do not attempt to inline unloaded methods.
322 return NULL;
323 }
324
325 C2Compiler* compiler = (C2Compiler*)CompileBroker::compiler(CompLevel_full_optimization);
445 // The intrinsic bailed out
446 if (C->print_intrinsics() || C->print_inlining()) {
447 if (jvms->has_method()) {
448 // Not a root compile.
449 const char* msg = "failed to generate predicate for intrinsic";
450 C->print_inlining(kit.callee(), jvms->depth() - 1, bci, msg);
451 } else {
452 // Root compile
453 C->print_inlining_stream()->print("Did not generate predicate for intrinsic %s%s at bci:%d in",
454 vmIntrinsics::name_at(intrinsic_id()),
455 (is_virtual() ? " (virtual)" : ""), bci);
456 }
457 }
458 C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_failed);
459 return NULL;
460 }
461
// Dispatch on the intrinsic id and invoke the matching inline_* generator.
// Returns true when the intrinsic was successfully expanded into the current
// ideal graph; false tells the caller to fall back to a regular call.
// 'predicate' selects among alternative expansions for predicated
// intrinsics (e.g. digestBase_implCompressMB).
bool LibraryCallKit::try_to_inline(int predicate) {
  // Handle symbolic names for otherwise undistinguished boolean switches:
  const bool is_store = true;
  const bool is_compress = true;
  const bool is_native_ptr = true;
  const bool is_static = true;
  const bool is_volatile = true;

  if (!jvms()->has_method()) {
    // Root JVMState has a null method.
    assert(map()->memory()->Opcode() == Op_Parm, "");
    // Insert the memory aliasing node
    set_all_memory(reset_memory());
  }
  assert(merged_memory(), "");


  switch (intrinsic_id()) {
  case vmIntrinsics::_hashCode: return inline_native_hashcode(intrinsic()->is_virtual(), !is_static);
  case vmIntrinsics::_identityHashCode: return inline_native_hashcode(/*!virtual*/ false, is_static);
  case vmIntrinsics::_getClass: return inline_native_getClass();

  case vmIntrinsics::_dsin:
  case vmIntrinsics::_dcos:
    if (InlineNotify) {
      return inline_notify(intrinsic_id());
    }
    return false;

  // Math.xxxExact family: the bool argument distinguishes the
  // increment/decrement flavors from plain add/subtract.
  case vmIntrinsics::_addExactI: return inline_math_addExactI(false /* add */);
  case vmIntrinsics::_addExactL: return inline_math_addExactL(false /* add */);
  case vmIntrinsics::_decrementExactI: return inline_math_subtractExactI(true /* decrement */);
  case vmIntrinsics::_decrementExactL: return inline_math_subtractExactL(true /* decrement */);
  case vmIntrinsics::_incrementExactI: return inline_math_addExactI(true /* increment */);
  case vmIntrinsics::_incrementExactL: return inline_math_addExactL(true /* increment */);
  case vmIntrinsics::_multiplyExactI: return inline_math_multiplyExactI();
  case vmIntrinsics::_multiplyExactL: return inline_math_multiplyExactL();
  case vmIntrinsics::_negateExactI: return inline_math_negateExactI();
  case vmIntrinsics::_negateExactL: return inline_math_negateExactL();
  case vmIntrinsics::_subtractExactI: return inline_math_subtractExactI(false /* subtract */);
  case vmIntrinsics::_subtractExactL: return inline_math_subtractExactL(false /* subtract */);

  case vmIntrinsics::_arraycopy: return inline_arraycopy();

  // String intrinsics: the StrIntrinsicNode::ArgEnc argument encodes the
  // Latin1/UTF16 encoding combination of the two operands.
  case vmIntrinsics::_compareToL: return inline_string_compareTo(StrIntrinsicNode::LL);
  case vmIntrinsics::_compareToU: return inline_string_compareTo(StrIntrinsicNode::UU);
  case vmIntrinsics::_compareToLU: return inline_string_compareTo(StrIntrinsicNode::LU);
  case vmIntrinsics::_compareToUL: return inline_string_compareTo(StrIntrinsicNode::UL);

  case vmIntrinsics::_indexOfL: return inline_string_indexOf(StrIntrinsicNode::LL);
  case vmIntrinsics::_indexOfU: return inline_string_indexOf(StrIntrinsicNode::UU);
  case vmIntrinsics::_indexOfUL: return inline_string_indexOf(StrIntrinsicNode::UL);
  case vmIntrinsics::_indexOfIL: return inline_string_indexOfI(StrIntrinsicNode::LL);
  case vmIntrinsics::_indexOfIU: return inline_string_indexOfI(StrIntrinsicNode::UU);
  case vmIntrinsics::_indexOfIUL: return inline_string_indexOfI(StrIntrinsicNode::UL);
  case vmIntrinsics::_indexOfU_char: return inline_string_indexOfChar();

  case vmIntrinsics::_equalsL: return inline_string_equals(StrIntrinsicNode::LL);
  case vmIntrinsics::_equalsU: return inline_string_equals(StrIntrinsicNode::UU);

  case vmIntrinsics::_toBytesStringU: return inline_string_toBytesU();
  case vmIntrinsics::_getCharsStringU: return inline_string_getCharsU();
  case vmIntrinsics::_getCharStringU: return inline_string_char_access(!is_store);
  case vmIntrinsics::_putCharStringU: return inline_string_char_access( is_store);

  case vmIntrinsics::_compressStringC:
  case vmIntrinsics::_compressStringB: return inline_string_copy( is_compress);
  case vmIntrinsics::_inflateStringC:
  case vmIntrinsics::_inflateStringB: return inline_string_copy(!is_compress);

  // Unsafe field/array accesses (non-volatile, field-offset addressed).
  case vmIntrinsics::_getObject: return inline_unsafe_access(!is_native_ptr, !is_store, T_OBJECT, !is_volatile);
  case vmIntrinsics::_getBoolean: return inline_unsafe_access(!is_native_ptr, !is_store, T_BOOLEAN, !is_volatile);
  case vmIntrinsics::_getByte: return inline_unsafe_access(!is_native_ptr, !is_store, T_BYTE, !is_volatile);
  case vmIntrinsics::_getShort: return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT, !is_volatile);
  case vmIntrinsics::_getChar: return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR, !is_volatile);
  case vmIntrinsics::_getInt: return inline_unsafe_access(!is_native_ptr, !is_store, T_INT, !is_volatile);
  case vmIntrinsics::_getLong: return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG, !is_volatile);
  case vmIntrinsics::_getFloat: return inline_unsafe_access(!is_native_ptr, !is_store, T_FLOAT, !is_volatile);
  case vmIntrinsics::_getDouble: return inline_unsafe_access(!is_native_ptr, !is_store, T_DOUBLE, !is_volatile);
  case vmIntrinsics::_putObject: return inline_unsafe_access(!is_native_ptr, is_store, T_OBJECT, !is_volatile);
  case vmIntrinsics::_putBoolean: return inline_unsafe_access(!is_native_ptr, is_store, T_BOOLEAN, !is_volatile);
  case vmIntrinsics::_putByte: return inline_unsafe_access(!is_native_ptr, is_store, T_BYTE, !is_volatile);
  case vmIntrinsics::_putShort: return inline_unsafe_access(!is_native_ptr, is_store, T_SHORT, !is_volatile);
  case vmIntrinsics::_putChar: return inline_unsafe_access(!is_native_ptr, is_store, T_CHAR, !is_volatile);
  case vmIntrinsics::_putInt: return inline_unsafe_access(!is_native_ptr, is_store, T_INT, !is_volatile);
  case vmIntrinsics::_putLong: return inline_unsafe_access(!is_native_ptr, is_store, T_LONG, !is_volatile);
  case vmIntrinsics::_putFloat: return inline_unsafe_access(!is_native_ptr, is_store, T_FLOAT, !is_volatile);
  case vmIntrinsics::_putDouble: return inline_unsafe_access(!is_native_ptr, is_store, T_DOUBLE, !is_volatile);

  case vmIntrinsics::_loadFence:
  case vmIntrinsics::_storeFence:
  case vmIntrinsics::_fullFence: return inline_unsafe_fence(intrinsic_id());

  case vmIntrinsics::_currentThread: return inline_native_currentThread();
  case vmIntrinsics::_isInterrupted: return inline_native_isInterrupted();

#ifdef TRACE_HAVE_INTRINSICS
  case vmIntrinsics::_classID: return inline_native_classID();
  case vmIntrinsics::_threadID: return inline_native_threadID();
  case vmIntrinsics::_counterTime: return inline_native_time_funcs(CAST_FROM_FN_PTR(address, TRACE_TIME_METHOD), "counterTime");
#endif
  case vmIntrinsics::_currentTimeMillis: return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeMillis), "currentTimeMillis");
  case vmIntrinsics::_nanoTime: return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeNanos), "nanoTime");
  case vmIntrinsics::_allocateInstance: return inline_unsafe_allocate();
  case vmIntrinsics::_copyMemory: return inline_unsafe_copyMemory();
  case vmIntrinsics::_newArray: return inline_native_newArray();
  case vmIntrinsics::_getLength: return inline_native_getLength();
  case vmIntrinsics::_copyOf: return inline_array_copyOf(false);
  case vmIntrinsics::_copyOfRange: return inline_array_copyOf(true);
  case vmIntrinsics::_equalsB: return inline_array_equals(StrIntrinsicNode::LL);
  case vmIntrinsics::_equalsC: return inline_array_equals(StrIntrinsicNode::UU);
  case vmIntrinsics::_clone: return inline_native_clone(intrinsic()->is_virtual());

  case vmIntrinsics::_isAssignableFrom: return inline_native_subtype_check();

  // java.lang.Class reflective queries all funnel through one generator.
  case vmIntrinsics::_isInstance:
  case vmIntrinsics::_getModifiers:
  case vmIntrinsics::_isInterface:
  case vmIntrinsics::_isArray:
  case vmIntrinsics::_isPrimitive:
  case vmIntrinsics::_getSuperclass:
  case vmIntrinsics::_getClassAccessFlags: return inline_native_Class_query(intrinsic_id());

  case vmIntrinsics::_floatToRawIntBits:
  case vmIntrinsics::_floatToIntBits:
  case vmIntrinsics::_intBitsToFloat:
  case vmIntrinsics::_doubleToRawLongBits:
  case vmIntrinsics::_doubleToLongBits:
  case vmIntrinsics::_longBitsToDouble: return inline_fp_conversions(intrinsic_id());

  case vmIntrinsics::_numberOfLeadingZeros_i:
    return inline_digestBase_implCompressMB(predicate);

  case vmIntrinsics::_multiplyToLen:
    return inline_multiplyToLen();

  case vmIntrinsics::_squareToLen:
    return inline_squareToLen();

  case vmIntrinsics::_mulAdd:
    return inline_mulAdd();

  case vmIntrinsics::_montgomeryMultiply:
    return inline_montgomeryMultiply();
  case vmIntrinsics::_montgomerySquare:
    return inline_montgomerySquare();

  case vmIntrinsics::_ghash_processBlocks:
    return inline_ghash_processBlocks();

  case vmIntrinsics::_encodeISOArray:
  case vmIntrinsics::_encodeByteISOArray:
    return inline_encodeISOArray();

  case vmIntrinsics::_updateCRC32:
    return inline_updateCRC32();
  case vmIntrinsics::_updateBytesCRC32:
    return inline_updateBytesCRC32();
  case vmIntrinsics::_updateByteBufferCRC32:
    return inline_updateByteBufferCRC32();

  case vmIntrinsics::_updateBytesCRC32C:
    return inline_updateBytesCRC32C();
  case vmIntrinsics::_updateDirectByteBufferCRC32C:
    return inline_updateDirectByteBufferCRC32C();

  case vmIntrinsics::_updateBytesAdler32:
    return inline_updateBytesAdler32();
  case vmIntrinsics::_updateByteBufferAdler32:
    return inline_updateByteBufferAdler32();

  case vmIntrinsics::_profileBoolean:
    return inline_profileBoolean();
  case vmIntrinsics::_isCompileConstant:
    return inline_isCompileConstant();

  case vmIntrinsics::_hasNegatives:
    return inline_hasNegatives();

  default:
    // If you get here, it may be that someone has added a new intrinsic
    // to the list in vmSymbols.hpp without implementing it here.
#ifndef PRODUCT
    if ((PrintMiscellaneous && (Verbose || WizardMode)) || PrintOpto) {
      tty->print_cr("*** Warning: Unimplemented intrinsic %s(%d)",
                    vmIntrinsics::name_at(intrinsic_id()), intrinsic_id());
    }
#endif
    return false;
  }
}
758
759 Node* LibraryCallKit::try_to_predicate(int predicate) {
760 if (!jvms()->has_method()) {
761 // Root JVMState has a null method.
762 assert(map()->memory()->Opcode() == Op_Parm, "");
763 // Insert the memory aliasing node
764 set_all_memory(reset_memory());
765 }
891 Node* cmp_lt = _gvn.transform(new CmpUNode(array_length, last));
892 Node* bol_lt = _gvn.transform(new BoolNode(cmp_lt, BoolTest::lt));
893 Node* is_over = generate_guard(bol_lt, region, PROB_MIN);
894 return is_over;
895 }
896
897
898 //--------------------------generate_current_thread--------------------
899 Node* LibraryCallKit::generate_current_thread(Node* &tls_output) {
900 ciKlass* thread_klass = env()->Thread_klass();
901 const Type* thread_type = TypeOopPtr::make_from_klass(thread_klass)->cast_to_ptr_type(TypePtr::NotNull);
902 Node* thread = _gvn.transform(new ThreadLocalNode());
903 Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::threadObj_offset()));
904 Node* threadObj = make_load(NULL, p, thread_type, T_OBJECT, MemNode::unordered);
905 tls_output = thread;
906 return threadObj;
907 }
908
909
910 //------------------------------make_string_method_node------------------------
911 // Helper method for String intrinsic functions. This version is called with
912 // str1 and str2 pointing to byte[] nodes containing Latin1 or UTF16 encoded
913 // characters (depending on 'is_byte'). cnt1 and cnt2 are pointing to Int nodes
914 // containing the lengths of str1 and str2.
915 Node* LibraryCallKit::make_string_method_node(int opcode, Node* str1_start, Node* cnt1, Node* str2_start, Node* cnt2, StrIntrinsicNode::ArgEnc ae) {
916 Node* result = NULL;
917 switch (opcode) {
918 case Op_StrIndexOf:
919 result = new StrIndexOfNode(control(), memory(TypeAryPtr::BYTES),
920 str1_start, cnt1, str2_start, cnt2, ae);
921 break;
922 case Op_StrComp:
923 result = new StrCompNode(control(), memory(TypeAryPtr::BYTES),
924 str1_start, cnt1, str2_start, cnt2, ae);
925 break;
926 case Op_StrEquals:
927 result = new StrEqualsNode(control(), memory(TypeAryPtr::BYTES),
928 str1_start, str2_start, cnt1, ae);
929 break;
930 default:
931 ShouldNotReachHere();
932 return NULL;
933 }
934
935 // All these intrinsics have checks.
936 C->set_has_split_ifs(true); // Has chance for split-if optimization
937
938 return _gvn.transform(result);
939 }
940
941 //------------------------------inline_string_compareTo------------------------
942 bool LibraryCallKit::inline_string_compareTo(StrIntrinsicNode::ArgEnc ae) {
943 Node* arg1 = argument(0);
944 Node* arg2 = argument(1);
945
946 // Get start addr and length of first argument
947 Node* arg1_start = array_element_address(arg1, intcon(0), T_BYTE);
948 Node* arg1_cnt = load_array_length(arg1);
949
950 // Get start addr and length of second argument
951 Node* arg2_start = array_element_address(arg2, intcon(0), T_BYTE);
952 Node* arg2_cnt = load_array_length(arg2);
953
954 Node* result = make_string_method_node(Op_StrComp, arg1_start, arg1_cnt, arg2_start, arg2_cnt, ae);
955 set_result(result);
956 return true;
957 }
958
959 //------------------------------inline_string_equals------------------------
960 bool LibraryCallKit::inline_string_equals(StrIntrinsicNode::ArgEnc ae) {
961 Node* arg1 = argument(0);
962 Node* arg2 = argument(1);
963
964 // paths (plus control) merge
965 RegionNode* region = new RegionNode(3);
966 Node* phi = new PhiNode(region, TypeInt::BOOL);
967
968 if (!stopped()) {
969 // Get start addr and length of first argument
970 Node* arg1_start = array_element_address(arg1, intcon(0), T_BYTE);
971 Node* arg1_cnt = load_array_length(arg1);
972
973 // Get start addr and length of second argument
974 Node* arg2_start = array_element_address(arg2, intcon(0), T_BYTE);
975 Node* arg2_cnt = load_array_length(arg2);
976
977 // Check for arg1_cnt != arg2_cnt
978 Node* cmp = _gvn.transform(new CmpINode(arg1_cnt, arg2_cnt));
979 Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::ne));
980 Node* if_ne = generate_slow_guard(bol, NULL);
981 if (if_ne != NULL) {
982 phi->init_req(2, intcon(0));
983 region->init_req(2, if_ne);
984 }
985
986 // Check for count == 0 is done by assembler code for StrEquals.
987
988 if (!stopped()) {
989 Node* equals = make_string_method_node(Op_StrEquals, arg1_start, arg1_cnt, arg2_start, arg2_cnt, ae);
990 phi->init_req(1, equals);
991 region->init_req(1, control());
992 }
993 }
994
995 // post merge
996 set_control(_gvn.transform(region));
997 record_for_igvn(region);
998
999 set_result(_gvn.transform(phi));
1000 return true;
1001 }
1002
1003 //------------------------------inline_array_equals----------------------------
1004 bool LibraryCallKit::inline_array_equals(StrIntrinsicNode::ArgEnc ae) {
1005 assert(ae == StrIntrinsicNode::UU || ae == StrIntrinsicNode::LL, "unsupported array types");
1006 Node* arg1 = argument(0);
1007 Node* arg2 = argument(1);
1008
1009 const TypeAryPtr* mtype = (ae == StrIntrinsicNode::UU) ? TypeAryPtr::CHARS : TypeAryPtr::BYTES;
1010 set_result(_gvn.transform(new AryEqNode(control(), memory(mtype), arg1, arg2, ae)));
1011 return true;
1012 }
1013
//------------------------------inline_hasNegatives------------------------------
// Intrinsic for the static method StringCoding.hasNegatives(byte[] ba, int off, int len).
// Guards the [offset, offset+len) window against the array bounds, then emits
// a HasNegativesNode over the byte[] memory slice; out-of-range inputs take
// an uncommon trap instead.
bool LibraryCallKit::inline_hasNegatives() {
  // Give up on the intrinsic if this site has already deoptimized repeatedly.
  if (too_many_traps(Deoptimization::Reason_intrinsic)) return false;

  assert(callee()->signature()->size() == 3, "hasNegatives has 3 parameters");
  // no receiver since it is static method
  Node* ba = argument(0);
  Node* offset = argument(1);
  Node* len = argument(2);

  // Region that collects every failing guard's control path.
  RegionNode* bailout = new RegionNode(1);
  record_for_igvn(bailout);

  // offset must not be negative.
  generate_negative_guard(offset, bailout);

  // offset + length must not exceed length of ba.
  generate_limit_guard(offset, len, load_array_length(ba), bailout);

  if (bailout->req() > 1) {
    // At least one guard can fail at runtime: deoptimize on that path and
    // let the interpreter handle it (may recompile without this intrinsic).
    PreserveJVMState pjvms(this);
    set_control(_gvn.transform(bailout));
    uncommon_trap(Deoptimization::Reason_intrinsic,
                  Deoptimization::Action_maybe_recompile);
  }
  if (!stopped()) {
    // Fast path: the scan starts at &ba[offset].
    Node* ba_start = array_element_address(ba, offset, T_BYTE);
    Node* result = new HasNegativesNode(control(), memory(TypeAryPtr::BYTES), ba_start, len);
    set_result(_gvn.transform(result));
  }
  return true;
}
1046
//------------------------------inline_string_indexOf------------------------
// Intrinsic for whole-string substring search. 'ae' encodes the argument
// encodings (LL/UU/UL). The trivial cases are handled with guarded paths
// merged through a region/phi: substring longer than string -> -1, empty
// substring -> 0; otherwise a StrIndexOf node does the search.
bool LibraryCallKit::inline_string_indexOf(StrIntrinsicNode::ArgEnc ae) {
  if (!Matcher::has_match_rule(Op_StrIndexOf) || !UseSSE42Intrinsics) {
    return false;  // no platform support for the StrIndexOf node
  }
  Node* src = argument(0);
  Node* tgt = argument(1);

  // Make the merge point (3 incoming result paths + self-loop slot 0)
  RegionNode* result_rgn = new RegionNode(4);
  Node* result_phi = new PhiNode(result_rgn, TypeInt::INT);

  // Get start addr and length of source string
  Node* src_start = array_element_address(src, intcon(0), T_BYTE);
  Node* src_count = load_array_length(src);

  // Get start addr and length of substring
  Node* tgt_start = array_element_address(tgt, intcon(0), T_BYTE);
  Node* tgt_count = load_array_length(tgt);

  if (ae == StrIntrinsicNode::UU || ae == StrIntrinsicNode::UL) {
    // Divide src size by 2 if String is UTF16 encoded (array length is in bytes)
    src_count = _gvn.transform(new RShiftINode(src_count, intcon(1)));
  }
  if (ae == StrIntrinsicNode::UU) {
    // Divide substring size by 2 if String is UTF16 encoded
    tgt_count = _gvn.transform(new RShiftINode(tgt_count, intcon(1)));
  }

  // Check for substr count > string count
  Node* cmp = _gvn.transform(new CmpINode(tgt_count, src_count));
  Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::gt));
  Node* if_gt = generate_slow_guard(bol, NULL);
  if (if_gt != NULL) {
    // Substring longer than string: result is -1 on this path.
    result_phi->init_req(2, intcon(-1));
    result_rgn->init_req(2, if_gt);
  }

  if (!stopped()) {
    // Check for substr count == 0
    cmp = _gvn.transform(new CmpINode(tgt_count, intcon(0)));
    bol = _gvn.transform(new BoolNode(cmp, BoolTest::eq));
    Node* if_zero = generate_slow_guard(bol, NULL);
    if (if_zero != NULL) {
      // Empty substring matches at index 0.
      result_phi->init_req(3, intcon(0));
      result_rgn->init_req(3, if_zero);
    }
  }

  if (!stopped()) {
    // General case: emit the StrIndexOf node for the remaining control path.
    Node* result = make_string_method_node(Op_StrIndexOf, src_start, src_count, tgt_start, tgt_count, ae);
    result_phi->init_req(1, result);
    result_rgn->init_req(1, control());
  }
  set_control(_gvn.transform(result_rgn));
  record_for_igvn(result_rgn);
  set_result(_gvn.transform(result_phi));

  return true;
}
1107
//-----------------------------inline_string_indexOf-----------------------
// Intrinsic for the 5-argument indexOf(src, srcCount, tgt, tgtCount, fromIndex).
// Searches src starting at from_index; returns the match index relative to
// the start of the original string, or -1 if not found.
bool LibraryCallKit::inline_string_indexOfI(StrIntrinsicNode::ArgEnc ae) {
  if (!Matcher::has_match_rule(Op_StrIndexOf) || !UseSSE42Intrinsics) {
    return false;  // no platform support for the StrIndexOf node
  }
  assert(callee()->signature()->size() == 5, "String.indexOf() has 5 arguments");
  Node* src = argument(0); // byte[]
  Node* src_count = argument(1);
  Node* tgt = argument(2); // byte[]
  Node* tgt_count = argument(3);
  Node* from_index = argument(4);

  // Java code which calls this method has range checks for from_index value.
  // Only the remaining [from_index, src_count) window is searched.
  src_count = _gvn.transform(new SubINode(src_count, from_index));

  // Multiply byte array index by 2 if String is UTF16 encoded
  Node* src_offset = (ae == StrIntrinsicNode::LL) ? from_index : _gvn.transform(new LShiftINode(from_index, intcon(1)));
  Node* src_start = array_element_address(src, src_offset, T_BYTE);
  Node* tgt_start = array_element_address(tgt, intcon(0), T_BYTE);

  Node* result = make_string_method_node(Op_StrIndexOf, src_start, src_count, tgt_start, tgt_count, ae);

  // The result is index relative to from_index if substring was found, -1 otherwise.
  // Generate code which will fold into cmove.
  RegionNode* region = new RegionNode(3);
  Node* phi = new PhiNode(region, TypeInt::INT);

  Node* cmp = _gvn.transform(new CmpINode(result, intcon(0)));
  Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::lt));

  Node* if_lt = generate_slow_guard(bol, NULL);
  if (if_lt != NULL) {
    // result == -1: pass the not-found value through unchanged.
    phi->init_req(2, result);
    region->init_req(2, if_lt);
  }
  if (!stopped()) {
    // Found: rebase the relative match index onto the original string.
    result = _gvn.transform(new AddINode(result, from_index));
    phi->init_req(1, result);
    region->init_req(1, control());
  }

  set_control(_gvn.transform(region));
  record_for_igvn(region);
  set_result(_gvn.transform(phi));

  return true;
}
1156
//-----------------------------inline_string_indexOfChar-----------------------
// Intrinsic for single-char search in a UTF16 byte[]: scans chars in
// [from_index, max) for 'tgt' (an int char value) and returns the absolute
// char index, or -1 if not found.
bool LibraryCallKit::inline_string_indexOfChar() {
  if (!Matcher::has_match_rule(Op_StrIndexOfChar) || !(UseSSE > 4)) {
    return false;  // platform lacks support for the StrIndexOfChar node
  }
  assert(callee()->signature()->size() == 4, "String.indexOfChar() has 4 arguments");
  Node* src = argument(0); // byte[]
  Node* tgt = argument(1); // tgt is int ch
  Node* from_index = argument(2);
  Node* max = argument(3);

  // Convert char index to byte offset (UTF16: 2 bytes per char).
  Node* src_offset = _gvn.transform(new LShiftINode(from_index, intcon(1)));
  Node* src_start = array_element_address(src, src_offset, T_BYTE);

  // Number of chars left to search.
  Node* src_count = _gvn.transform(new SubINode(max, from_index));

  RegionNode* region = new RegionNode(3);
  Node* phi = new PhiNode(region, TypeInt::INT);

  Node* result = new StrIndexOfCharNode(control(), memory(TypeAryPtr::BYTES), src_start, src_count, tgt, StrIntrinsicNode::none);
  C->set_has_split_ifs(true); // Has chance for split-if optimization
  // NOTE(review): the return value of transform() is deliberately not
  // reassigned here; 'result' is used below as created -- confirm this is
  // the intended GVN usage (cf. other intrinsics that reassign).
  _gvn.transform(result);

  Node* cmp = _gvn.transform(new CmpINode(result, intcon(0)));
  Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::lt));

  Node* if_lt = generate_slow_guard(bol, NULL);
  if (if_lt != NULL) {
    // result == -1: char not found; pass -1 through unchanged.
    phi->init_req(2, result);
    region->init_req(2, if_lt);
  }
  if (!stopped()) {
    // Found: result is relative to from_index; rebase to an absolute index.
    result = _gvn.transform(new AddINode(result, from_index));
    phi->init_req(1, result);
    region->init_req(1, control());
  }
  set_control(_gvn.transform(region));
  record_for_igvn(region);
  set_result(_gvn.transform(phi));

  return true;
}
//---------------------------inline_string_copy---------------------
// compressIt == true --> generate a compressed copy operation (compress char[]/byte[] to byte[])
//   int StringUTF16.compress(char[] src, int srcOff, byte[] dst, int dstOff, int len)
//   int StringUTF16.compress(byte[] src, int srcOff, byte[] dst, int dstOff, int len)
// compressIt == false --> generate an inflated copy operation (inflate byte[] to char[]/byte[])
//   void StringLatin1.inflate(byte[] src, int srcOff, char[] dst, int dstOff, int len)
//   void StringLatin1.inflate(byte[] src, int srcOff, byte[] dst, int dstOff, int len)
bool LibraryCallKit::inline_string_copy(bool compress) {
  int nargs = 5; // 2 oops, 3 ints
  assert(callee()->signature()->size() == nargs, "string copy has 5 arguments");

  Node* src = argument(0);
  Node* src_offset = argument(1);
  Node* dst = argument(2);
  Node* dst_offset = argument(3);
  Node* length = argument(4);

  // Check for allocation before we add nodes that would confuse
  // tightly_coupled_allocation()
  AllocateArrayNode* alloc = tightly_coupled_allocation(dst, NULL);

  // Figure out the size and type of the elements we will be copying.
  const Type* src_type = src->Value(&_gvn);
  const Type* dst_type = dst->Value(&_gvn);
  BasicType src_elem = src_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
  BasicType dst_elem = dst_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
  assert((compress && dst_elem == T_BYTE && (src_elem == T_BYTE || src_elem == T_CHAR)) ||
         (!compress && src_elem == T_BYTE && (dst_elem == T_BYTE || dst_elem == T_CHAR)),
         "Unsupported array types for inline_string_copy");

  // Convert char[] offsets to byte[] offsets: a byte[] holding UTF16 data is
  // indexed in chars by the caller, so scale the offset by 2 to address bytes.
  if (compress && src_elem == T_BYTE) {
    src_offset = _gvn.transform(new LShiftINode(src_offset, intcon(1)));
  } else if (!compress && dst_elem == T_BYTE) {
    dst_offset = _gvn.transform(new LShiftINode(dst_offset, intcon(1)));
  }

  Node* src_start = array_element_address(src, src_offset, src_elem);
  Node* dst_start = array_element_address(dst, dst_offset, dst_elem);
  // 'src_start' points to src array + scaled offset
  // 'dst_start' points to dst array + scaled offset
  Node* count;
  if (compress) {
    // compress_string produces the int result returned to Java
    // (see the StringUTF16.compress signatures above).
    count = compress_string(src_start, dst_start, length);
  } else {
    // inflate returns void; no result node is produced.
    inflate_string(src_start, dst_start, length);
  }

  if (alloc != NULL) {
    // The copy initializes a freshly allocated destination array.
    if (alloc->maybe_set_complete(&_gvn)) {
      // "You break it, you buy it."
      InitializeNode* init = alloc->initialization();
      assert(init->is_complete(), "we just did this");
      init->set_complete_with_arraycopy();
      assert(dst->is_CheckCastPP(), "sanity");
      assert(dst->in(0)->in(0) == init, "dest pinned");
    }
    // Do not let stores that initialize this object be reordered with
    // a subsequent store that would make this object accessible by
    // other threads.
    // Record what AllocateNode this StoreStore protects so that
    // escape analysis can go from the MemBarStoreStoreNode to the
    // AllocateNode and eliminate the MemBarStoreStoreNode if possible
    // based on the escape status of the AllocateNode.
    insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out(AllocateNode::RawAddress));
  }
  if (compress) {
    set_result(_gvn.transform(count));
  }
  return true;
}
1271
1272 #ifdef _LP64
1273 #define XTOP ,top() /*additional argument*/
1274 #else //_LP64
1275 #define XTOP /*no additional argument*/
1276 #endif //_LP64
1277
//------------------------inline_string_toBytesU--------------------------
// public static byte[] StringUTF16.toBytes(char[] value, int off, int len)
// Allocates a new byte[2*len] and copies len chars from value[off..] into it
// via a fast arraycopy stub.
bool LibraryCallKit::inline_string_toBytesU() {
  // Get the arguments.
  Node* value = argument(0);
  Node* offset = argument(1);
  Node* length = argument(2);

  Node* newcopy = NULL;

  // Set the original stack and the reexecute bit for the interpreter to reexecute
  // the bytecode that invokes StringUTF16.toBytes() if deoptimization happens.
  { PreserveReexecuteState preexecs(this);
    jvms()->set_should_reexecute(true);

    // Check if a null path was taken unconditionally.
    value = null_check(value);

    // Region collecting all failing-guard control paths.
    RegionNode* bailout = new RegionNode(1);
    record_for_igvn(bailout);

    // Make sure that resulting byte[] length does not overflow Integer.MAX_VALUE
    // (length must be in [0, max_jint/2] since the byte count is 2*length).
    generate_negative_guard(length, bailout);
    generate_limit_guard(length, intcon(0), intcon(max_jint/2), bailout);

    if (bailout->req() > 1) {
      // Some guard can fail: deoptimize on that path rather than emitting
      // the allocation/copy with a bad length.
      PreserveJVMState pjvms(this);
      set_control(_gvn.transform(bailout));
      uncommon_trap(Deoptimization::Reason_intrinsic,
                    Deoptimization::Action_maybe_recompile);
    }
    if (stopped()) return true;

    // Range checks are done by caller.

    // Allocate the destination byte[] (2 bytes per char).
    Node* size = _gvn.transform(new LShiftINode(length, intcon(1)));
    Node* klass_node = makecon(TypeKlassPtr::make(ciTypeArrayKlass::make(T_BYTE)));
    newcopy = new_array(klass_node, size, 0); // no arguments to push
    AllocateArrayNode* alloc = tightly_coupled_allocation(newcopy, NULL);

    // Calculate starting addresses.
    Node* src_start = array_element_address(value, offset, T_CHAR);
    Node* dst_start = basic_plus_adr(newcopy, arrayOopDesc::base_offset_in_bytes(T_BYTE));

    // Check if src array address is aligned to HeapWordSize (dst is always aligned)
    const TypeInt* toffset = gvn().type(offset)->is_int();
    bool aligned = toffset->is_con() && ((toffset->get_con() * type2aelembytes(T_CHAR)) % HeapWordSize == 0);

    // Figure out which arraycopy runtime method to call (disjoint, uninitialized).
    const char* copyfunc_name = "arraycopy";
    address copyfunc_addr = StubRoutines::select_arraycopy_function(T_CHAR, aligned, true, copyfunc_name, true);
    Node* call = make_runtime_call(RC_LEAF|RC_NO_FP,
                                   OptoRuntime::fast_arraycopy_Type(),
                                   copyfunc_addr, copyfunc_name, TypeRawPtr::BOTTOM,
                                   src_start, dst_start, ConvI2X(length) XTOP);
    // Do not let reads from the cloned object float above the arraycopy.
    if (alloc != NULL) {
      if (alloc->maybe_set_complete(&_gvn)) {
        // "You break it, you buy it."
        InitializeNode* init = alloc->initialization();
        assert(init->is_complete(), "we just did this");
        init->set_complete_with_arraycopy();
        assert(newcopy->is_CheckCastPP(), "sanity");
        assert(newcopy->in(0)->in(0) == init, "dest pinned");
      }
      // Do not let stores that initialize this object be reordered with
      // a subsequent store that would make this object accessible by
      // other threads.
      // Record what AllocateNode this StoreStore protects so that
      // escape analysis can go from the MemBarStoreStoreNode to the
      // AllocateNode and eliminate the MemBarStoreStoreNode if possible
      // based on the escape status of the AllocateNode.
      insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out(AllocateNode::RawAddress));
    } else {
      // No tightly coupled allocation: a plain ordering barrier suffices.
      insert_mem_bar(Op_MemBarCPUOrder);
    }
  } // original reexecute is set back here

  C->set_has_split_ifs(true); // Has chance for split-if optimization
  if (!stopped()) {
    set_result(newcopy);
  }
  return true;
}
1362
//------------------------inline_string_getCharsU--------------------------
// public void StringUTF16.getChars(byte[] value, int srcBegin, int srcEnd, char dst[], int dstBegin)
// Copies srcEnd-srcBegin chars from a UTF16 byte[] into a char[] via a fast
// arraycopy stub. Returns void to Java, so no result node is set.
bool LibraryCallKit::inline_string_getCharsU() {
  // Give up on the intrinsic if this site has already deoptimized repeatedly.
  if (too_many_traps(Deoptimization::Reason_intrinsic)) return false;

  // Get the arguments.
  Node* value = argument(0);
  Node* src_begin = argument(1);
  Node* src_end = argument(2); // exclusive offset (i < src_end)
  Node* dst = argument(3);
  Node* dst_begin = argument(4);

  // Check for allocation before we add nodes that would confuse
  // tightly_coupled_allocation()
  AllocateArrayNode* alloc = tightly_coupled_allocation(dst, NULL);

  // Check if a null path was taken unconditionally.
  value = null_check(value);
  dst = null_check(dst);
  if (stopped()) {
    return true;
  }

  // Range checks are done by caller.

  // Get length and convert char[] offset to byte[] offset (2 bytes per char)
  Node* length = _gvn.transform(new SubINode(src_end, src_begin));
  src_begin = _gvn.transform(new LShiftINode(src_begin, intcon(1)));

  if (!stopped()) {
    // Calculate starting addresses.
    Node* src_start = array_element_address(value, src_begin, T_BYTE);
    Node* dst_start = array_element_address(dst, dst_begin, T_CHAR);

    // Check if array addresses are aligned to HeapWordSize
    const TypeInt* tsrc = gvn().type(src_begin)->is_int();
    const TypeInt* tdst = gvn().type(dst_begin)->is_int();
    bool aligned = tsrc->is_con() && ((tsrc->get_con() * type2aelembytes(T_BYTE)) % HeapWordSize == 0) &&
                   tdst->is_con() && ((tdst->get_con() * type2aelembytes(T_CHAR)) % HeapWordSize == 0);

    // Figure out which arraycopy runtime method to call (disjoint, uninitialized).
    const char* copyfunc_name = "arraycopy";
    address copyfunc_addr = StubRoutines::select_arraycopy_function(T_CHAR, aligned, true, copyfunc_name, true);
    Node* call = make_runtime_call(RC_LEAF|RC_NO_FP,
                                   OptoRuntime::fast_arraycopy_Type(),
                                   copyfunc_addr, copyfunc_name, TypeRawPtr::BOTTOM,
                                   src_start, dst_start, ConvI2X(length) XTOP);
    // Do not let reads from the cloned object float above the arraycopy.
    if (alloc != NULL) {
      if (alloc->maybe_set_complete(&_gvn)) {
        // "You break it, you buy it."
        InitializeNode* init = alloc->initialization();
        assert(init->is_complete(), "we just did this");
        init->set_complete_with_arraycopy();
        assert(dst->is_CheckCastPP(), "sanity");
        assert(dst->in(0)->in(0) == init, "dest pinned");
      }
      // Do not let stores that initialize this object be reordered with
      // a subsequent store that would make this object accessible by
      // other threads.
      // Record what AllocateNode this StoreStore protects so that
      // escape analysis can go from the MemBarStoreStoreNode to the
      // AllocateNode and eliminate the MemBarStoreStoreNode if possible
      // based on the escape status of the AllocateNode.
      insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out(AllocateNode::RawAddress));
    } else {
      // No tightly coupled allocation: a plain ordering barrier suffices.
      insert_mem_bar(Op_MemBarCPUOrder);
    }
  }

  C->set_has_split_ifs(true); // Has chance for split-if optimization
  return true;
}
1436
1437 //----------------------inline_string_char_access----------------------------
1438 // Store/Load char to/from byte[] array.
1439 // static void StringUTF16.putChar(byte[] val, int index, int c)
1440 // static char StringUTF16.getChar(byte[] val, int index)
1441 bool LibraryCallKit::inline_string_char_access(bool is_store) {
1442 Node* value = argument(0);
1443 Node* index = argument(1);
1444 Node* ch = is_store ? argument(2) : NULL;
1445
1446 // This intrinsic accesses byte[] array as char[] array. Computing the offsets
1447 // correctly requires matched array shapes.
1448 assert (arrayOopDesc::base_offset_in_bytes(T_CHAR) == arrayOopDesc::base_offset_in_bytes(T_BYTE),
1449 "sanity: byte[] and char[] bases agree");
1450 assert (type2aelembytes(T_CHAR) == type2aelembytes(T_BYTE)*2,
1451 "sanity: byte[] and char[] scales agree");
1452
1453 Node* adr = array_element_address(value, index, T_CHAR);
1454 if (is_store) {
1455 (void) store_to_memory(control(), adr, ch, T_CHAR, TypeAryPtr::BYTES, MemNode::unordered);
1456 } else {
1457 ch = make_load(control(), adr, TypeInt::CHAR, T_CHAR, MemNode::unordered);
1458 set_result(ch);
1459 }
1460 return true;
1461 }
1462
1463 //--------------------------round_double_node--------------------------------
1464 // Round a double node if necessary.
1465 Node* LibraryCallKit::round_double_node(Node* n) {
1466 if (Matcher::strict_fp_requires_explicit_rounding && UseSSE <= 1)
1467 n = _gvn.transform(new RoundDoubleNode(0, n));
1468 return n;
1469 }
1470
1471 //------------------------------inline_math-----------------------------------
1472 // public static double Math.abs(double)
1473 // public static double Math.sqrt(double)
1474 // public static double Math.log(double)
1475 // public static double Math.log10(double)
1476 bool LibraryCallKit::inline_math(vmIntrinsics::ID id) {
1477 Node* arg = round_double_node(argument(0));
1478 Node* n;
1479 switch (id) {
4281 r->init_req(2, iffalse);
4282
4283 // Post merge
4284 set_control(_gvn.transform(r));
4285 record_for_igvn(r);
4286
4287 C->set_has_split_ifs(true); // Has chance for split-if optimization
4288 result = phi;
4289 assert(result->bottom_type()->isa_int(), "must be");
4290 break;
4291 }
4292
4293 default:
4294 fatal_unexpected_iid(id);
4295 break;
4296 }
4297 set_result(_gvn.transform(result));
4298 return true;
4299 }
4300
4301 //----------------------inline_unsafe_copyMemory-------------------------
4302 // public native void sun.misc.Unsafe.copyMemory(Object srcBase, long srcOffset, Object destBase, long destOffset, long bytes);
4303 bool LibraryCallKit::inline_unsafe_copyMemory() {
4304 if (callee()->is_static()) return false; // caller must have the capability!
4305 null_check_receiver(); // null-check receiver
4306 if (stopped()) return true;
4307
4308 C->set_has_unsafe_access(true); // Mark eventual nmethod as "unsafe".
4309
4310 Node* src_ptr = argument(1); // type: oop
4311 Node* src_off = ConvL2X(argument(2)); // type: long
4312 Node* dst_ptr = argument(4); // type: oop
4313 Node* dst_off = ConvL2X(argument(5)); // type: long
4314 Node* size = ConvL2X(argument(7)); // type: long
4315
4316 assert(Unsafe_field_offset_to_byte_offset(11) == 11,
4317 "fieldOffset must be byte-scaled");
4318
4319 Node* src = make_unsafe_address(src_ptr, src_off);
4320 Node* dst = make_unsafe_address(dst_ptr, dst_off);
5089 // no receiver since it is static method
5090 Node *src = argument(0);
5091 Node *src_offset = argument(1);
5092 Node *dst = argument(2);
5093 Node *dst_offset = argument(3);
5094 Node *length = argument(4);
5095
5096 const Type* src_type = src->Value(&_gvn);
5097 const Type* dst_type = dst->Value(&_gvn);
5098 const TypeAryPtr* top_src = src_type->isa_aryptr();
5099 const TypeAryPtr* top_dest = dst_type->isa_aryptr();
5100 if (top_src == NULL || top_src->klass() == NULL ||
5101 top_dest == NULL || top_dest->klass() == NULL) {
5102 // failed array check
5103 return false;
5104 }
5105
5106 // Figure out the size and type of the elements we will be copying.
5107 BasicType src_elem = src_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5108 BasicType dst_elem = dst_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5109 if (!((src_elem == T_CHAR) || (src_elem== T_BYTE)) || dst_elem != T_BYTE) {
5110 return false;
5111 }
5112
5113 Node* src_start = array_element_address(src, src_offset, T_CHAR);
5114 Node* dst_start = array_element_address(dst, dst_offset, dst_elem);
5115 // 'src_start' points to src array + scaled offset
5116 // 'dst_start' points to dst array + scaled offset
5117
5118 const TypeAryPtr* mtype = TypeAryPtr::BYTES;
5119 Node* enc = new EncodeISOArrayNode(control(), memory(mtype), src_start, dst_start, length);
5120 enc = _gvn.transform(enc);
5121 Node* res_mem = _gvn.transform(new SCMemProjNode(enc));
5122 set_memory(res_mem, mtype);
5123 set_result(enc);
5124 return true;
5125 }
5126
5127 //-------------inline_multiplyToLen-----------------------------------
5128 bool LibraryCallKit::inline_multiplyToLen() {
5129 assert(UseMultiplyToLenIntrinsic, "not implemented on this platform");
5130
5131 address stubAddr = StubRoutines::multiplyToLen();
5132 if (stubAddr == NULL) {
5133 return false; // Intrinsic's stub is not implemented on this platform
5211 _gvn.set_type(z, TypeOopPtr::make_from_klass(klass));
5212 // Final sync IdealKit and GraphKit.
5213 final_sync(ideal);
5214 #undef __
5215
5216 Node* z_start = array_element_address(z, intcon(0), T_INT);
5217
5218 Node* call = make_runtime_call(RC_LEAF|RC_NO_FP,
5219 OptoRuntime::multiplyToLen_Type(),
5220 stubAddr, stubName, TypePtr::BOTTOM,
5221 x_start, xlen, y_start, ylen, z_start, zlen);
5222 } // original reexecute is set back here
5223
5224 C->set_has_split_ifs(true); // Has chance for split-if optimization
5225 set_result(z);
5226 return true;
5227 }
5228
5229 //-------------inline_squareToLen------------------------------------
5230 bool LibraryCallKit::inline_squareToLen() {
5231 assert(UseSquareToLenIntrinsic, "not implemented on this platform");
5232
5233 address stubAddr = StubRoutines::squareToLen();
5234 if (stubAddr == NULL) {
5235 return false; // Intrinsic's stub is not implemented on this platform
5236 }
5237 const char* stubName = "squareToLen";
5238
5239 assert(callee()->signature()->size() == 4, "implSquareToLen has 4 parameters");
5240
5241 Node* x = argument(0);
5242 Node* len = argument(1);
5243 Node* z = argument(2);
5244 Node* zlen = argument(3);
5245
5246 const Type* x_type = x->Value(&_gvn);
5247 const Type* z_type = z->Value(&_gvn);
5248 const TypeAryPtr* top_x = x_type->isa_aryptr();
5249 const TypeAryPtr* top_z = z_type->isa_aryptr();
5250 if (top_x == NULL || top_x->klass() == NULL ||
5251 top_z == NULL || top_z->klass() == NULL) {
5257 BasicType z_elem = z_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5258 if (x_elem != T_INT || z_elem != T_INT) {
5259 return false;
5260 }
5261
5262
5263 Node* x_start = array_element_address(x, intcon(0), x_elem);
5264 Node* z_start = array_element_address(z, intcon(0), z_elem);
5265
5266 Node* call = make_runtime_call(RC_LEAF|RC_NO_FP,
5267 OptoRuntime::squareToLen_Type(),
5268 stubAddr, stubName, TypePtr::BOTTOM,
5269 x_start, len, z_start, zlen);
5270
5271 set_result(z);
5272 return true;
5273 }
5274
5275 //-------------inline_mulAdd------------------------------------------
5276 bool LibraryCallKit::inline_mulAdd() {
5277 assert(UseMulAddIntrinsic, "not implemented on this platform");
5278
5279 address stubAddr = StubRoutines::mulAdd();
5280 if (stubAddr == NULL) {
5281 return false; // Intrinsic's stub is not implemented on this platform
5282 }
5283 const char* stubName = "mulAdd";
5284
5285 assert(callee()->signature()->size() == 5, "mulAdd has 5 parameters");
5286
5287 Node* out = argument(0);
5288 Node* in = argument(1);
5289 Node* offset = argument(2);
5290 Node* len = argument(3);
5291 Node* k = argument(4);
5292
5293 const Type* out_type = out->Value(&_gvn);
5294 const Type* in_type = in->Value(&_gvn);
5295 const TypeAryPtr* top_out = out_type->isa_aryptr();
5296 const TypeAryPtr* top_in = in_type->isa_aryptr();
5297 if (top_out == NULL || top_out->klass() == NULL ||
|