    if (_flow->failing()) {
      C->record_method_not_compilable(_flow->failure_reason());
#ifndef PRODUCT
      if (PrintOpto && (Verbose || WizardMode)) {
        tty->print_cr("OSR @%d type flow bailout: %s", _entry_bci, _flow->failure_reason());
        if (Verbose) {
          method()->print();
          method()->print_codes();
          _flow->print();
        }
      }
#endif
    }
    _tf = C->tf();     // the OSR entry type is different
  }

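  // Sanity check: a top-level parse must agree with the Compile about OSR,
  // and a TypeFunc mismatch is only acceptable when classes were loaded
  // concurrently (tracked by the system dictionary modification counter).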
#ifdef ASSERT
  if (depth() == 1) {
    assert(C->is_osr_compilation() == this->is_osr_parse(), "OSR in sync");
    if (C->tf() != tf()) {
      MutexLockerEx ml(Compile_lock, Mutex::_no_safepoint_check_flag);
      assert(C->env()->system_dictionary_modification_counter_changed(),
             "Must invalidate if TypeFuncs differ");
    }
  } else {
    assert(!this->is_osr_parse(), "no recursive OSR");
  }
#endif

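  // Non-product parse statistics (parsed method count and compiled bytecode size).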
#ifndef PRODUCT
  methods_parsed++;
  // add method size here to guarantee that inlined methods are added too
  if (CITime)
    _total_bytes_compiled += method()->code_size();

  show_parse_info();
#endif

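  // Bail out now if an earlier step has already recorded a failure for this compile.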
  if (failing()) {
    if (log)  log->done("parse");
    return;

// ... [source lines 548-1023 omitted] ...

      tty->print_cr(" writes @Stable and needs a memory barrier");
    }
  }

  for (MergeMemStream mms(_exits.merged_memory()); mms.next_non_empty(); ) {
    // transform each slice of the original memphi:
    mms.set_memory(_gvn.transform(mms.memory()));
  }

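  // If the method returns a value, the signature's range has an entry beyond
  // the parameters; transform the return phi and push it on the exit state
  // for the ReturnNode that Compile creates later.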
  if (tf()->range()->cnt() > TypeFunc::Parms) {
    const Type* ret_type = tf()->range()->field_at(TypeFunc::Parms);
    Node* ret_phi = _gvn.transform( _exits.argument(0) );
    if (!_exits.control()->is_top() && _gvn.type(ret_phi)->empty()) {
      // In case of concurrent class loading, the type we set for the
      // ret_phi in build_exits() may have been too optimistic and the
      // ret_phi may be top now.
      // Otherwise, we've encountered an error and have to mark the method as
      // not compilable. Just using an assertion instead would be dangerous
      // as this could lead to an infinite compile loop in non-debug builds.
      {
        MutexLockerEx ml(Compile_lock, Mutex::_no_safepoint_check_flag);
        if (C->env()->system_dictionary_modification_counter_changed()) {
          C->record_failure(C2Compiler::retry_class_loading_during_parsing());
        } else {
          C->record_method_not_compilable("Can't determine return type.");
        }
      }
      return;
    }
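    // Mask sub-int return values (boolean, byte, char, short) down to the
    // declared return type before they are pushed.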
    if (ret_type->isa_int()) {
      BasicType ret_bt = method()->return_type()->basic_type();
      ret_phi = mask_int_value(ret_phi, ret_bt, &_gvn);
    }
    _exits.push_node(ret_type->basic_type(), ret_phi);
  }

  // Note:  Logic for creating and optimizing the ReturnNode is in Compile.

  // Unlock along the exceptional paths.
  // This is done late so that we can common up equivalent exceptions
  // (e.g., null checks) arising from multiple points within this method.