/*
 * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/ciMethodData.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "compiler/compileLog.hpp"
#include "interpreter/linkResolver.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "opto/addnode.hpp"
#include "opto/castnode.hpp"
#include "opto/convertnode.hpp"
#include "opto/divnode.hpp"
#include "opto/idealGraphPrinter.hpp"
#include "opto/idealKit.hpp"
#include "opto/matcher.hpp"
#include "opto/memnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/opaquenode.hpp"
#include "opto/parse.hpp"
#include "opto/runtime.hpp"
#include "opto/valuetypenode.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/sharedRuntime.hpp"

#ifndef PRODUCT
extern int explicit_null_checks_inserted,
           explicit_null_checks_elided;
#endif

//---------------------------------array_load----------------------------------
void Parse::array_load(BasicType bt) {
  const Type* elemtype = Type::TOP;
  Node* adr = array_addressing(bt, 0, &elemtype);
  if (stopped()) return;  // guaranteed null or range check

  Node* idx = pop();
  Node* ary = pop();

  // Handle value type arrays
  const TypeOopPtr* elemptr = elemtype->make_oopptr();
  const TypeAryPtr* ary_t = _gvn.type(ary)->is_aryptr();
  if (elemtype->isa_valuetype() != NULL) {
    C->set_flattened_accesses();
    // Load from flattened value type array
    Node* vt = ValueTypeNode::make_from_flattened(this, elemtype->value_klass(), ary, adr);
    push(vt);
    return;
  } else if (elemptr != NULL && elemptr->is_valuetypeptr() && !elemptr->maybe_null()) {
    // Load from non-flattened but flattenable value type array (elements can never be null)
    bt = T_VALUETYPE;
  } else if (ValueArrayFlatten && elemptr != NULL && elemptr->can_be_value_type() &&
             !ary_t->klass_is_exact() && (!elemptr->is_valuetypeptr() || elemptr->value_klass()->flatten_array())) {
    // Cannot statically determine if array is flattened, emit runtime check
    Node* ctl = control();
    IdealKit ideal(this);
    IdealVariable res(ideal);
    ideal.declarations_done();
    Node* kls = load_object_klass(ary);
    Node* tag = load_lh_array_tag(kls);
    ideal.if_then(tag, BoolTest::ne, intcon(Klass::_lh_array_tag_vt_value)); {
      // non-flattened
      sync_kit(ideal);
      const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
      Node* ld = access_load_at(ary, adr, adr_type, elemptr, bt,
                                IN_HEAP | IS_ARRAY | C2_CONTROL_DEPENDENT_LOAD, ctl);
      ideal.sync_kit(this);
      ideal.set(res, ld);
    } ideal.else_(); {
      // flattened
      sync_kit(ideal);
      if (elemptr->is_valuetypeptr()) {
        // Element type is known, cast and load from flattened representation
        assert(elemptr->maybe_null(), "must be nullable");
        ciValueKlass* vk = elemptr->value_klass();
        assert(vk->flatten_array(), "must be flattenable");
        ciArrayKlass* array_klass = ciArrayKlass::make(vk, /* never_null */ true);
        const TypeAryPtr* arytype = TypeOopPtr::make_from_klass(array_klass)->isa_aryptr();
        Node* cast = _gvn.transform(new CheckCastPPNode(control(), ary, arytype));
        adr = array_element_address(cast, idx, T_VALUETYPE, ary_t->size(), control());
        Node* vt = ValueTypeNode::make_from_flattened(this, vk, cast, adr)->allocate(this, false, false)->get_oop();
        ideal.set(res, vt);
        ideal.sync_kit(this);
      } else {
        // Element type is unknown, emit runtime call
        assert(!ary_t->klass_is_exact(), "should not have exact type here");
        Node* k_adr = basic_plus_adr(kls, in_bytes(ArrayKlass::element_klass_offset()));
        Node* elem_klass = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), k_adr, TypeInstPtr::KLASS));
        Node* obj_size = NULL;
        kill_dead_locals();
        inc_sp(2);
        Node* alloc_obj = new_instance(elem_klass, NULL, &obj_size, /*deoptimize_on_exception=*/true);
        dec_sp(2);

        AllocateNode* alloc = AllocateNode::Ideal_allocation(alloc_obj, &_gvn);
        assert(alloc->maybe_set_complete(&_gvn), "");
        alloc->initialization()->set_complete_with_arraycopy();
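        // Note (illustrative): because the element type is unknown here,
        // there is no oop stored in the flattened array to hand back.
        // alloc_obj is a freshly allocated buffer that the element's field
        // values are copied into below, either inline (access_clone) or
        // via the load_unknown_value runtime call.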
        // This membar keeps this access to an unknown flattened array
        // correctly ordered with other unknown and known flattened
        // array accesses.
        insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::VALUES));

        BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
        // Unknown value type might contain reference fields
        if (!bs->array_copy_requires_gc_barriers(false, T_OBJECT, false, BarrierSetC2::Parsing)) {
          int base_off = sizeof(instanceOopDesc);
          Node* dst_base = basic_plus_adr(alloc_obj, base_off);
          Node* countx = obj_size;
          countx = _gvn.transform(new SubXNode(countx, MakeConX(base_off)));
          countx = _gvn.transform(new URShiftXNode(countx, intcon(LogBytesPerLong)));

          assert(Klass::_lh_log2_element_size_shift == 0, "use shift in place");
          Node* lhp = basic_plus_adr(kls, in_bytes(Klass::layout_helper_offset()));
          Node* elem_shift = make_load(NULL, lhp, TypeInt::INT, T_INT, MemNode::unordered);
          uint header = arrayOopDesc::base_offset_in_bytes(T_VALUETYPE);
          Node* base = basic_plus_adr(ary, header);
          idx = Compile::conv_I2X_index(&_gvn, idx, TypeInt::POS, control());
          Node* scale = _gvn.transform(new LShiftXNode(idx, elem_shift));
          Node* adr = basic_plus_adr(ary, base, scale);

          access_clone(adr, dst_base, countx, false);
        } else {
          ideal.sync_kit(this);
          ideal.make_leaf_call(OptoRuntime::load_unknown_value_Type(),
                               CAST_FROM_FN_PTR(address, OptoRuntime::load_unknown_value),
                               "load_unknown_value",
                               ary, idx, alloc_obj);
          sync_kit(ideal);
        }

        // This makes sure no other thread sees a partially initialized buffered value
        insert_mem_bar_volatile(Op_MemBarStoreStore, Compile::AliasIdxRaw, alloc->proj_out_or_null(AllocateNode::RawAddress));

        // Same as the MemBarCPUOrder above: keep this unknown flattened
        // array access correctly ordered with other flattened array
        // accesses
        insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::VALUES));

        // Prevent any use of the newly allocated value before it is
        // fully initialized
        alloc_obj = new CastPPNode(alloc_obj, _gvn.type(alloc_obj), true);
        alloc_obj->set_req(0, control());
        alloc_obj = _gvn.transform(alloc_obj);

        ideal.sync_kit(this);

        ideal.set(res, alloc_obj);
      }
    } ideal.end_if();
    sync_kit(ideal);
    push_node(bt, _gvn.transform(ideal.value(res)));
    return;
  }

  if (elemtype == TypeInt::BOOL) {
    bt = T_BOOLEAN;
  } else if (bt == T_OBJECT) {
    elemtype = ary_t->elem()->make_oopptr();
  }

  const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
  Node* ld = access_load_at(ary, adr, adr_type, elemtype, bt,
                            IN_HEAP | IS_ARRAY | C2_CONTROL_DEPENDENT_LOAD);
  if (bt == T_VALUETYPE) {
    // Loading a non-flattened (but flattenable) value type from an array
    assert(!gvn().type(ld)->maybe_null(), "value type array elements should never be null");
    if (elemptr->value_klass()->is_scalarizable()) {
      ld = ValueTypeNode::make_from_oop(this, ld, elemptr->value_klass());
    }
  }

  push_node(bt, ld);
}


//--------------------------------array_store----------------------------------
void Parse::array_store(BasicType bt) {
  const Type* elemtype = Type::TOP;
  Node* adr = array_addressing(bt, type2size[bt], &elemtype);
  if (stopped()) return;  // guaranteed null or range check
  Node* cast_val = NULL;
  if (bt == T_OBJECT) {
    cast_val = array_store_check();
    if (stopped()) return;
  }
  Node* val = pop_node(bt);  // Value to store
  Node* idx = pop();         // Index in the array
  Node* ary = pop();         // The array itself

  const TypeAryPtr* ary_t = _gvn.type(ary)->is_aryptr();
  if (bt == T_OBJECT) {
    const TypeOopPtr* elemptr = elemtype->make_oopptr();
    const Type* val_t = _gvn.type(val);
    if (elemtype->isa_valuetype() != NULL) {
      C->set_flattened_accesses();
      // Store to flattened value type array
      if (!cast_val->is_ValueType()) {
        inc_sp(3);
        cast_val = null_check(cast_val);
        if (stopped()) return;
        dec_sp(3);
        cast_val = ValueTypeNode::make_from_oop(this, cast_val, elemtype->value_klass());
      }
      cast_val->as_ValueType()->store_flattened(this, ary, adr);
      return;
    } else if (elemptr->is_valuetypeptr() && !elemptr->maybe_null()) {
      // Store to non-flattened but flattenable value type array (elements can never be null)
      if (!cast_val->is_ValueType()) {
        inc_sp(3);
        cast_val = null_check(cast_val);
        if (stopped()) return;
        dec_sp(3);
      }
    } else if (elemptr->can_be_value_type() && (!ary_t->klass_is_exact() || elemptr->is_valuetypeptr()) &&
               (val->is_ValueType() || val_t == TypePtr::NULL_PTR || val_t->is_oopptr()->can_be_value_type())) {
      // Cannot statically determine if array is flattened, emit runtime check
      ciValueKlass* vk = NULL;
      // Try to determine the value klass
      if (val->is_ValueType()) {
        vk = val_t->value_klass();
      } else if (elemptr->is_valuetypeptr()) {
        vk = elemptr->value_klass();
      }
      if (ValueArrayFlatten && (vk == NULL || vk->flatten_array())) {
        IdealKit ideal(this);
        Node* kls = load_object_klass(ary);
        Node* layout_val = load_lh_array_tag(kls);
        ideal.if_then(layout_val, BoolTest::ne, intcon(Klass::_lh_array_tag_vt_value)); {
          // non-flattened
          sync_kit(ideal);
          gen_value_array_null_guard(ary, val, 3);
          const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
          elemtype = ary_t->elem()->make_oopptr();
          access_store_at(ary, adr, adr_type, val, elemtype, bt, MO_UNORDERED | IN_HEAP | IS_ARRAY, false, false);
          ideal.sync_kit(this);
        } ideal.else_(); {
          // flattened
          if (!val->is_ValueType() && TypePtr::NULL_PTR->higher_equal(val_t)) {
            // Add null check
            sync_kit(ideal);
            Node* null_ctl = top();
            val = null_check_oop(val, &null_ctl);
            if (null_ctl != top()) {
              PreserveJVMState pjvms(this);
              inc_sp(3);
              set_control(null_ctl);
              uncommon_trap(Deoptimization::Reason_null_check, Deoptimization::Action_none);
              dec_sp(3);
            }
            ideal.sync_kit(this);
          }
          if (vk != NULL && !stopped()) {
            // Element type is known, cast and store to flattened representation
            sync_kit(ideal);
            assert(vk->flatten_array(), "must be flattenable");
            assert(elemptr->maybe_null(), "must be nullable");
            ciArrayKlass* array_klass = ciArrayKlass::make(vk, /* never_null */ true);
            const TypeAryPtr* arytype = TypeOopPtr::make_from_klass(array_klass)->isa_aryptr();
            ary = _gvn.transform(new CheckCastPPNode(control(), ary, arytype));
            adr = array_element_address(ary, idx, T_OBJECT, arytype->size(), control());
            if (!val->is_ValueType()) {
              assert(!gvn().type(val)->maybe_null(), "value type array elements should never be null");
              val = ValueTypeNode::make_from_oop(this, val, vk);
            }
            val->as_ValueType()->store_flattened(this, ary, adr);
            ideal.sync_kit(this);
          } else if (!ideal.ctrl()->is_top()) {
            // Element type is unknown, emit runtime call
            assert(!ary_t->klass_is_exact(), "should not have exact type here");
            sync_kit(ideal);
            // This membar keeps this access to an unknown flattened
            // array correctly ordered with other unknown and known
            // flattened array accesses.
            insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::VALUES));
            ideal.sync_kit(this);

            ideal.make_leaf_call(OptoRuntime::store_unknown_value_Type(),
                                 CAST_FROM_FN_PTR(address, OptoRuntime::store_unknown_value),
                                 "store_unknown_value",
                                 val, ary, idx);

            sync_kit(ideal);
            // Same as the MemBarCPUOrder above: keep this unknown
            // flattened array access correctly ordered with other
            // flattened array accesses
            insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::VALUES));
            ideal.sync_kit(this);

          }
        } ideal.end_if();
        sync_kit(ideal);
        return;
      } else {
        gen_value_array_null_guard(ary, val, 3);
      }
    }
  }

  if (elemtype == TypeInt::BOOL) {
    bt = T_BOOLEAN;
  } else if (bt == T_OBJECT) {
    elemtype = ary_t->elem()->make_oopptr();
  }

  const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);

  access_store_at(ary, adr, adr_type, val, elemtype, bt, MO_UNORDERED | IN_HEAP | IS_ARRAY);
}


//------------------------------array_addressing-------------------------------
// Pull array and index from the stack. Compute pointer-to-element.
Node* Parse::array_addressing(BasicType type, int vals, const Type** result2) {
  Node* idx = peek(0 + vals);  // Get from stack without popping
  Node* ary = peek(1 + vals);  // in case of exception

  // Null check the array base, with correct stack contents
  ary = null_check(ary, T_ARRAY);
  // Compile-time detection of null exception?
  if (stopped()) return top();

  const TypeAryPtr* arytype  = _gvn.type(ary)->is_aryptr();
  const TypeInt*    sizetype = arytype->size();
  const Type*       elemtype = arytype->elem();

  if (UseUniqueSubclasses && result2 != NULL) {
    const Type* el = elemtype->make_ptr();
    if (el && el->isa_instptr()) {
      const TypeInstPtr* toop = el->is_instptr();
      if (toop->klass()->as_instance_klass()->unique_concrete_subklass()) {
        // If we load from "AbstractClass[]" we must see "ConcreteSubClass".
        const Type* subklass = Type::get_const_type(toop->klass());
        elemtype = subklass->join_speculative(el);
      }
    }
  }

  // Check for big class initializers with all constant offsets
  // feeding into a known-size array.
  const TypeInt* idxtype = _gvn.type(idx)->is_int();
  // See if the highest idx value is less than the lowest array bound,
  // and if the idx value cannot be negative:
  bool need_range_check = true;
  if (idxtype->_hi < sizetype->_lo && idxtype->_lo >= 0) {
    need_range_check = false;
    if (C->log() != NULL) C->log()->elem("observe that='!need_range_check'");
  }

  ciKlass* arytype_klass = arytype->klass();
  if ((arytype_klass != NULL) && (!arytype_klass->is_loaded())) {
    // Only fails for some -Xcomp runs
    // The class is unloaded. We have to run this bytecode in the interpreter.
    uncommon_trap(Deoptimization::Reason_unloaded,
                  Deoptimization::Action_reinterpret,
                  arytype->klass(), "!loaded array");
    return top();
  }

  // Do the range check
  if (GenerateRangeChecks && need_range_check) {
    Node* tst;
    if (sizetype->_hi <= 0) {
      // The greatest array bound is negative, so we can conclude that we're
      // compiling unreachable code, but the unsigned compare trick used below
      // only works with non-negative lengths.
      // Instead, hack "tst" to be zero so the uncommon_trap path will
      // always be taken.
      tst = _gvn.intcon(0);
    } else {
      // Range is constant in array-oop, so we can use the original state of mem
      Node* len = load_array_length(ary);

      // Test length vs index (standard trick using unsigned compare)
      Node* chk = _gvn.transform( new CmpUNode(idx, len) );
      BoolTest::mask btest = BoolTest::lt;
      tst = _gvn.transform( new BoolNode(chk, btest) );
    }
    RangeCheckNode* rc = new RangeCheckNode(control(), tst, PROB_MAX, COUNT_UNKNOWN);
    _gvn.set_type(rc, rc->Value(&_gvn));
    if (!tst->is_Con()) {
      record_for_igvn(rc);
    }
    set_control(_gvn.transform(new IfTrueNode(rc)));
    // Branch to failure if out of bounds
    {
      PreserveJVMState pjvms(this);
      set_control(_gvn.transform(new IfFalseNode(rc)));
      if (C->allow_range_check_smearing()) {
        // Do not use builtin_throw, since range checks are sometimes
        // made more stringent by an optimistic transformation.
        // This creates "tentative" range checks at this point,
        // which are not guaranteed to throw exceptions.
        // See IfNode::Ideal, is_range_check, adjust_check.
        uncommon_trap(Deoptimization::Reason_range_check,
                      Deoptimization::Action_make_not_entrant,
                      NULL, "range_check");
      } else {
        // If we have already recompiled with the range-check-widening
        // heroic optimization turned off, then we must really be throwing
        // range check exceptions.
        builtin_throw(Deoptimization::Reason_range_check, idx);
      }
    }
  }
  // Check for always knowing you are throwing a range-check exception
  if (stopped()) return top();
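  // Note (illustrative): the single unsigned compare above subsumes both
  // bounds checks. The array length is always non-negative, so a negative
  // idx reinterpreted as unsigned (e.g. (juint)-1 == 0xFFFFFFFF) is larger
  // than any valid length; "(juint)idx < (juint)len" therefore holds
  // exactly when 0 <= idx < len.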
  // Make array address computation control dependent to prevent it
  // from floating above the range check during loop optimizations.
  Node* ptr = array_element_address(ary, idx, type, sizetype, control());

  if (result2 != NULL) *result2 = elemtype;

  assert(ptr != top(), "top should go hand-in-hand with stopped");

  return ptr;
}


// returns IfNode
IfNode* Parse::jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask, float prob, float cnt) {
  Node* cmp = _gvn.transform(new CmpINode(a, b)); // two cases: shiftcount > 32 and shiftcount <= 32
  Node* tst = _gvn.transform(new BoolNode(cmp, mask));
  IfNode* iff = create_and_map_if(control(), tst, prob, cnt);
  return iff;
}

// return Region node
Node* Parse::jump_if_join(Node* iffalse, Node* iftrue) {
  Node* region = new RegionNode(3); // 2 results
  record_for_igvn(region);
  region->init_req(1, iffalse);
  region->init_req(2, iftrue);
  _gvn.set_type(region, Type::CONTROL);
  region = _gvn.transform(region);
  set_control(region);
  return region;
}

// sentinel value for the target bci to mark never taken branches
// (according to profiling)
static const int never_reached = INT_MAX;

//------------------------------helper for tableswitch-------------------------
void Parse::jump_if_true_fork(IfNode *iff, int dest_bci_if_true, int prof_table_index, bool unc) {
  // True branch, use existing map info
  { PreserveJVMState pjvms(this);
    Node *iftrue = _gvn.transform( new IfTrueNode (iff) );
    set_control( iftrue );
    if (unc) {
      repush_if_args();
      uncommon_trap(Deoptimization::Reason_unstable_if,
                    Deoptimization::Action_reinterpret,
                    NULL,
                    "taken always");
    } else {
      assert(dest_bci_if_true != never_reached, "inconsistent dest");
      profile_switch_case(prof_table_index);
      merge_new_path(dest_bci_if_true);
    }
  }

  // False branch
  Node *iffalse = _gvn.transform( new IfFalseNode(iff) );
  set_control( iffalse );
}

void Parse::jump_if_false_fork(IfNode *iff, int dest_bci_if_true, int prof_table_index, bool unc) {
  // False branch, use existing map info
  { PreserveJVMState pjvms(this);
    Node *iffalse = _gvn.transform( new IfFalseNode (iff) );
    set_control( iffalse );
    if (unc) {
      repush_if_args();
      uncommon_trap(Deoptimization::Reason_unstable_if,
                    Deoptimization::Action_reinterpret,
                    NULL,
                    "taken never");
    } else {
      assert(dest_bci_if_true != never_reached, "inconsistent dest");
      profile_switch_case(prof_table_index);
      merge_new_path(dest_bci_if_true);
    }
  }

  // True branch
  Node *iftrue = _gvn.transform( new IfTrueNode(iff) );
  set_control( iftrue );
}

void Parse::jump_if_always_fork(int dest_bci, int prof_table_index, bool unc) {
  // False branch, use existing map and control()
  if (unc) {
    repush_if_args();
    uncommon_trap(Deoptimization::Reason_unstable_if,
                  Deoptimization::Action_reinterpret,
                  NULL,
                  "taken never");
  } else {
    assert(dest_bci != never_reached, "inconsistent dest");
    profile_switch_case(prof_table_index);
    merge_new_path(dest_bci);
  }
}


extern "C" {
  static int jint_cmp(const void *i, const void *j) {
    int a = *(jint *)i;
    int b = *(jint *)j;
    return a > b ? 1 : a < b ? -1 : 0;
  }
}
// Default value for methodData switch indexing. Must be a negative value to avoid
// conflict with any legal switch index.
#define NullTableIndex -1

class SwitchRange : public StackObj {
  // a range of integers coupled with a bci destination
  jint _lo;          // inclusive lower limit
  jint _hi;          // inclusive upper limit
  int  _dest;
  int  _table_index; // index into method data table
  float _cnt;        // how many times this range was hit according to profiling

public:
  jint lo() const           { return _lo;   }
  jint hi() const           { return _hi;   }
  int  dest() const         { return _dest; }
  int  table_index() const  { return _table_index; }
  bool is_singleton() const { return _lo == _hi; }
  float cnt() const         { return _cnt; }

  void setRange(jint lo, jint hi, int dest, int table_index, float cnt) {
    assert(lo <= hi, "must be a non-empty range");
    _lo = lo; _hi = hi; _dest = dest; _table_index = table_index; _cnt = cnt;
    assert(_cnt >= 0, "");
  }
  bool adjoinRange(jint lo, jint hi, int dest, int table_index, float cnt, bool trim_ranges) {
    assert(lo <= hi, "must be a non-empty range");
    if (lo == _hi+1 && table_index == _table_index) {
      // see merge_ranges() comment below
      if (trim_ranges) {
        if (cnt == 0) {
          if (_cnt != 0) {
            return false;
          }
          if (dest != _dest) {
            _dest = never_reached;
          }
        } else {
          if (_cnt == 0) {
            return false;
          }
          if (dest != _dest) {
            return false;
          }
        }
      } else {
        if (dest != _dest) {
          return false;
        }
      }
      _hi = hi;
      _cnt += cnt;
      return true;
    }
    return false;
  }

  void set(jint value, int dest, int table_index, float cnt) {
    setRange(value, value, dest, table_index, cnt);
  }
  bool adjoin(jint value, int dest, int table_index, float cnt, bool trim_ranges) {
    return adjoinRange(value, value, dest, table_index, cnt, trim_ranges);
  }
  bool adjoin(SwitchRange& other) {
    return adjoinRange(other._lo, other._hi, other._dest, other._table_index, other._cnt, false);
  }

  void print() {
    if (is_singleton())
      tty->print(" {%d}=>%d (cnt=%f)", lo(), dest(), cnt());
    else if (lo() == min_jint)
      tty->print(" {..%d}=>%d (cnt=%f)", hi(), dest(), cnt());
    else if (hi() == max_jint)
      tty->print(" {%d..}=>%d (cnt=%f)", lo(), dest(), cnt());
    else
      tty->print(" {%d..%d}=>%d (cnt=%f)", lo(), hi(), dest(), cnt());
  }
};

// We try to minimize the number of ranges and the size of the taken
// ones using profiling data. When ranges are created,
// SwitchRange::adjoinRange() only allows 2 adjoining ranges to merge
// if both were never hit or both were hit to build longer unreached
// ranges. Here, we now merge adjoining ranges with the same
// destination and finally set the destination of unreached ranges to the
// special value never_reached because it can help minimize the number
// of tests that are necessary.
//
// For instance:
// [0, 1] to target1 sometimes taken
// [1, 2] to target1 never taken
// [2, 3] to target2 never taken
// would lead to:
// [0, 1] to target1 sometimes taken
// [1, 3] never taken
//
// (first 2 ranges to target1 are not merged)
static void merge_ranges(SwitchRange* ranges, int& rp) {
  if (rp == 0) {
    return;
  }
  int shift = 0;
  for (int j = 0; j < rp; j++) {
    SwitchRange& r1 = ranges[j-shift];
    SwitchRange& r2 = ranges[j+1];
    if (r1.adjoin(r2)) {
      shift++;
    } else if (shift > 0) {
      ranges[j+1-shift] = r2;
    }
  }
  rp -= shift;
  for (int j = 0; j <= rp; j++) {
    SwitchRange& r = ranges[j];
    if (r.cnt() == 0 && r.dest() != never_reached) {
      r.setRange(r.lo(), r.hi(), never_reached, r.table_index(), r.cnt());
    }
  }
}

//-------------------------------do_tableswitch--------------------------------
void Parse::do_tableswitch() {
  Node* lookup = pop();
  // Get information about tableswitch
  int default_dest = iter().get_dest_table(0);
  int lo_index     = iter().get_int_table(1);
  int hi_index     = iter().get_int_table(2);
  int len          = hi_index - lo_index + 1;

  if (len < 1) {
    // If this is a backward branch, add safepoint
    maybe_add_safepoint(default_dest);
    merge(default_dest);
    return;
  }

  ciMethodData* methodData = method()->method_data();
  ciMultiBranchData* profile = NULL;
  if (methodData->is_mature() && UseSwitchProfiling) {
    ciProfileData* data = methodData->bci_to_data(bci());
    if (data != NULL && data->is_MultiBranchData()) {
      profile = (ciMultiBranchData*)data;
    }
  }
  bool trim_ranges = !method_data_update() && !C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if);

  // generate decision tree, using trichotomy when possible
  int rnum = len+2;
  bool makes_backward_branch = false;
  SwitchRange* ranges = NEW_RESOURCE_ARRAY(SwitchRange, rnum);
  int rp = -1;
  if (lo_index != min_jint) {
    uint cnt = 1;
    if (profile != NULL) {
      cnt = profile->default_count() / (hi_index != max_jint ? 2 : 1);
    }
    ranges[++rp].setRange(min_jint, lo_index-1, default_dest, NullTableIndex, cnt);
  }
  for (int j = 0; j < len; j++) {
    jint match_int = lo_index+j;
    int  dest      = iter().get_dest_table(j+3);
    makes_backward_branch |= (dest <= bci());
    int  table_index = method_data_update() ? j : NullTableIndex;
    uint cnt = 1;
    if (profile != NULL) {
      cnt = profile->count_at(j);
    }
    if (rp < 0 || !ranges[rp].adjoin(match_int, dest, table_index, cnt, trim_ranges)) {
      ranges[++rp].set(match_int, dest, table_index, cnt);
    }
  }
  jint highest = lo_index+(len-1);
  assert(ranges[rp].hi() == highest, "");
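  // Note (illustrative): values below lo_index and above hi_index both go
  // to the default target; profile->default_count() is split evenly
  // between those two synthetic ranges when both exist, hence the
  // division by 2 in the code above and below.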
  if (highest != max_jint) {
    uint cnt = 1;
    if (profile != NULL) {
      cnt = profile->default_count() / (lo_index != min_jint ? 2 : 1);
    }
    if (!ranges[rp].adjoinRange(highest+1, max_jint, default_dest, NullTableIndex, cnt, trim_ranges)) {
      ranges[++rp].setRange(highest+1, max_jint, default_dest, NullTableIndex, cnt);
    }
  }
  assert(rp < len+2, "not too many ranges");

  if (trim_ranges) {
    merge_ranges(ranges, rp);
  }

  // Safepoint in case a backward branch is observed
  if( makes_backward_branch && UseLoopSafepoints )
    add_safepoint();

  jump_switch_ranges(lookup, &ranges[0], &ranges[rp]);
}


//------------------------------do_lookupswitch--------------------------------
void Parse::do_lookupswitch() {
  Node *lookup = pop();  // lookup value
  // Get information about lookupswitch
  int default_dest = iter().get_dest_table(0);
  int len          = iter().get_int_table(1);

  if (len < 1) {  // If this is a backward branch, add safepoint
    maybe_add_safepoint(default_dest);
    merge(default_dest);
    return;
  }

  ciMethodData* methodData = method()->method_data();
  ciMultiBranchData* profile = NULL;
  if (methodData->is_mature() && UseSwitchProfiling) {
    ciProfileData* data = methodData->bci_to_data(bci());
    if (data != NULL && data->is_MultiBranchData()) {
      profile = (ciMultiBranchData*)data;
    }
  }
  bool trim_ranges = !method_data_update() && !C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if);

  // generate decision tree, using trichotomy when possible
  jint* table = NEW_RESOURCE_ARRAY(jint, len*3);
  {
    for (int j = 0; j < len; j++) {
      table[3*j+0] = iter().get_int_table(2+2*j);
      table[3*j+1] = iter().get_dest_table(2+2*j+1);
      table[3*j+2] = profile == NULL ? 1 : profile->count_at(j);
    }
    qsort(table, len, 3*sizeof(table[0]), jint_cmp);
  }

  float defaults = 0;
  jint prev = min_jint;
  for (int j = 0; j < len; j++) {
    jint match_int = table[3*j+0];
    if (match_int != prev) {
      defaults += (float)match_int - prev;
    }
    prev = match_int+1;
  }
  if (prev-1 != max_jint) {
    defaults += (float)max_jint - prev + 1;
  }
  float default_cnt = 1;
  if (profile != NULL) {
    default_cnt = profile->default_count()/defaults;
  }

  int rnum = len*2+1;
  bool makes_backward_branch = false;
  SwitchRange* ranges = NEW_RESOURCE_ARRAY(SwitchRange, rnum);
  int rp = -1;
  for (int j = 0; j < len; j++) {
    jint match_int   = table[3*j+0];
    int  dest        = table[3*j+1];
    int  cnt         = table[3*j+2];
    int  next_lo     = rp < 0 ? min_jint : ranges[rp].hi()+1;
    int  table_index = method_data_update() ? j : NullTableIndex;
    makes_backward_branch |= (dest <= bci());
    float c = default_cnt * ((float)match_int - next_lo);
    if (match_int != next_lo && (rp < 0 || !ranges[rp].adjoinRange(next_lo, match_int-1, default_dest, NullTableIndex, c, trim_ranges))) {
      assert(default_dest != never_reached, "sentinel value for dead destinations");
      ranges[++rp].setRange(next_lo, match_int-1, default_dest, NullTableIndex, c);
    }
    if (rp < 0 || !ranges[rp].adjoin(match_int, dest, table_index, cnt, trim_ranges)) {
      assert(dest != never_reached, "sentinel value for dead destinations");
      ranges[++rp].set(match_int, dest, table_index, cnt);
    }
  }
  jint highest = table[3*(len-1)];
  assert(ranges[rp].hi() == highest, "");
  if (highest != max_jint &&
      !ranges[rp].adjoinRange(highest+1, max_jint, default_dest, NullTableIndex, default_cnt * ((float)max_jint - highest), trim_ranges)) {
    ranges[++rp].setRange(highest+1, max_jint, default_dest, NullTableIndex, default_cnt * ((float)max_jint - highest));
  }
  assert(rp < rnum, "not too many ranges");

  if (trim_ranges) {
    merge_ranges(ranges, rp);
  }

  // Safepoint in case a backward branch is observed
  if (makes_backward_branch && UseLoopSafepoints)
    add_safepoint();

  jump_switch_ranges(lookup, &ranges[0], &ranges[rp]);
}

static float if_prob(float taken_cnt, float total_cnt) {
  assert(taken_cnt <= total_cnt, "");
  if (total_cnt == 0) {
    return PROB_FAIR;
  }
  float p = taken_cnt / total_cnt;
  return MIN2(MAX2(p, PROB_MIN), PROB_MAX);
}

static float if_cnt(float cnt) {
  if (cnt == 0) {
    return COUNT_UNKNOWN;
  }
  return cnt;
}

static float sum_of_cnts(SwitchRange *lo, SwitchRange *hi) {
  float total_cnt = 0;
  for (SwitchRange* sr = lo; sr <= hi; sr++) {
    total_cnt += sr->cnt();
  }
  return total_cnt;
}
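// Note (illustrative): compute_tree_cost() below estimates the expected
// number of comparisons per switch execution. A range reached after k
// IfNode tests with relative frequency f contributes roughly k*f to the
// total, since each tree level a range sits under adds that subtree's
// frequency share (r_cnt / total_cnt) once.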
class SwitchRanges : public ResourceObj {
public:
  SwitchRange* _lo;
  SwitchRange* _hi;
  SwitchRange* _mid;
  float _cost;

  enum {
    Start,
    LeftDone,
    RightDone,
    Done
  } _state;

  SwitchRanges(SwitchRange *lo, SwitchRange *hi)
    : _lo(lo), _hi(hi), _mid(NULL),
      _cost(0), _state(Start) {
  }

  SwitchRanges()
    : _lo(NULL), _hi(NULL), _mid(NULL),
      _cost(0), _state(Start) {}
};

// Estimate cost of performing a binary search on lo..hi
static float compute_tree_cost(SwitchRange *lo, SwitchRange *hi, float total_cnt) {
  GrowableArray<SwitchRanges> tree;
  SwitchRanges root(lo, hi);
  tree.push(root);

  float cost = 0;
  do {
    SwitchRanges& r = *tree.adr_at(tree.length()-1);
    if (r._hi != r._lo) {
      if (r._mid == NULL) {
        float r_cnt = sum_of_cnts(r._lo, r._hi);

        if (r_cnt == 0) {
          tree.pop();
          cost = 0;
          continue;
        }

        SwitchRange* mid = NULL;
        mid = r._lo;
        for (float cnt = 0; ; ) {
          assert(mid <= r._hi, "out of bounds");
          cnt += mid->cnt();
          if (cnt > r_cnt / 2) {
            break;
          }
          mid++;
        }
        assert(mid <= r._hi, "out of bounds");
        r._mid = mid;
        r._cost = r_cnt / total_cnt;
      }
      r._cost += cost;
      if (r._state < SwitchRanges::LeftDone && r._mid > r._lo) {
        cost = 0;
        r._state = SwitchRanges::LeftDone;
        tree.push(SwitchRanges(r._lo, r._mid-1));
      } else if (r._state < SwitchRanges::RightDone) {
        cost = 0;
        r._state = SwitchRanges::RightDone;
        tree.push(SwitchRanges(r._mid == r._lo ? r._mid+1 : r._mid, r._hi));
      } else {
        tree.pop();
        cost = r._cost;
      }
    } else {
      tree.pop();
      cost = r._cost;
    }
  } while (tree.length() > 0);

  return cost;
}

// It sometimes pays off to test most common ranges before the binary search
void Parse::linear_search_switch_ranges(Node* key_val, SwitchRange*& lo, SwitchRange*& hi) {
  uint nr = hi - lo + 1;
  float total_cnt = sum_of_cnts(lo, hi);

  float min = compute_tree_cost(lo, hi, total_cnt);
  float extra = 1;
  float sub = 0;

  SwitchRange* array1 = lo;
  SwitchRange* array2 = NEW_RESOURCE_ARRAY(SwitchRange, nr);

  SwitchRange* ranges = NULL;

  while (nr >= 2) {
    assert(lo == array1 || lo == array2, "one of the 2 already allocated arrays");
    ranges = (lo == array1) ? array2 : array1;

    // Find highest frequency range
    SwitchRange* candidate = lo;
    for (SwitchRange* sr = lo+1; sr <= hi; sr++) {
      if (sr->cnt() > candidate->cnt()) {
        candidate = sr;
      }
    }
    SwitchRange most_freq = *candidate;
    if (most_freq.cnt() == 0) {
      break;
    }

    // Copy remaining ranges into another array
    int shift = 0;
    for (uint i = 0; i < nr; i++) {
      SwitchRange* sr = &lo[i];
      if (sr != candidate) {
        ranges[i-shift] = *sr;
      } else {
        shift++;
        if (i > 0 && i < nr-1) {
          SwitchRange prev = lo[i-1];
          prev.setRange(prev.lo(), sr->hi(), prev.dest(), prev.table_index(), prev.cnt());
          if (prev.adjoin(lo[i+1])) {
            shift++;
            i++;
          }
          ranges[i-shift] = prev;
        }
      }
    }
    nr -= shift;

    // Evaluate cost of testing the most common range and performing a
    // binary search on the other ranges
    float cost = extra + compute_tree_cost(&ranges[0], &ranges[nr-1], total_cnt);
    if (cost >= min) {
      break;
    }
    // swap arrays
    lo = &ranges[0];
    hi = &ranges[nr-1];

    // It pays off: emit the test for the most common range
    assert(most_freq.cnt() > 0, "must be taken");
    Node* val = _gvn.transform(new SubINode(key_val, _gvn.intcon(most_freq.lo())));
    Node* cmp = _gvn.transform(new CmpUNode(val, _gvn.intcon(most_freq.hi() - most_freq.lo())));
    Node* tst = _gvn.transform(new BoolNode(cmp, BoolTest::le));
    IfNode* iff = create_and_map_if(control(), tst, if_prob(most_freq.cnt(), total_cnt), if_cnt(most_freq.cnt()));
    jump_if_true_fork(iff, most_freq.dest(), most_freq.table_index(), false);

    sub += most_freq.cnt() / total_cnt;
    extra += 1 - sub;
    min = cost;
  }
}

//----------------------------create_jump_tables-------------------------------
bool Parse::create_jump_tables(Node* key_val, SwitchRange* lo, SwitchRange* hi) {
  // Are jumptables enabled
  if (!UseJumpTables) return false;

  // Are jumptables supported
  if (!Matcher::has_match_rule(Op_Jump)) return false;

  // Don't make jump table if profiling
  if (method_data_update()) return false;

  bool trim_ranges = !C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if);

  // Decide if a guard is needed to lop off big ranges at either (or
  // both) end(s) of the input set. We'll call this the default target
  // even though we can't be sure that it is the true "default".
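  // Illustrative example: for ranges {min_jint..-1}=>D, {0}=>A, ...,
  // {99}=>B, {100..max_jint}=>D, both huge outlier ranges share the
  // destination D, so a single unsigned guard on the normalized key can
  // route them to D and the table only needs to cover 0..99.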
  bool needs_guard = false;
  int default_dest;
  int64_t total_outlier_size = 0;
  int64_t hi_size = ((int64_t)hi->hi()) - ((int64_t)hi->lo()) + 1;
  int64_t lo_size = ((int64_t)lo->hi()) - ((int64_t)lo->lo()) + 1;

  if (lo->dest() == hi->dest()) {
    total_outlier_size = hi_size + lo_size;
    default_dest = lo->dest();
  } else if (lo_size > hi_size) {
    total_outlier_size = lo_size;
    default_dest = lo->dest();
  } else {
    total_outlier_size = hi_size;
    default_dest = hi->dest();
  }

  float total = sum_of_cnts(lo, hi);
  float cost = compute_tree_cost(lo, hi, total);

  // If a guard test will eliminate very sparse end ranges, then
  // it is worth the cost of an extra jump.
  float trimmed_cnt = 0;
  if (total_outlier_size > (MaxJumpTableSparseness * 4)) {
    needs_guard = true;
    if (default_dest == lo->dest()) {
      trimmed_cnt += lo->cnt();
      lo++;
    }
    if (default_dest == hi->dest()) {
      trimmed_cnt += hi->cnt();
      hi--;
    }
  }

  // Find the total number of cases and ranges
  int64_t num_cases = ((int64_t)hi->hi()) - ((int64_t)lo->lo()) + 1;
  int num_range = hi - lo + 1;

  // Don't create table if: too large, too small, or too sparse.
  if (num_cases > MaxJumpTableSize)
    return false;
  if (UseSwitchProfiling) {
    // MinJumpTableSize is set so that, with a well balanced binary tree,
    // when the number of ranges is MinJumpTableSize, it's cheaper to go
    // through a JumpNode than a tree of IfNodes. The average cost of a
    // tree of IfNodes with MinJumpTableSize ranges is
    // log2f(MinJumpTableSize) comparisons. So if the cost computed
    // from profile data is less than log2f(MinJumpTableSize) then
    // going with the binary search is cheaper.
    if (cost < log2f(MinJumpTableSize)) {
      return false;
    }
  } else {
    if (num_cases < MinJumpTableSize)
      return false;
  }
  if (num_cases > (MaxJumpTableSparseness * num_range))
    return false;

  // Normalize table lookups to zero
  int lowval = lo->lo();
  key_val = _gvn.transform( new SubINode(key_val, _gvn.intcon(lowval)) );

  // Generate a guard to protect against input keyvals that aren't
  // in the switch domain.
  if (needs_guard) {
    Node*   size = _gvn.intcon(num_cases);
    Node*   cmp = _gvn.transform(new CmpUNode(key_val, size));
    Node*   tst = _gvn.transform(new BoolNode(cmp, BoolTest::ge));
    IfNode* iff = create_and_map_if(control(), tst, if_prob(trimmed_cnt, total), if_cnt(trimmed_cnt));
    jump_if_true_fork(iff, default_dest, NullTableIndex, trim_ranges && trimmed_cnt == 0);

    total -= trimmed_cnt;
  }

  // Create an ideal node JumpTable that has projections
  // of all possible ranges for a switch statement
  // The key_val input must be converted to a pointer offset and scaled.
  // Compare Parse::array_addressing above.

  // Clean the 32-bit int into a real 64-bit offset.
  // Otherwise, the jint value 0 might turn into an offset of 0x0800000000.
  const TypeInt* ikeytype = TypeInt::make(0, num_cases, Type::WidenMin);
  // Make I2L conversion control dependent to prevent it from
  // floating above the range check during loop optimizations.
  key_val = C->conv_I2X_index(&_gvn, key_val, ikeytype, control());

  // Shift the value by wordsize so we have an index into the table, rather
  // than a switch value
  Node *shiftWord = _gvn.MakeConX(wordSize);
  key_val = _gvn.transform( new MulXNode( key_val, shiftWord));

  // Create the JumpNode
  Arena* arena = C->comp_arena();
  float* probs = (float*)arena->Amalloc(sizeof(float)*num_cases);
  int i = 0;
  if (total == 0) {
    for (SwitchRange* r = lo; r <= hi; r++) {
      for (int64_t j = r->lo(); j <= r->hi(); j++, i++) {
        probs[i] = 1.0F / num_cases;
      }
    }
  } else {
    for (SwitchRange* r = lo; r <= hi; r++) {
      float prob = r->cnt()/total;
      for (int64_t j = r->lo(); j <= r->hi(); j++, i++) {
        probs[i] = prob / (r->hi() - r->lo() + 1);
      }
    }
  }

  ciMethodData* methodData = method()->method_data();
  ciMultiBranchData* profile = NULL;
  if (methodData->is_mature()) {
    ciProfileData* data = methodData->bci_to_data(bci());
    if (data != NULL && data->is_MultiBranchData()) {
      profile = (ciMultiBranchData*)data;
    }
  }

  Node* jtn = _gvn.transform(new JumpNode(control(), key_val, num_cases, probs, profile == NULL ? COUNT_UNKNOWN : total));

  // These are the switch destinations hanging off the jumpnode
  i = 0;
  for (SwitchRange* r = lo; r <= hi; r++) {
    for (int64_t j = r->lo(); j <= r->hi(); j++, i++) {
      Node* input = _gvn.transform(new JumpProjNode(jtn, i, r->dest(), (int)(j - lowval)));
      {
        PreserveJVMState pjvms(this);
        set_control(input);
        jump_if_always_fork(r->dest(), r->table_index(), trim_ranges && r->cnt() == 0);
      }
    }
  }
  assert(i == num_cases, "miscount of cases");
  stop_and_kill_map();  // no more uses for this JVMS
  return true;
}

//----------------------------jump_switch_ranges-------------------------------
void Parse::jump_switch_ranges(Node* key_val, SwitchRange *lo, SwitchRange *hi, int switch_depth) {
  Block* switch_block = block();
  bool trim_ranges = !method_data_update() && !C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if);

  if (switch_depth == 0) {
    // Do special processing for the top-level call.
    assert(lo->lo() == min_jint, "initial range must exhaust Type::INT");
    assert(hi->hi() == max_jint, "initial range must exhaust Type::INT");

    // Decrement pred-numbers for the unique set of nodes.
#ifdef ASSERT
    if (!trim_ranges) {
      // Ensure that the block's successors are a (duplicate-free) set.
      int successors_counted = 0;  // block occurrences in [hi..lo]
      int unique_successors = switch_block->num_successors();
      for (int i = 0; i < unique_successors; i++) {
        Block* target = switch_block->successor_at(i);

        // Check that the set of successors is the same in both places.
        int successors_found = 0;
        for (SwitchRange* p = lo; p <= hi; p++) {
          if (p->dest() == target->start()) successors_found++;
        }
        assert(successors_found > 0, "successor must be known");
        successors_counted += successors_found;
      }
      assert(successors_counted == (hi-lo)+1, "no unexpected successors");
    }
#endif

    // Maybe prune the inputs, based on the type of key_val.
    jint min_val = min_jint;
    jint max_val = max_jint;
    const TypeInt* ti = key_val->bottom_type()->isa_int();
    if (ti != NULL) {
      min_val = ti->_lo;
      max_val = ti->_hi;
      assert(min_val <= max_val, "invalid int type");
    }
    while (lo->hi() < min_val) {
      lo++;
    }
    if (lo->lo() < min_val) {
      lo->setRange(min_val, lo->hi(), lo->dest(), lo->table_index(), lo->cnt());
    }
    while (hi->lo() > max_val) {
      hi--;
    }
    if (hi->hi() > max_val) {
      hi->setRange(hi->lo(), max_val, hi->dest(), hi->table_index(), hi->cnt());
    }

    linear_search_switch_ranges(key_val, lo, hi);
  }

#ifndef PRODUCT
  if (switch_depth == 0) {
    _max_switch_depth = 0;
    _est_switch_depth = log2_intptr((hi-lo+1)-1)+1;
  }
#endif

  assert(lo <= hi, "must be a non-empty set of ranges");
  if (lo == hi) {
    jump_if_always_fork(lo->dest(), lo->table_index(), trim_ranges && lo->cnt() == 0);
  } else {
    assert(lo->hi() == (lo+1)->lo()-1, "contiguous ranges");
    assert(hi->lo() == (hi-1)->hi()+1, "contiguous ranges");

    if (create_jump_tables(key_val, lo, hi)) return;

    SwitchRange* mid = NULL;
    float total_cnt = sum_of_cnts(lo, hi);

    int nr = hi - lo + 1;
    if (UseSwitchProfiling) {
      // Don't keep the binary search tree balanced: pick the mid point
      // that splits frequencies in half.
      float cnt = 0;
      for (SwitchRange* sr = lo; sr <= hi; sr++) {
        cnt += sr->cnt();
        if (cnt >= total_cnt / 2) {
          mid = sr;
          break;
        }
      }
    } else {
      mid = lo + nr/2;

      // if there is an easy choice, pivot at a singleton:
      if (nr > 3 && !mid->is_singleton() && (mid-1)->is_singleton()) mid--;

      assert(lo < mid && mid <= hi, "good pivot choice");
      assert(nr != 2 || mid == hi, "should pick higher of 2");
      assert(nr != 3 || mid == hi-1, "should pick middle of 3");
    }


    Node *test_val = _gvn.intcon(mid == lo ? mid->hi() : mid->lo());

    if (mid->is_singleton()) {
      IfNode *iff_ne = jump_if_fork_int(key_val, test_val, BoolTest::ne, 1-if_prob(mid->cnt(), total_cnt), if_cnt(mid->cnt()));
      jump_if_false_fork(iff_ne, mid->dest(), mid->table_index(), trim_ranges && mid->cnt() == 0);

      // Special Case: If there are exactly three ranges, and the high
      // and low range each go to the same place, omit the "gt" test,
      // since it will not discriminate anything.
      bool eq_test_only = (hi == lo+2 && hi->dest() == lo->dest() && mid == hi-1) || mid == lo;

      // if there is a higher range, test for it and process it:
      if (mid < hi && !eq_test_only) {
        // two comparisons of same values--should enable 1 test for 2 branches
        // Use BoolTest::le instead of BoolTest::gt
        float cnt = sum_of_cnts(lo, mid-1);
        IfNode *iff_le  = jump_if_fork_int(key_val, test_val, BoolTest::le, if_prob(cnt, total_cnt), if_cnt(cnt));
        Node   *iftrue  = _gvn.transform( new IfTrueNode(iff_le) );
        Node   *iffalse = _gvn.transform( new IfFalseNode(iff_le) );
        { PreserveJVMState pjvms(this);
          set_control(iffalse);
          jump_switch_ranges(key_val, mid+1, hi, switch_depth+1);
        }
        set_control(iftrue);
      }

    } else {
      // mid is a range, not a singleton, so treat mid..hi as a unit
      float cnt = sum_of_cnts(mid == lo ? mid+1 : mid, hi);
      IfNode *iff_ge = jump_if_fork_int(key_val, test_val, mid == lo ? BoolTest::gt : BoolTest::ge, if_prob(cnt, total_cnt), if_cnt(cnt));

      // if there is a higher range, test for it and process it:
      if (mid == hi) {
        jump_if_true_fork(iff_ge, mid->dest(), mid->table_index(), trim_ranges && cnt == 0);
      } else {
        Node *iftrue  = _gvn.transform( new IfTrueNode(iff_ge) );
        Node *iffalse = _gvn.transform( new IfFalseNode(iff_ge) );
        { PreserveJVMState pjvms(this);
          set_control(iftrue);
          jump_switch_ranges(key_val, mid == lo ? mid+1 : mid, hi, switch_depth+1);
        }
        set_control(iffalse);
      }
    }

    // in any case, process the lower range
    if (mid == lo) {
      if (mid->is_singleton()) {
        jump_switch_ranges(key_val, lo+1, hi, switch_depth+1);
      } else {
        jump_if_always_fork(lo->dest(), lo->table_index(), trim_ranges && lo->cnt() == 0);
      }
    } else {
      jump_switch_ranges(key_val, lo, mid-1, switch_depth+1);
    }
  }

  // Decrease pred_count for each successor after all is done.
  if (switch_depth == 0) {
    int unique_successors = switch_block->num_successors();
    for (int i = 0; i < unique_successors; i++) {
      Block* target = switch_block->successor_at(i);
      // Throw away the pre-allocated path for each unique successor.
      target->next_path_num();
    }
  }

#ifndef PRODUCT
  _max_switch_depth = MAX2(switch_depth, _max_switch_depth);
  if (TraceOptoParse && Verbose && WizardMode && switch_depth == 0) {
    SwitchRange* r;
    int nsing = 0;
    for( r = lo; r <= hi; r++ ) {
      if( r->is_singleton() ) nsing++;
    }
    tty->print(">>> ");
    _method->print_short_name();
    tty->print_cr(" switch decision tree");
    tty->print_cr("    %d ranges (%d singletons), max_depth=%d, est_depth=%d",
                  (int) (hi-lo+1), nsing, _max_switch_depth, _est_switch_depth);
    if (_max_switch_depth > _est_switch_depth) {
      tty->print_cr("******** BAD SWITCH DEPTH ********");
    }
    tty->print("   ");
    for( r = lo; r <= hi; r++ ) {
      r->print();
    }
    tty->cr();
  }
#endif
}

void Parse::modf() {
  Node *f2 = pop();
  Node *f1 = pop();
  Node* c = make_runtime_call(RC_LEAF, OptoRuntime::modf_Type(),
                              CAST_FROM_FN_PTR(address, SharedRuntime::frem),
                              "frem", NULL, //no memory effects
                              f1, f2);
  Node* res = _gvn.transform(new ProjNode(c, TypeFunc::Parms + 0));

  push(res);
}

void Parse::modd() {
  Node *d2 = pop_pair();
  Node *d1 = pop_pair();
  Node* c = make_runtime_call(RC_LEAF, OptoRuntime::Math_DD_D_Type(),
                              CAST_FROM_FN_PTR(address, SharedRuntime::drem),
                              "drem", NULL, //no memory effects
                              d1, top(), d2, top());
  Node* res_d = _gvn.transform(new ProjNode(c, TypeFunc::Parms + 0));

#ifdef ASSERT
  Node* res_top = _gvn.transform(new ProjNode(c, TypeFunc::Parms + 1));
  assert(res_top == top(), "second value must be top");
#endif

  push_pair(res_d);
}

void Parse::l2f() {
  Node* f2 = pop();
  Node* f1 = pop();
  Node* c = make_runtime_call(RC_LEAF, OptoRuntime::l2f_Type(),
                              CAST_FROM_FN_PTR(address, SharedRuntime::l2f),
                              "l2f", NULL, //no memory effects
                              f1, f2);
  Node* res = _gvn.transform(new ProjNode(c, TypeFunc::Parms + 0));

  push(res);
}
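// Note (illustrative): 64-bit Java values occupy two stack slots in the
// parser's JVM state, so modd() above passes top() for the unused high
// half of each double argument, and the ASSERT block checks that the
// second result projection of the leaf call is likewise top().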
void Parse::do_irem() {
  // Must keep both values on the expression-stack during null-check
  zero_check_int(peek());
  // Compile-time detection of null exception?
  if (stopped()) return;

  Node* b = pop();
  Node* a = pop();

  const Type *t = _gvn.type(b);
  if (t != Type::TOP) {
    const TypeInt *ti = t->is_int();
    if (ti->is_con()) {
      int divisor = ti->get_con();
      // check for positive power of 2
      if (divisor > 0 &&
          (divisor & ~(divisor-1)) == divisor) {
        // yes !
        Node *mask = _gvn.intcon((divisor - 1));
        // Sigh, must handle negative dividends
        Node *zero = _gvn.intcon(0);
        IfNode *ifff = jump_if_fork_int(a, zero, BoolTest::lt, PROB_FAIR, COUNT_UNKNOWN);
        Node *iff = _gvn.transform( new IfFalseNode(ifff) );
        Node *ift = _gvn.transform( new IfTrueNode (ifff) );
        Node *reg = jump_if_join(ift, iff);
        Node *phi = PhiNode::make(reg, NULL, TypeInt::INT);
        // Negative path; negate/and/negate
        Node *neg = _gvn.transform( new SubINode(zero, a) );
        Node *andn = _gvn.transform( new AndINode(neg, mask) );
        Node *negn = _gvn.transform( new SubINode(zero, andn) );
        phi->init_req(1, negn);
        // Fast positive case
        Node *andx = _gvn.transform( new AndINode(a, mask) );
        phi->init_req(2, andx);
        // Push the merge
        push( _gvn.transform(phi) );
        return;
      }
    }
  }
  // Default case
  push( _gvn.transform( new ModINode(control(), a, b) ) );
}
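// Worked example (illustrative) for the power-of-2 fast path in do_irem()
// above: a % 8 with a == -13 takes the negative branch and computes
// -((-(-13)) & 7) = -(13 & 7) = -5, matching Java remainder semantics
// (the result has the sign of the dividend), whereas a plain "a & 7"
// would incorrectly give 3.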
// Handle jsr and jsr_w bytecode
void Parse::do_jsr() {
  assert(bc() == Bytecodes::_jsr || bc() == Bytecodes::_jsr_w, "wrong bytecode");

  // Store information about current state, tagged with new _jsr_bci
  int return_bci = iter().next_bci();
  int jsr_bci    = (bc() == Bytecodes::_jsr) ? iter().get_dest() : iter().get_far_dest();

  // Update method data
  profile_taken_branch(jsr_bci);

  // The way we do things now, there is only one successor block
  // for the jsr, because the target code is cloned by ciTypeFlow.
  Block* target = successor_for_bci(jsr_bci);

  // What got pushed?
  const Type* ret_addr = target->peek();
  assert(ret_addr->singleton(), "must be a constant (cloned jsr body)");

  // Effect of jsr on stack
  push(_gvn.makecon(ret_addr));

  // Flow to the jsr.
  merge(jsr_bci);
}

// Handle ret bytecode
void Parse::do_ret() {
  // Find to whom we return.
  assert(block()->num_successors() == 1, "a ret can only go one place now");
  Block* target = block()->successor_at(0);
  assert(!target->is_ready(), "our arrival must be expected");
  profile_ret(target->flow()->start());
  int pnum = target->next_path_num();
  merge_common(target, pnum);
}

static bool has_injected_profile(BoolTest::mask btest, Node* test, int& taken, int& not_taken) {
  if (btest != BoolTest::eq && btest != BoolTest::ne) {
    // Only ::eq and ::ne are supported for profile injection.
    return false;
  }
  if (test->is_Cmp() &&
      test->in(1)->Opcode() == Op_ProfileBoolean) {
    ProfileBooleanNode* profile = (ProfileBooleanNode*)test->in(1);
    int false_cnt = profile->false_count();
    int true_cnt  = profile->true_count();

    // Which count maps to taken/not_taken depends on the actual test
    // operation (::eq or ::ne). No need to scale the counts because
    // profile injection was designed to feed exact counts into the VM.
    taken     = (btest == BoolTest::eq) ? false_cnt : true_cnt;
    not_taken = (btest == BoolTest::eq) ? true_cnt : false_cnt;

    profile->consume();
    return true;
  }
  return false;
}

//--------------------------dynamic_branch_prediction--------------------------
// Try to gather dynamic branch prediction behavior. Return a probability
// of the branch being taken and set the "cnt" field. Returns -1.0
// if we need to use static prediction for some reason.
float Parse::dynamic_branch_prediction(float &cnt, BoolTest::mask btest, Node* test) {
  ResourceMark rm;

  cnt = COUNT_UNKNOWN;

  int taken = 0;
  int not_taken = 0;

  bool use_mdo = !has_injected_profile(btest, test, taken, not_taken);

  if (use_mdo) {
    // Use MethodData information if it is available
    // FIXME: free the ProfileData structure
    ciMethodData* methodData = method()->method_data();
    if (!methodData->is_mature()) return PROB_UNKNOWN;
    ciProfileData* data = methodData->bci_to_data(bci());
    if (data == NULL) {
      return PROB_UNKNOWN;
    }
    if (!data->is_JumpData()) return PROB_UNKNOWN;

    // get taken and not taken values
    taken = data->as_JumpData()->taken();
    not_taken = 0;
    if (data->is_BranchData()) {
      not_taken = data->as_BranchData()->not_taken();
    }

    // scale the counts to be commensurate with invocation counts:
    taken = method()->scale_count(taken);
    not_taken = method()->scale_count(not_taken);
  }

  // Give up if too few (or too many, in which case the sum will overflow) counts to be meaningful.
  // We also check that the individual counters are positive first, since an
  // overflowed (negative) counter could otherwise still yield a positive sum.
  if (taken < 0 || not_taken < 0 || taken + not_taken < 40) {
    if (C->log() != NULL) {
      C->log()->elem("branch target_bci='%d' taken='%d' not_taken='%d'", iter().get_dest(), taken, not_taken);
    }
    return PROB_UNKNOWN;
  }

  // Compute frequency that we arrive here
  float sum = taken + not_taken;
  // Adjust, if this block is a cloned private block but the
  // Jump counts are shared. Take the private counts for
  // just this path instead of the shared counts.
  if( block()->count() > 0 )
    sum = block()->count();
  cnt = sum / FreqCountInvocations;

  // Pin probability to sane limits
  float prob;
  if( !taken )
    prob = (0+PROB_MIN) / 2;
  else if( !not_taken )
    prob = (1+PROB_MAX) / 2;
  else {                         // Compute probability of true path
    prob = (float)taken / (float)(taken + not_taken);
    if (prob > PROB_MAX) prob = PROB_MAX;
    if (prob < PROB_MIN) prob = PROB_MIN;
  }

  assert((cnt > 0.0f) && (prob > 0.0f),
         "Bad frequency assignment in if");

  if (C->log() != NULL) {
    const char* prob_str = NULL;
    if (prob >= PROB_MAX) prob_str = (prob == PROB_MAX) ? "max" : "always";
    if (prob <= PROB_MIN) prob_str = (prob == PROB_MIN) ? "min" : "never";
"min" : "never"; 1573 char prob_str_buf[30]; 1574 if (prob_str == NULL) { 1575 jio_snprintf(prob_str_buf, sizeof(prob_str_buf), "%20.2f", prob); 1576 prob_str = prob_str_buf; 1577 } 1578 C->log()->elem("branch target_bci='%d' taken='%d' not_taken='%d' cnt='%f' prob='%s'", 1579 iter().get_dest(), taken, not_taken, cnt, prob_str); 1580 } 1581 return prob; 1582 } 1583 1584 //-----------------------------branch_prediction------------------------------- 1585 float Parse::branch_prediction(float& cnt, 1586 BoolTest::mask btest, 1587 int target_bci, 1588 Node* test) { 1589 float prob = dynamic_branch_prediction(cnt, btest, test); 1590 // If prob is unknown, switch to static prediction 1591 if (prob != PROB_UNKNOWN) return prob; 1592 1593 prob = PROB_FAIR; // Set default value 1594 if (btest == BoolTest::eq) // Exactly equal test? 1595 prob = PROB_STATIC_INFREQUENT; // Assume its relatively infrequent 1596 else if (btest == BoolTest::ne) 1597 prob = PROB_STATIC_FREQUENT; // Assume its relatively frequent 1598 1599 // If this is a conditional test guarding a backwards branch, 1600 // assume its a loop-back edge. Make it a likely taken branch. 1601 if (target_bci < bci()) { 1602 if (is_osr_parse()) { // Could be a hot OSR'd loop; force deopt 1603 // Since it's an OSR, we probably have profile data, but since 1604 // branch_prediction returned PROB_UNKNOWN, the counts are too small. 1605 // Let's make a special check here for completely zero counts. 1606 ciMethodData* methodData = method()->method_data(); 1607 if (!methodData->is_empty()) { 1608 ciProfileData* data = methodData->bci_to_data(bci()); 1609 // Only stop for truly zero counts, which mean an unknown part 1610 // of the OSR-ed method, and we want to deopt to gather more stats. 1611 // If you have ANY counts, then this loop is simply 'cold' relative 1612 // to the OSR loop. 1613 if (data == NULL || 1614 (data->as_BranchData()->taken() + data->as_BranchData()->not_taken() == 0)) { 1615 // This is the only way to return PROB_UNKNOWN: 1616 return PROB_UNKNOWN; 1617 } 1618 } 1619 } 1620 prob = PROB_STATIC_FREQUENT; // Likely to take backwards branch 1621 } 1622 1623 assert(prob != PROB_UNKNOWN, "must have some guess at this point"); 1624 return prob; 1625 } 1626 1627 // The magic constants are chosen so as to match the output of 1628 // branch_prediction() when the profile reports a zero taken count. 1629 // It is important to distinguish zero counts unambiguously, because 1630 // some branches (e.g., _213_javac.Assembler.eliminate) validly produce 1631 // very small but nonzero probabilities, which if confused with zero 1632 // counts would keep the program recompiling indefinitely. 1633 bool Parse::seems_never_taken(float prob) const { 1634 return prob < PROB_MIN; 1635 } 1636 1637 // True if the comparison seems to be the kind that will not change its 1638 // statistics from true to false. See comments in adjust_map_after_if. 1639 // This question is only asked along paths which are already 1640 // classifed as untaken (by seems_never_taken), so really, 1641 // if a path is never taken, its controlling comparison is 1642 // already acting in a stable fashion. If the comparison 1643 // seems stable, we will put an expensive uncommon trap 1644 // on the untaken path. 
1645 bool Parse::seems_stable_comparison() const { 1646 if (C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if)) { 1647 return false; 1648 } 1649 return true; 1650 } 1651 1652 //-------------------------------repush_if_args-------------------------------- 1653 // Push arguments of an "if" bytecode back onto the stack by adjusting _sp. 1654 inline int Parse::repush_if_args() { 1655 if (PrintOpto && WizardMode) { 1656 tty->print("defending against excessive implicit null exceptions on %s @%d in ", 1657 Bytecodes::name(iter().cur_bc()), iter().cur_bci()); 1658 method()->print_name(); tty->cr(); 1659 } 1660 int bc_depth = - Bytecodes::depth(iter().cur_bc()); 1661 assert(bc_depth == 1 || bc_depth == 2, "only two kinds of branches"); 1662 DEBUG_ONLY(sync_jvms()); // argument(n) requires a synced jvms 1663 assert(argument(0) != NULL, "must exist"); 1664 assert(bc_depth == 1 || argument(1) != NULL, "two must exist"); 1665 inc_sp(bc_depth); 1666 return bc_depth; 1667 } 1668 1669 //----------------------------------do_ifnull---------------------------------- 1670 void Parse::do_ifnull(BoolTest::mask btest, Node *c) { 1671 int target_bci = iter().get_dest(); 1672 1673 Block* branch_block = successor_for_bci(target_bci); 1674 Block* next_block = successor_for_bci(iter().next_bci()); 1675 1676 float cnt; 1677 float prob = branch_prediction(cnt, btest, target_bci, c); 1678 if (prob == PROB_UNKNOWN) { 1679 // (An earlier version of do_ifnull omitted this trap for OSR methods.) 1680 if (PrintOpto && Verbose) { 1681 tty->print_cr("Never-taken edge stops compilation at bci %d", bci()); 1682 } 1683 repush_if_args(); // to gather stats on loop 1684 // We need to mark this branch as taken so that if we recompile we will 1685 // see that it is possible. In the tiered system the interpreter doesn't 1686 // do profiling and by the time we get to the lower tier from the interpreter 1687 // the path may be cold again. Make sure it doesn't look untaken 1688 profile_taken_branch(target_bci, !ProfileInterpreter); 1689 uncommon_trap(Deoptimization::Reason_unreached, 1690 Deoptimization::Action_reinterpret, 1691 NULL, "cold"); 1692 if (C->eliminate_boxing()) { 1693 // Mark the successor blocks as parsed 1694 branch_block->next_path_num(); 1695 next_block->next_path_num(); 1696 } 1697 return; 1698 } 1699 1700 NOT_PRODUCT(explicit_null_checks_inserted++); 1701 1702 // Generate real control flow 1703 Node *tst = _gvn.transform( new BoolNode( c, btest ) ); 1704 1705 // Sanity check the probability value 1706 assert(prob > 0.0f,"Bad probability in Parser"); 1707 // Need xform to put node in hash table 1708 IfNode *iff = create_and_xform_if( control(), tst, prob, cnt ); 1709 assert(iff->_prob > 0.0f,"Optimizer made bad probability in parser"); 1710 // True branch 1711 { PreserveJVMState pjvms(this); 1712 Node* iftrue = _gvn.transform( new IfTrueNode (iff) ); 1713 set_control(iftrue); 1714 1715 if (stopped()) { // Path is dead? 1716 NOT_PRODUCT(explicit_null_checks_elided++); 1717 if (C->eliminate_boxing()) { 1718 // Mark the successor block as parsed 1719 branch_block->next_path_num(); 1720 } 1721 } else { // Path is live. 1722 // Update method data 1723 profile_taken_branch(target_bci); 1724 adjust_map_after_if(btest, c, prob, branch_block); 1725 if (!stopped()) { 1726 merge(target_bci); 1727 } 1728 } 1729 } 1730 1731 // False branch 1732 Node* iffalse = _gvn.transform( new IfFalseNode(iff) ); 1733 set_control(iffalse); 1734 1735 if (stopped()) { // Path is dead? 
1736 NOT_PRODUCT(explicit_null_checks_elided++); 1737 if (C->eliminate_boxing()) { 1738 // Mark the successor block as parsed 1739 next_block->next_path_num(); 1740 } 1741 } else { // Path is live. 1742 // Update method data 1743 profile_not_taken_branch(); 1744 adjust_map_after_if(BoolTest(btest).negate(), c, 1.0-prob, next_block); 1745 } 1746 } 1747 1748 //------------------------------------do_if------------------------------------ 1749 void Parse::do_if(BoolTest::mask btest, Node* c, bool new_path, Node** ctrl_taken) { 1750 int target_bci = iter().get_dest(); 1751 1752 Block* branch_block = successor_for_bci(target_bci); 1753 Block* next_block = successor_for_bci(iter().next_bci()); 1754 1755 float cnt; 1756 float prob = branch_prediction(cnt, btest, target_bci, c); 1757 float untaken_prob = 1.0 - prob; 1758 1759 if (prob == PROB_UNKNOWN) { 1760 if (PrintOpto && Verbose) { 1761 tty->print_cr("Never-taken edge stops compilation at bci %d", bci()); 1762 } 1763 repush_if_args(); // to gather stats on loop 1764 // We need to mark this branch as taken so that if we recompile we will 1765 // see that it is possible. In the tiered system the interpreter doesn't 1766 // do profiling and by the time we get to the lower tier from the interpreter 1767 // the path may be cold again. Make sure it doesn't look untaken 1768 profile_taken_branch(target_bci, !ProfileInterpreter); 1769 uncommon_trap(Deoptimization::Reason_unreached, 1770 Deoptimization::Action_reinterpret, 1771 NULL, "cold"); 1772 if (C->eliminate_boxing()) { 1773 // Mark the successor blocks as parsed 1774 branch_block->next_path_num(); 1775 next_block->next_path_num(); 1776 } 1777 return; 1778 } 1779 1780 // Sanity check the probability value 1781 assert(0.0f < prob && prob < 1.0f,"Bad probability in Parser"); 1782 1783 bool taken_if_true = true; 1784 // Convert BoolTest to canonical form: 1785 if (!BoolTest(btest).is_canonical()) { 1786 btest = BoolTest(btest).negate(); 1787 taken_if_true = false; 1788 // prob is NOT updated here; it remains the probability of the taken 1789 // path (as opposed to the prob of the path guarded by an 'IfTrueNode'). 1790 } 1791 assert(btest != BoolTest::eq, "!= is the only canonical exact test"); 1792 1793 Node* tst0 = new BoolNode(c, btest); 1794 Node* tst = _gvn.transform(tst0); 1795 BoolTest::mask taken_btest = BoolTest::illegal; 1796 BoolTest::mask untaken_btest = BoolTest::illegal; 1797 1798 if (tst->is_Bool()) { 1799 // Refresh c from the transformed bool node, since it may be 1800 // simpler than the original c. Also re-canonicalize btest. 1801 // This wins when (Bool ne (Conv2B p) 0) => (Bool ne (CmpP p NULL)). 1802 // That can arise from statements like: if (x instanceof C) ... 1803 if (tst != tst0) { 1804 // Canonicalize one more time since transform can change it. 1805 btest = tst->as_Bool()->_test._test; 1806 if (!BoolTest(btest).is_canonical()) { 1807 // Reverse edges one more time... 1808 tst = _gvn.transform( tst->as_Bool()->negate(&_gvn) ); 1809 btest = tst->as_Bool()->_test._test; 1810 assert(BoolTest(btest).is_canonical(), "sanity"); 1811 taken_if_true = !taken_if_true; 1812 } 1813 c = tst->in(1); 1814 } 1815 BoolTest::mask neg_btest = BoolTest(btest).negate(); 1816 taken_btest = taken_if_true ? btest : neg_btest; 1817 untaken_btest = taken_if_true ? neg_btest : btest; 1818 } 1819 1820 // Generate real control flow 1821 float true_prob = (taken_if_true ? 
prob : untaken_prob); 1822 IfNode* iff = create_and_map_if(control(), tst, true_prob, cnt); 1823 assert(iff->_prob > 0.0f,"Optimizer made bad probability in parser"); 1824 Node* taken_branch = new IfTrueNode(iff); 1825 Node* untaken_branch = new IfFalseNode(iff); 1826 if (!taken_if_true) { // Finish conversion to canonical form 1827 Node* tmp = taken_branch; 1828 taken_branch = untaken_branch; 1829 untaken_branch = tmp; 1830 } 1831 1832 // Branch is taken: 1833 { PreserveJVMState pjvms(this); 1834 taken_branch = _gvn.transform(taken_branch); 1835 set_control(taken_branch); 1836 1837 if (stopped()) { 1838 if (C->eliminate_boxing() && !new_path) { 1839 // Mark the successor block as parsed (if we haven't created a new path) 1840 branch_block->next_path_num(); 1841 } 1842 } else { 1843 // Update method data 1844 profile_taken_branch(target_bci); 1845 adjust_map_after_if(taken_btest, c, prob, branch_block); 1846 if (!stopped()) { 1847 if (new_path) { 1848 // Merge by using a new path 1849 merge_new_path(target_bci); 1850 } else if (ctrl_taken != NULL) { 1851 // Don't merge but save taken branch to be wired by caller 1852 *ctrl_taken = control(); 1853 } else { 1854 merge(target_bci); 1855 } 1856 } 1857 } 1858 } 1859 1860 untaken_branch = _gvn.transform(untaken_branch); 1861 set_control(untaken_branch); 1862 1863 // Branch not taken. 1864 if (stopped() && ctrl_taken == NULL) { 1865 if (C->eliminate_boxing()) { 1866 // Mark the successor block as parsed (if caller does not re-wire control flow) 1867 next_block->next_path_num(); 1868 } 1869 } else { 1870 // Update method data 1871 profile_not_taken_branch(); 1872 adjust_map_after_if(untaken_btest, c, untaken_prob, next_block); 1873 } 1874 } 1875 1876 void Parse::do_acmp(BoolTest::mask btest, Node* a, Node* b) { 1877 ciMethod* subst_method = ciEnv::current()->ValueBootstrapMethods_klass()->find_method(ciSymbol::isSubstitutable_name(), ciSymbol::object_object_boolean_signature()); 1878 // If current method is ValueBootstrapMethods::isSubstitutable(), 1879 // compile the acmp as a regular pointer comparison otherwise we 1880 // could call ValueBootstrapMethods::isSubstitutable() back 1881 if (!EnableValhalla || (method() == subst_method)) { 1882 Node* cmp = CmpP(a, b); 1883 cmp = optimize_cmp_with_klass(cmp); 1884 do_if(btest, cmp); 1885 return; 1886 } 1887 1888 // Substitutability test 1889 if (a->is_ValueType()) { 1890 inc_sp(2); 1891 a = a->as_ValueType()->allocate(this, true)->get_oop(); 1892 dec_sp(2); 1893 } 1894 if (b->is_ValueType()) { 1895 inc_sp(2); 1896 b = b->as_ValueType()->allocate(this, true)->get_oop(); 1897 dec_sp(2); 1898 } 1899 1900 const TypeOopPtr* ta = _gvn.type(a)->isa_oopptr(); 1901 const TypeOopPtr* tb = _gvn.type(b)->isa_oopptr(); 1902 1903 if (ta == NULL || !ta->can_be_value_type_raw() || 1904 tb == NULL || !tb->can_be_value_type_raw()) { 1905 Node* cmp = CmpP(a, b); 1906 cmp = optimize_cmp_with_klass(cmp); 1907 do_if(btest, cmp); 1908 return; 1909 } 1910 1911 Node* cmp = CmpP(a, b); 1912 cmp = optimize_cmp_with_klass(cmp); 1913 Node* eq_region = NULL; 1914 if (btest == BoolTest::eq) { 1915 do_if(btest, cmp, true); 1916 if (stopped()) { 1917 return; 1918 } 1919 } else { 1920 assert(btest == BoolTest::ne, "only eq or ne"); 1921 Node* is_not_equal = NULL; 1922 eq_region = new RegionNode(3); 1923 { 1924 PreserveJVMState pjvms(this); 1925 do_if(btest, cmp, false, &is_not_equal); 1926 if (!stopped()) { 1927 eq_region->init_req(1, control()); 1928 } 1929 } 1930 if (is_not_equal == NULL || is_not_equal->is_top()) { 1931 
record_for_igvn(eq_region); 1932 set_control(_gvn.transform(eq_region)); 1933 return; 1934 } 1935 set_control(is_not_equal); 1936 } 1937 // Pointers not equal, check for values 1938 Node* ne_region = new RegionNode(6); 1939 inc_sp(2); 1940 Node* null_ctl = top(); 1941 Node* not_null_a = null_check_oop(a, &null_ctl, !too_many_traps(Deoptimization::Reason_null_check), false, false); 1942 dec_sp(2); 1943 ne_region->init_req(1, null_ctl); 1944 if (stopped()) { 1945 record_for_igvn(ne_region); 1946 set_control(_gvn.transform(ne_region)); 1947 if (btest == BoolTest::ne) { 1948 { 1949 PreserveJVMState pjvms(this); 1950 int target_bci = iter().get_dest(); 1951 merge(target_bci); 1952 } 1953 record_for_igvn(eq_region); 1954 set_control(_gvn.transform(eq_region)); 1955 } 1956 return; 1957 } 1958 1959 Node* is_value = is_always_locked(not_null_a); 1960 Node* value_mask = _gvn.MakeConX(markOopDesc::always_locked_pattern); 1961 Node* is_value_cmp = _gvn.transform(new CmpXNode(is_value, value_mask)); 1962 Node* is_value_bol = _gvn.transform(new BoolNode(is_value_cmp, BoolTest::ne)); 1963 IfNode* is_value_iff = create_and_map_if(control(), is_value_bol, PROB_FAIR, COUNT_UNKNOWN); 1964 Node* not_value = _gvn.transform(new IfTrueNode(is_value_iff)); 1965 set_control(_gvn.transform(new IfFalseNode(is_value_iff))); 1966 ne_region->init_req(2, not_value); 1967 1968 // One of the 2 pointers refers to a value, check if both are of 1969 // the same class 1970 inc_sp(2); 1971 null_ctl = top(); 1972 Node* not_null_b = null_check_oop(b, &null_ctl, !too_many_traps(Deoptimization::Reason_null_check), false, false); 1973 dec_sp(2); 1974 ne_region->init_req(3, null_ctl); 1975 if (stopped()) { 1976 record_for_igvn(ne_region); 1977 set_control(_gvn.transform(ne_region)); 1978 if (btest == BoolTest::ne) { 1979 { 1980 PreserveJVMState pjvms(this); 1981 int target_bci = iter().get_dest(); 1982 merge(target_bci); 1983 } 1984 record_for_igvn(eq_region); 1985 set_control(_gvn.transform(eq_region)); 1986 } 1987 return; 1988 } 1989 Node* kls_a = load_object_klass(not_null_a); 1990 Node* kls_b = load_object_klass(not_null_b); 1991 Node* kls_cmp = CmpP(kls_a, kls_b); 1992 Node* kls_bol = _gvn.transform(new BoolNode(kls_cmp, BoolTest::ne)); 1993 IfNode* kls_iff = create_and_map_if(control(), kls_bol, PROB_FAIR, COUNT_UNKNOWN); 1994 Node* kls_ne = _gvn.transform(new IfTrueNode(kls_iff)); 1995 set_control(_gvn.transform(new IfFalseNode(kls_iff))); 1996 ne_region->init_req(4, kls_ne); 1997 1998 if (stopped()) { 1999 record_for_igvn(ne_region); 2000 set_control(_gvn.transform(ne_region)); 2001 if (btest == BoolTest::ne) { 2002 { 2003 PreserveJVMState pjvms(this); 2004 int target_bci = iter().get_dest(); 2005 merge(target_bci); 2006 } 2007 record_for_igvn(eq_region); 2008 set_control(_gvn.transform(eq_region)); 2009 } 2010 return; 2011 } 2012 // Both are values of the same class, we need to perform a 2013 // substitutability test. Delegate to 2014 // ValueBootstrapMethods::isSubstitutable(). 
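// The code below builds, roughly, the following shape (a sketch only,
// not literal node names):
//
//   int ret = ValueBootstrapMethods.isSubstitutable(a, b); // static call
//   if (ret == 1)  -> continue as if the acmp compared equal
//   else           -> continue as if the acmp compared not-equal
//
// with i/o and memory merged through the ne/eq region phis set up next.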
2015 2016 Node* ne_io_phi = PhiNode::make(ne_region, i_o()); 2017 Node* mem = reset_memory(); 2018 Node* ne_mem_phi = PhiNode::make(ne_region, mem); 2019 2020 Node* eq_io_phi = NULL; 2021 Node* eq_mem_phi = NULL; 2022 if (eq_region != NULL) { 2023 eq_io_phi = PhiNode::make(eq_region, i_o()); 2024 eq_mem_phi = PhiNode::make(eq_region, mem); 2025 } 2026 2027 set_all_memory(mem); 2028 2029 kill_dead_locals(); 2030 CallStaticJavaNode *call = new CallStaticJavaNode(C, TypeFunc::make(subst_method), SharedRuntime::get_resolve_static_call_stub(), subst_method, bci()); 2031 call->set_override_symbolic_info(true); 2032 call->init_req(TypeFunc::Parms, not_null_a); 2033 call->init_req(TypeFunc::Parms+1, not_null_b); 2034 inc_sp(2); 2035 set_edges_for_java_call(call, false, false); 2036 Node* ret = set_results_for_java_call(call, false, true); 2037 dec_sp(2); 2038 2039 // Test the return value of ValueBootstrapMethods::isSubstitutable() 2040 Node* subst_cmp = _gvn.transform(new CmpINode(ret, intcon(1))); 2041 Node* ctl = C->top(); 2042 if (btest == BoolTest::eq) { 2043 PreserveJVMState pjvms(this); 2044 do_if(btest, subst_cmp); 2045 if (!stopped()) { 2046 ctl = control(); 2047 } 2048 } else { 2049 assert(btest == BoolTest::ne, "only eq or ne"); 2050 PreserveJVMState pjvms(this); 2051 do_if(btest, subst_cmp, false, &ctl); 2052 if (!stopped()) { 2053 eq_region->init_req(2, control()); 2054 eq_io_phi->init_req(2, i_o()); 2055 eq_mem_phi->init_req(2, reset_memory()); 2056 } 2057 } 2058 ne_region->init_req(5, ctl); 2059 ne_io_phi->init_req(5, i_o()); 2060 ne_mem_phi->init_req(5, reset_memory()); 2061 2062 record_for_igvn(ne_region); 2063 set_control(_gvn.transform(ne_region)); 2064 set_i_o(_gvn.transform(ne_io_phi)); 2065 set_all_memory(_gvn.transform(ne_mem_phi)); 2066 2067 if (btest == BoolTest::ne) { 2068 { 2069 PreserveJVMState pjvms(this); 2070 int target_bci = iter().get_dest(); 2071 merge(target_bci); 2072 } 2073 2074 record_for_igvn(eq_region); 2075 set_control(_gvn.transform(eq_region)); 2076 set_i_o(_gvn.transform(eq_io_phi)); 2077 set_all_memory(_gvn.transform(eq_mem_phi)); 2078 } 2079 } 2080 2081 bool Parse::path_is_suitable_for_uncommon_trap(float prob) const { 2082 // Don't want to speculate on uncommon traps when running with -Xcomp 2083 if (!UseInterpreter) { 2084 return false; 2085 } 2086 return (seems_never_taken(prob) && seems_stable_comparison()); 2087 } 2088 2089 void Parse::maybe_add_predicate_after_if(Block* path) { 2090 if (path->is_SEL_head() && path->preds_parsed() == 0) { 2091 // Add predicates at bci of if dominating the loop so traps can be 2092 // recorded on the if's profile data 2093 int bc_depth = repush_if_args(); 2094 add_predicate(); 2095 dec_sp(bc_depth); 2096 path->set_has_predicates(); 2097 } 2098 } 2099 2100 2101 //----------------------------adjust_map_after_if------------------------------ 2102 // Adjust the JVM state to reflect the result of taking this path. 2103 // Basically, it means inspecting the CmpNode controlling this 2104 // branch, seeing how it constrains a tested value, and then 2105 // deciding if it's worth our while to encode this constraint 2106 // as graph nodes in the current abstract interpretation map. 
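// For example (illustrative): after parsing the test in
//
//   if (x == null) { A } else { B }
//
// the map entering A can have x replaced by a cast to the null type,
// and the map entering B by a not-null cast of x, so later bytecodes on
// each path see the sharpened type without re-testing it (see
// sharpen_type_after_if below).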
2107 void Parse::adjust_map_after_if(BoolTest::mask btest, Node* c, float prob, Block* path) { 2108 if (!c->is_Cmp()) { 2109 maybe_add_predicate_after_if(path); 2110 return; 2111 } 2112 2113 if (stopped() || btest == BoolTest::illegal) { 2114 return; // nothing to do 2115 } 2116 2117 bool is_fallthrough = (path == successor_for_bci(iter().next_bci())); 2118 2119 if (path_is_suitable_for_uncommon_trap(prob)) { 2120 repush_if_args(); 2121 uncommon_trap(Deoptimization::Reason_unstable_if, 2122 Deoptimization::Action_reinterpret, 2123 NULL, 2124 (is_fallthrough ? "taken always" : "taken never")); 2125 return; 2126 } 2127 2128 Node* val = c->in(1); 2129 Node* con = c->in(2); 2130 const Type* tcon = _gvn.type(con); 2131 const Type* tval = _gvn.type(val); 2132 bool have_con = tcon->singleton(); 2133 if (tval->singleton()) { 2134 if (!have_con) { 2135 // Swap, so constant is in con. 2136 con = val; 2137 tcon = tval; 2138 val = c->in(2); 2139 tval = _gvn.type(val); 2140 btest = BoolTest(btest).commute(); 2141 have_con = true; 2142 } else { 2143 // Do we have two constants? Then leave well enough alone. 2144 have_con = false; 2145 } 2146 } 2147 if (!have_con) { // remaining adjustments need a con 2148 maybe_add_predicate_after_if(path); 2149 return; 2150 } 2151 2152 sharpen_type_after_if(btest, con, tcon, val, tval); 2153 maybe_add_predicate_after_if(path); 2154 } 2155 2156 2157 static Node* extract_obj_from_klass_load(PhaseGVN* gvn, Node* n) { 2158 Node* ldk; 2159 if (n->is_DecodeNKlass()) { 2160 if (n->in(1)->Opcode() != Op_LoadNKlass) { 2161 return NULL; 2162 } else { 2163 ldk = n->in(1); 2164 } 2165 } else if (n->Opcode() != Op_LoadKlass) { 2166 return NULL; 2167 } else { 2168 ldk = n; 2169 } 2170 assert(ldk != NULL && ldk->is_Load(), "should have found a LoadKlass or LoadNKlass node"); 2171 2172 Node* adr = ldk->in(MemNode::Address); 2173 intptr_t off = 0; 2174 Node* obj = AddPNode::Ideal_base_and_offset(adr, gvn, off); 2175 if (obj == NULL || off != oopDesc::klass_offset_in_bytes()) // loading oopDesc::_klass? 2176 return NULL; 2177 const TypePtr* tp = gvn->type(obj)->is_ptr(); 2178 if (tp == NULL || !(tp->isa_instptr() || tp->isa_aryptr())) // is obj a Java object ptr? 2179 return NULL; 2180 2181 return obj; 2182 } 2183 2184 void Parse::sharpen_type_after_if(BoolTest::mask btest, 2185 Node* con, const Type* tcon, 2186 Node* val, const Type* tval) { 2187 // Look for opportunities to sharpen the type of a node 2188 // whose klass is compared with a constant klass. 2189 if (btest == BoolTest::eq && tcon->isa_klassptr()) { 2190 Node* obj = extract_obj_from_klass_load(&_gvn, val); 2191 const TypeOopPtr* con_type = tcon->isa_klassptr()->as_instance_type(); 2192 if (obj != NULL && (con_type->isa_instptr() || con_type->isa_aryptr())) { 2193 // Found: 2194 // Bool(CmpP(LoadKlass(obj._klass), ConP(Foo.klass)), [eq]) 2195 // or the narrowOop equivalent. 2196 const Type* obj_type = _gvn.type(obj); 2197 const TypeOopPtr* tboth = obj_type->join_speculative(con_type)->isa_oopptr(); 2198 if (tboth != NULL && tboth->klass_is_exact() && tboth != obj_type && 2199 tboth->higher_equal(obj_type)) { 2200 // obj has to be of the exact type Foo if the CmpP succeeds. 
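// (In Java source terms, illustrative: on the taken path of
//    if (obj.getClass() == Foo.class) { ... }
//  obj may be used as an exact Foo.)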
2201 int obj_in_map = map()->find_edge(obj); 2202 JVMState* jvms = this->jvms(); 2203 if (obj_in_map >= 0 && 2204 (jvms->is_loc(obj_in_map) || jvms->is_stk(obj_in_map))) { 2205 TypeNode* ccast = new CheckCastPPNode(control(), obj, tboth); 2206 const Type* tcc = ccast->as_Type()->type(); 2207 assert(tcc != obj_type && tcc->higher_equal(obj_type), "must improve"); 2208 // Delay transform() call to allow recovery of pre-cast value 2209 // at the control merge. 2210 _gvn.set_type_bottom(ccast); 2211 record_for_igvn(ccast); 2212 // Here's the payoff. 2213 replace_in_map(obj, ccast); 2214 } 2215 } 2216 } 2217 } 2218 2219 int val_in_map = map()->find_edge(val); 2220 if (val_in_map < 0) return; // replace_in_map would be useless 2221 { 2222 JVMState* jvms = this->jvms(); 2223 if (!(jvms->is_loc(val_in_map) || 2224 jvms->is_stk(val_in_map))) 2225 return; // again, it would be useless 2226 } 2227 2228 // Check for a comparison to a constant, and "know" that the compared 2229 // value is constrained on this path. 2230 assert(tcon->singleton(), ""); 2231 ConstraintCastNode* ccast = NULL; 2232 Node* cast = NULL; 2233 2234 switch (btest) { 2235 case BoolTest::eq: // Constant test? 2236 { 2237 const Type* tboth = tcon->join_speculative(tval); 2238 if (tboth == tval) break; // Nothing to gain. 2239 if (tcon->isa_int()) { 2240 ccast = new CastIINode(val, tboth); 2241 } else if (tcon == TypePtr::NULL_PTR) { 2242 // Cast to null, but keep the pointer identity temporarily live. 2243 ccast = new CastPPNode(val, tboth); 2244 } else { 2245 const TypeF* tf = tcon->isa_float_constant(); 2246 const TypeD* td = tcon->isa_double_constant(); 2247 // Exclude tests vs float/double 0 as these could be 2248 // either +0 or -0. Just because you are equal to +0 2249 // doesn't mean you ARE +0! 2250 // Note, following code also replaces Long and Oop values. 2251 if ((!tf || tf->_f != 0.0) && 2252 (!td || td->_d != 0.0)) 2253 cast = con; // Replace non-constant val by con. 2254 } 2255 } 2256 break; 2257 2258 case BoolTest::ne: 2259 if (tcon == TypePtr::NULL_PTR) { 2260 cast = cast_not_null(val, false); 2261 } 2262 break; 2263 2264 default: 2265 // (At this point we could record int range types with CastII.) 2266 break; 2267 } 2268 2269 if (ccast != NULL) { 2270 const Type* tcc = ccast->as_Type()->type(); 2271 assert(tcc != tval && tcc->higher_equal(tval), "must improve"); 2272 // Delay transform() call to allow recovery of pre-cast value 2273 // at the control merge. 2274 ccast->set_req(0, control()); 2275 _gvn.set_type_bottom(ccast); 2276 record_for_igvn(ccast); 2277 cast = ccast; 2278 } 2279 2280 if (cast != NULL) { // Here's the payoff. 2281 replace_in_map(val, cast); 2282 } 2283 } 2284 2285 /** 2286 * Use speculative type to optimize CmpP node: if comparison is 2287 * against the low level class, cast the object to the speculative 2288 * type if any. CmpP should then go away. 
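 *
 * Illustrative example (not literal node shapes): if profiling recorded
 * that obj is always a Foo, then for
 *   CmpP(LoadKlass(obj._klass), Foo_klass)
 * the object is first cast to its speculative type; the rewired klass
 * load then observes an exact klass and the CmpP can constant-fold.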
2289 * 2290 * @param c expected CmpP node 2291 * @return result of CmpP on object casted to speculative type 2292 * 2293 */ 2294 Node* Parse::optimize_cmp_with_klass(Node* c) { 2295 // If this is transformed by the _gvn to a comparison with the low 2296 // level klass then we may be able to use speculation 2297 if (c->Opcode() == Op_CmpP && 2298 (c->in(1)->Opcode() == Op_LoadKlass || c->in(1)->Opcode() == Op_DecodeNKlass) && 2299 c->in(2)->is_Con()) { 2300 Node* load_klass = NULL; 2301 Node* decode = NULL; 2302 if (c->in(1)->Opcode() == Op_DecodeNKlass) { 2303 decode = c->in(1); 2304 load_klass = c->in(1)->in(1); 2305 } else { 2306 load_klass = c->in(1); 2307 } 2308 if (load_klass->in(2)->is_AddP()) { 2309 Node* addp = load_klass->in(2); 2310 Node* obj = addp->in(AddPNode::Address); 2311 const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr(); 2312 if (obj_type->speculative_type_not_null() != NULL) { 2313 ciKlass* k = obj_type->speculative_type(); 2314 inc_sp(2); 2315 obj = maybe_cast_profiled_obj(obj, k); 2316 dec_sp(2); 2317 if (obj->is_ValueType()) { 2318 assert(obj->as_ValueType()->is_allocated(&_gvn), "must be allocated"); 2319 obj = obj->as_ValueType()->get_oop(); 2320 } 2321 // Make the CmpP use the casted obj 2322 addp = basic_plus_adr(obj, addp->in(AddPNode::Offset)); 2323 load_klass = load_klass->clone(); 2324 load_klass->set_req(2, addp); 2325 load_klass = _gvn.transform(load_klass); 2326 if (decode != NULL) { 2327 decode = decode->clone(); 2328 decode->set_req(1, load_klass); 2329 load_klass = _gvn.transform(decode); 2330 } 2331 c = c->clone(); 2332 c->set_req(1, load_klass); 2333 c = _gvn.transform(c); 2334 } 2335 } 2336 } 2337 return c; 2338 } 2339 2340 //------------------------------do_one_bytecode-------------------------------- 2341 // Parse this bytecode, and alter the Parsers JVM->Node mapping 2342 void Parse::do_one_bytecode() { 2343 Node *a, *b, *c, *d; // Handy temps 2344 BoolTest::mask btest; 2345 int i; 2346 2347 assert(!has_exceptions(), "bytecode entry state must be clear of throws"); 2348 2349 if (C->check_node_count(NodeLimitFudgeFactor * 5, 2350 "out of nodes parsing method")) { 2351 return; 2352 } 2353 2354 #ifdef ASSERT 2355 // for setting breakpoints 2356 if (TraceOptoParse) { 2357 tty->print(" @"); 2358 dump_bci(bci()); 2359 tty->cr(); 2360 } 2361 #endif 2362 2363 switch (bc()) { 2364 case Bytecodes::_nop: 2365 // do nothing 2366 break; 2367 case Bytecodes::_lconst_0: 2368 push_pair(longcon(0)); 2369 break; 2370 2371 case Bytecodes::_lconst_1: 2372 push_pair(longcon(1)); 2373 break; 2374 2375 case Bytecodes::_fconst_0: 2376 push(zerocon(T_FLOAT)); 2377 break; 2378 2379 case Bytecodes::_fconst_1: 2380 push(makecon(TypeF::ONE)); 2381 break; 2382 2383 case Bytecodes::_fconst_2: 2384 push(makecon(TypeF::make(2.0f))); 2385 break; 2386 2387 case Bytecodes::_dconst_0: 2388 push_pair(zerocon(T_DOUBLE)); 2389 break; 2390 2391 case Bytecodes::_dconst_1: 2392 push_pair(makecon(TypeD::ONE)); 2393 break; 2394 2395 case Bytecodes::_iconst_m1:push(intcon(-1)); break; 2396 case Bytecodes::_iconst_0: push(intcon( 0)); break; 2397 case Bytecodes::_iconst_1: push(intcon( 1)); break; 2398 case Bytecodes::_iconst_2: push(intcon( 2)); break; 2399 case Bytecodes::_iconst_3: push(intcon( 3)); break; 2400 case Bytecodes::_iconst_4: push(intcon( 4)); break; 2401 case Bytecodes::_iconst_5: push(intcon( 5)); break; 2402 case Bytecodes::_bipush: push(intcon(iter().get_constant_u1())); break; 2403 case Bytecodes::_sipush: push(intcon(iter().get_constant_u2())); break; 2404 case 
Bytecodes::_aconst_null: push(null()); break; 2405 case Bytecodes::_ldc: 2406 case Bytecodes::_ldc_w: 2407 case Bytecodes::_ldc2_w: 2408 // If the constant is unresolved, run this BC once in the interpreter. 2409 { 2410 ciConstant constant = iter().get_constant(); 2411 if (!constant.is_valid() || 2412 (constant.basic_type() == T_OBJECT && 2413 !constant.as_object()->is_loaded())) { 2414 int index = iter().get_constant_pool_index(); 2415 constantTag tag = iter().get_constant_pool_tag(index); 2416 uncommon_trap(Deoptimization::make_trap_request 2417 (Deoptimization::Reason_unloaded, 2418 Deoptimization::Action_reinterpret, 2419 index), 2420 NULL, tag.internal_name()); 2421 break; 2422 } 2423 assert(constant.basic_type() != T_OBJECT || constant.as_object()->is_instance(), 2424 "must be java_mirror of klass"); 2425 const Type* con_type = Type::make_from_constant(constant); 2426 if (con_type != NULL) { 2427 push_node(con_type->basic_type(), makecon(con_type)); 2428 } 2429 } 2430 2431 break; 2432 2433 case Bytecodes::_aload_0: 2434 push( local(0) ); 2435 break; 2436 case Bytecodes::_aload_1: 2437 push( local(1) ); 2438 break; 2439 case Bytecodes::_aload_2: 2440 push( local(2) ); 2441 break; 2442 case Bytecodes::_aload_3: 2443 push( local(3) ); 2444 break; 2445 case Bytecodes::_aload: 2446 push( local(iter().get_index()) ); 2447 break; 2448 2449 case Bytecodes::_fload_0: 2450 case Bytecodes::_iload_0: 2451 push( local(0) ); 2452 break; 2453 case Bytecodes::_fload_1: 2454 case Bytecodes::_iload_1: 2455 push( local(1) ); 2456 break; 2457 case Bytecodes::_fload_2: 2458 case Bytecodes::_iload_2: 2459 push( local(2) ); 2460 break; 2461 case Bytecodes::_fload_3: 2462 case Bytecodes::_iload_3: 2463 push( local(3) ); 2464 break; 2465 case Bytecodes::_fload: 2466 case Bytecodes::_iload: 2467 push( local(iter().get_index()) ); 2468 break; 2469 case Bytecodes::_lload_0: 2470 push_pair_local( 0 ); 2471 break; 2472 case Bytecodes::_lload_1: 2473 push_pair_local( 1 ); 2474 break; 2475 case Bytecodes::_lload_2: 2476 push_pair_local( 2 ); 2477 break; 2478 case Bytecodes::_lload_3: 2479 push_pair_local( 3 ); 2480 break; 2481 case Bytecodes::_lload: 2482 push_pair_local( iter().get_index() ); 2483 break; 2484 2485 case Bytecodes::_dload_0: 2486 push_pair_local(0); 2487 break; 2488 case Bytecodes::_dload_1: 2489 push_pair_local(1); 2490 break; 2491 case Bytecodes::_dload_2: 2492 push_pair_local(2); 2493 break; 2494 case Bytecodes::_dload_3: 2495 push_pair_local(3); 2496 break; 2497 case Bytecodes::_dload: 2498 push_pair_local(iter().get_index()); 2499 break; 2500 case Bytecodes::_fstore_0: 2501 case Bytecodes::_istore_0: 2502 case Bytecodes::_astore_0: 2503 set_local( 0, pop() ); 2504 break; 2505 case Bytecodes::_fstore_1: 2506 case Bytecodes::_istore_1: 2507 case Bytecodes::_astore_1: 2508 set_local( 1, pop() ); 2509 break; 2510 case Bytecodes::_fstore_2: 2511 case Bytecodes::_istore_2: 2512 case Bytecodes::_astore_2: 2513 set_local( 2, pop() ); 2514 break; 2515 case Bytecodes::_fstore_3: 2516 case Bytecodes::_istore_3: 2517 case Bytecodes::_astore_3: 2518 set_local( 3, pop() ); 2519 break; 2520 case Bytecodes::_fstore: 2521 case Bytecodes::_istore: 2522 case Bytecodes::_astore: 2523 set_local( iter().get_index(), pop() ); 2524 break; 2525 // long stores 2526 case Bytecodes::_lstore_0: 2527 set_pair_local( 0, pop_pair() ); 2528 break; 2529 case Bytecodes::_lstore_1: 2530 set_pair_local( 1, pop_pair() ); 2531 break; 2532 case Bytecodes::_lstore_2: 2533 set_pair_local( 2, pop_pair() ); 2534 break; 2535 case 
Bytecodes::_lstore_3: 2536 set_pair_local( 3, pop_pair() ); 2537 break; 2538 case Bytecodes::_lstore: 2539 set_pair_local( iter().get_index(), pop_pair() ); 2540 break; 2541 2542 // double stores 2543 case Bytecodes::_dstore_0: 2544 set_pair_local( 0, dstore_rounding(pop_pair()) ); 2545 break; 2546 case Bytecodes::_dstore_1: 2547 set_pair_local( 1, dstore_rounding(pop_pair()) ); 2548 break; 2549 case Bytecodes::_dstore_2: 2550 set_pair_local( 2, dstore_rounding(pop_pair()) ); 2551 break; 2552 case Bytecodes::_dstore_3: 2553 set_pair_local( 3, dstore_rounding(pop_pair()) ); 2554 break; 2555 case Bytecodes::_dstore: 2556 set_pair_local( iter().get_index(), dstore_rounding(pop_pair()) ); 2557 break; 2558 2559 case Bytecodes::_pop: dec_sp(1); break; 2560 case Bytecodes::_pop2: dec_sp(2); break; 2561 case Bytecodes::_swap: 2562 a = pop(); 2563 b = pop(); 2564 push(a); 2565 push(b); 2566 break; 2567 case Bytecodes::_dup: 2568 a = pop(); 2569 push(a); 2570 push(a); 2571 break; 2572 case Bytecodes::_dup_x1: 2573 a = pop(); 2574 b = pop(); 2575 push( a ); 2576 push( b ); 2577 push( a ); 2578 break; 2579 case Bytecodes::_dup_x2: 2580 a = pop(); 2581 b = pop(); 2582 c = pop(); 2583 push( a ); 2584 push( c ); 2585 push( b ); 2586 push( a ); 2587 break; 2588 case Bytecodes::_dup2: 2589 a = pop(); 2590 b = pop(); 2591 push( b ); 2592 push( a ); 2593 push( b ); 2594 push( a ); 2595 break; 2596 2597 case Bytecodes::_dup2_x1: 2598 // before: .. c, b, a 2599 // after: .. b, a, c, b, a 2600 // not tested 2601 a = pop(); 2602 b = pop(); 2603 c = pop(); 2604 push( b ); 2605 push( a ); 2606 push( c ); 2607 push( b ); 2608 push( a ); 2609 break; 2610 case Bytecodes::_dup2_x2: 2611 // before: .. d, c, b, a 2612 // after: .. b, a, d, c, b, a 2613 // not tested 2614 a = pop(); 2615 b = pop(); 2616 c = pop(); 2617 d = pop(); 2618 push( b ); 2619 push( a ); 2620 push( d ); 2621 push( c ); 2622 push( b ); 2623 push( a ); 2624 break; 2625 2626 case Bytecodes::_arraylength: { 2627 // Must do null-check with value on expression stack 2628 Node *ary = null_check(peek(), T_ARRAY); 2629 // Compile-time detect of null-exception? 
2630 if (stopped()) return; 2631 a = pop(); 2632 push(load_array_length(a)); 2633 break; 2634 } 2635 2636 case Bytecodes::_baload: array_load(T_BYTE); break; 2637 case Bytecodes::_caload: array_load(T_CHAR); break; 2638 case Bytecodes::_iaload: array_load(T_INT); break; 2639 case Bytecodes::_saload: array_load(T_SHORT); break; 2640 case Bytecodes::_faload: array_load(T_FLOAT); break; 2641 case Bytecodes::_aaload: array_load(T_OBJECT); break; 2642 case Bytecodes::_laload: array_load(T_LONG); break; 2643 case Bytecodes::_daload: array_load(T_DOUBLE); break; 2644 case Bytecodes::_bastore: array_store(T_BYTE); break; 2645 case Bytecodes::_castore: array_store(T_CHAR); break; 2646 case Bytecodes::_iastore: array_store(T_INT); break; 2647 case Bytecodes::_sastore: array_store(T_SHORT); break; 2648 case Bytecodes::_fastore: array_store(T_FLOAT); break; 2649 case Bytecodes::_aastore: array_store(T_OBJECT); break; 2650 case Bytecodes::_lastore: array_store(T_LONG); break; 2651 case Bytecodes::_dastore: array_store(T_DOUBLE); break; 2652 2653 case Bytecodes::_getfield: 2654 do_getfield(); 2655 break; 2656 2657 case Bytecodes::_getstatic: 2658 do_getstatic(); 2659 break; 2660 2661 case Bytecodes::_putfield: 2662 do_putfield(); 2663 break; 2664 2665 case Bytecodes::_putstatic: 2666 do_putstatic(); 2667 break; 2668 2669 case Bytecodes::_irem: 2670 do_irem(); 2671 break; 2672 case Bytecodes::_idiv: 2673 // Must keep both values on the expression-stack during null-check 2674 zero_check_int(peek()); 2675 // Compile-time detect of null-exception? 2676 if (stopped()) return; 2677 b = pop(); 2678 a = pop(); 2679 push( _gvn.transform( new DivINode(control(),a,b) ) ); 2680 break; 2681 case Bytecodes::_imul: 2682 b = pop(); a = pop(); 2683 push( _gvn.transform( new MulINode(a,b) ) ); 2684 break; 2685 case Bytecodes::_iadd: 2686 b = pop(); a = pop(); 2687 push( _gvn.transform( new AddINode(a,b) ) ); 2688 break; 2689 case Bytecodes::_ineg: 2690 a = pop(); 2691 push( _gvn.transform( new SubINode(_gvn.intcon(0),a)) ); 2692 break; 2693 case Bytecodes::_isub: 2694 b = pop(); a = pop(); 2695 push( _gvn.transform( new SubINode(a,b) ) ); 2696 break; 2697 case Bytecodes::_iand: 2698 b = pop(); a = pop(); 2699 push( _gvn.transform( new AndINode(a,b) ) ); 2700 break; 2701 case Bytecodes::_ior: 2702 b = pop(); a = pop(); 2703 push( _gvn.transform( new OrINode(a,b) ) ); 2704 break; 2705 case Bytecodes::_ixor: 2706 b = pop(); a = pop(); 2707 push( _gvn.transform( new XorINode(a,b) ) ); 2708 break; 2709 case Bytecodes::_ishl: 2710 b = pop(); a = pop(); 2711 push( _gvn.transform( new LShiftINode(a,b) ) ); 2712 break; 2713 case Bytecodes::_ishr: 2714 b = pop(); a = pop(); 2715 push( _gvn.transform( new RShiftINode(a,b) ) ); 2716 break; 2717 case Bytecodes::_iushr: 2718 b = pop(); a = pop(); 2719 push( _gvn.transform( new URShiftINode(a,b) ) ); 2720 break; 2721 2722 case Bytecodes::_fneg: 2723 a = pop(); 2724 b = _gvn.transform(new NegFNode (a)); 2725 push(b); 2726 break; 2727 2728 case Bytecodes::_fsub: 2729 b = pop(); 2730 a = pop(); 2731 c = _gvn.transform( new SubFNode(a,b) ); 2732 d = precision_rounding(c); 2733 push( d ); 2734 break; 2735 2736 case Bytecodes::_fadd: 2737 b = pop(); 2738 a = pop(); 2739 c = _gvn.transform( new AddFNode(a,b) ); 2740 d = precision_rounding(c); 2741 push( d ); 2742 break; 2743 2744 case Bytecodes::_fmul: 2745 b = pop(); 2746 a = pop(); 2747 c = _gvn.transform( new MulFNode(a,b) ); 2748 d = precision_rounding(c); 2749 push( d ); 2750 break; 2751 2752 case Bytecodes::_fdiv: 2753 b = pop(); 2754 a 
= pop(); 2755 c = _gvn.transform( new DivFNode(0,a,b) ); 2756 d = precision_rounding(c); 2757 push( d ); 2758 break; 2759 2760 case Bytecodes::_frem: 2761 if (Matcher::has_match_rule(Op_ModF)) { 2762 // Generate a ModF node. 2763 b = pop(); 2764 a = pop(); 2765 c = _gvn.transform( new ModFNode(0,a,b) ); 2766 d = precision_rounding(c); 2767 push( d ); 2768 } 2769 else { 2770 // Generate a call. 2771 modf(); 2772 } 2773 break; 2774 2775 case Bytecodes::_fcmpl: 2776 b = pop(); 2777 a = pop(); 2778 c = _gvn.transform( new CmpF3Node( a, b)); 2779 push(c); 2780 break; 2781 case Bytecodes::_fcmpg: 2782 b = pop(); 2783 a = pop(); 2784 2785 // Same as fcmpl but need to flip the unordered case. Swap the inputs, 2786 // which negates the result sign except for unordered. Flip the unordered 2787 // as well by using CmpF3 which implements unordered-lesser instead of 2788 // unordered-greater semantics. Finally, commute the result bits. Result 2789 // is same as using a CmpF3Greater except we did it with CmpF3 alone. 2790 c = _gvn.transform( new CmpF3Node( b, a)); 2791 c = _gvn.transform( new SubINode(_gvn.intcon(0),c) ); 2792 push(c); 2793 break; 2794 2795 case Bytecodes::_f2i: 2796 a = pop(); 2797 push(_gvn.transform(new ConvF2INode(a))); 2798 break; 2799 2800 case Bytecodes::_d2i: 2801 a = pop_pair(); 2802 b = _gvn.transform(new ConvD2INode(a)); 2803 push( b ); 2804 break; 2805 2806 case Bytecodes::_f2d: 2807 a = pop(); 2808 b = _gvn.transform( new ConvF2DNode(a)); 2809 push_pair( b ); 2810 break; 2811 2812 case Bytecodes::_d2f: 2813 a = pop_pair(); 2814 b = _gvn.transform( new ConvD2FNode(a)); 2815 // This breaks _227_mtrt (speed & correctness) and _222_mpegaudio (speed) 2816 //b = _gvn.transform(new RoundFloatNode(0, b) ); 2817 push( b ); 2818 break; 2819 2820 case Bytecodes::_l2f: 2821 if (Matcher::convL2FSupported()) { 2822 a = pop_pair(); 2823 b = _gvn.transform( new ConvL2FNode(a)); 2824 // For i486.ad, FILD doesn't restrict precision to 24 or 53 bits. 2825 // Rather than storing the result into an FP register then pushing 2826 // out to memory to round, the machine instruction that implements 2827 // ConvL2D is responsible for rounding. 2828 // c = precision_rounding(b); 2829 c = _gvn.transform(b); 2830 push(c); 2831 } else { 2832 l2f(); 2833 } 2834 break; 2835 2836 case Bytecodes::_l2d: 2837 a = pop_pair(); 2838 b = _gvn.transform( new ConvL2DNode(a)); 2839 // For i486.ad, rounding is always necessary (see _l2f above). 
2840 // c = dprecision_rounding(b); 2841 c = _gvn.transform(b); 2842 push_pair(c); 2843 break; 2844 2845 case Bytecodes::_f2l: 2846 a = pop(); 2847 b = _gvn.transform( new ConvF2LNode(a)); 2848 push_pair(b); 2849 break; 2850 2851 case Bytecodes::_d2l: 2852 a = pop_pair(); 2853 b = _gvn.transform( new ConvD2LNode(a)); 2854 push_pair(b); 2855 break; 2856 2857 case Bytecodes::_dsub: 2858 b = pop_pair(); 2859 a = pop_pair(); 2860 c = _gvn.transform( new SubDNode(a,b) ); 2861 d = dprecision_rounding(c); 2862 push_pair( d ); 2863 break; 2864 2865 case Bytecodes::_dadd: 2866 b = pop_pair(); 2867 a = pop_pair(); 2868 c = _gvn.transform( new AddDNode(a,b) ); 2869 d = dprecision_rounding(c); 2870 push_pair( d ); 2871 break; 2872 2873 case Bytecodes::_dmul: 2874 b = pop_pair(); 2875 a = pop_pair(); 2876 c = _gvn.transform( new MulDNode(a,b) ); 2877 d = dprecision_rounding(c); 2878 push_pair( d ); 2879 break; 2880 2881 case Bytecodes::_ddiv: 2882 b = pop_pair(); 2883 a = pop_pair(); 2884 c = _gvn.transform( new DivDNode(0,a,b) ); 2885 d = dprecision_rounding(c); 2886 push_pair( d ); 2887 break; 2888 2889 case Bytecodes::_dneg: 2890 a = pop_pair(); 2891 b = _gvn.transform(new NegDNode (a)); 2892 push_pair(b); 2893 break; 2894 2895 case Bytecodes::_drem: 2896 if (Matcher::has_match_rule(Op_ModD)) { 2897 // Generate a ModD node. 2898 b = pop_pair(); 2899 a = pop_pair(); 2900 // a % b 2901 2902 c = _gvn.transform( new ModDNode(0,a,b) ); 2903 d = dprecision_rounding(c); 2904 push_pair( d ); 2905 } 2906 else { 2907 // Generate a call. 2908 modd(); 2909 } 2910 break; 2911 2912 case Bytecodes::_dcmpl: 2913 b = pop_pair(); 2914 a = pop_pair(); 2915 c = _gvn.transform( new CmpD3Node( a, b)); 2916 push(c); 2917 break; 2918 2919 case Bytecodes::_dcmpg: 2920 b = pop_pair(); 2921 a = pop_pair(); 2922 // Same as dcmpl but need to flip the unordered case. 2923 // Commute the inputs, which negates the result sign except for unordered. 2924 // Flip the unordered as well by using CmpD3 which implements 2925 // unordered-lesser instead of unordered-greater semantics. 2926 // Finally, negate the result bits. Result is same as using a 2927 // CmpD3Greater except we did it with CmpD3 alone. 
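// Worked example (illustrative): dcmpg must push +1 when either input
// is NaN. For a = NaN, b = 1.0, CmpD3(b, a) sees an unordered pair and,
// being unordered-lesser, yields -1; the SubI negation below turns that
// into the required +1. For an ordered a < b, CmpD3(b, a) yields +1,
// which negates to the required -1.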
2928 c = _gvn.transform( new CmpD3Node( b, a)); 2929 c = _gvn.transform( new SubINode(_gvn.intcon(0),c) ); 2930 push(c); 2931 break; 2932 2933 2934 // Note for longs -> lo word is on TOS, hi word is on TOS - 1 2935 case Bytecodes::_land: 2936 b = pop_pair(); 2937 a = pop_pair(); 2938 c = _gvn.transform( new AndLNode(a,b) ); 2939 push_pair(c); 2940 break; 2941 case Bytecodes::_lor: 2942 b = pop_pair(); 2943 a = pop_pair(); 2944 c = _gvn.transform( new OrLNode(a,b) ); 2945 push_pair(c); 2946 break; 2947 case Bytecodes::_lxor: 2948 b = pop_pair(); 2949 a = pop_pair(); 2950 c = _gvn.transform( new XorLNode(a,b) ); 2951 push_pair(c); 2952 break; 2953 2954 case Bytecodes::_lshl: 2955 b = pop(); // the shift count 2956 a = pop_pair(); // value to be shifted 2957 c = _gvn.transform( new LShiftLNode(a,b) ); 2958 push_pair(c); 2959 break; 2960 case Bytecodes::_lshr: 2961 b = pop(); // the shift count 2962 a = pop_pair(); // value to be shifted 2963 c = _gvn.transform( new RShiftLNode(a,b) ); 2964 push_pair(c); 2965 break; 2966 case Bytecodes::_lushr: 2967 b = pop(); // the shift count 2968 a = pop_pair(); // value to be shifted 2969 c = _gvn.transform( new URShiftLNode(a,b) ); 2970 push_pair(c); 2971 break; 2972 case Bytecodes::_lmul: 2973 b = pop_pair(); 2974 a = pop_pair(); 2975 c = _gvn.transform( new MulLNode(a,b) ); 2976 push_pair(c); 2977 break; 2978 2979 case Bytecodes::_lrem: 2980 // Must keep both values on the expression-stack during null-check 2981 assert(peek(0) == top(), "long word order"); 2982 zero_check_long(peek(1)); 2983 // Compile-time detect of null-exception? 2984 if (stopped()) return; 2985 b = pop_pair(); 2986 a = pop_pair(); 2987 c = _gvn.transform( new ModLNode(control(),a,b) ); 2988 push_pair(c); 2989 break; 2990 2991 case Bytecodes::_ldiv: 2992 // Must keep both values on the expression-stack during null-check 2993 assert(peek(0) == top(), "long word order"); 2994 zero_check_long(peek(1)); 2995 // Compile-time detect of null-exception? 2996 if (stopped()) return; 2997 b = pop_pair(); 2998 a = pop_pair(); 2999 c = _gvn.transform( new DivLNode(control(),a,b) ); 3000 push_pair(c); 3001 break; 3002 3003 case Bytecodes::_ladd: 3004 b = pop_pair(); 3005 a = pop_pair(); 3006 c = _gvn.transform( new AddLNode(a,b) ); 3007 push_pair(c); 3008 break; 3009 case Bytecodes::_lsub: 3010 b = pop_pair(); 3011 a = pop_pair(); 3012 c = _gvn.transform( new SubLNode(a,b) ); 3013 push_pair(c); 3014 break; 3015 case Bytecodes::_lcmp: 3016 // Safepoints are now inserted _before_ branches. The long-compare 3017 // bytecode painfully produces a 3-way value (-1,0,+1) which requires a 3018 // slew of control flow. These are usually followed by a CmpI vs zero and 3019 // a branch; this pattern then optimizes to the obvious long-compare and 3020 // branch. However, if the branch is backwards there's a Safepoint 3021 // inserted. The inserted Safepoint captures the JVM state at the 3022 // pre-branch point, i.e. it captures the 3-way value. Thus if a 3023 // long-compare is used to control a loop the debug info will force 3024 // computation of the 3-way value, even though the generated code uses a 3025 // long-compare and branch. We try to rectify the situation by inserting 3026 // a SafePoint here and have it dominate and kill the safepoint added at a 3027 // following backwards branch. At this point the JVM state merely holds 2 3028 // longs but not the 3-way value. 
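// Typical shape (illustrative Java source):
//
//   long i = ...; while (i < n) { ...; i++; }
//
// compiles to lcmp followed by a conditional branch back to the loop
// head. The lcmp/branch pair optimizes into a single long-compare-and-
// branch, and the SafePoint inserted here only has to describe the two
// long inputs rather than the materialized -1/0/+1 value.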
3029   if( UseLoopSafepoints ) {
3030     switch( iter().next_bc() ) {
3031     case Bytecodes::_ifgt:
3032     case Bytecodes::_iflt:
3033     case Bytecodes::_ifge:
3034     case Bytecodes::_ifle:
3035     case Bytecodes::_ifne:
3036     case Bytecodes::_ifeq:
3037       // If this is a backwards branch in the bytecodes, add Safepoint
3038       maybe_add_safepoint(iter().next_get_dest());
3039     default:
3040       break;
3041     }
3042   }
3043   b = pop_pair();
3044   a = pop_pair();
3045   c = _gvn.transform( new CmpL3Node( a, b ));
3046   push(c);
3047   break;
3048
3049   case Bytecodes::_lneg:
3050   a = pop_pair();
3051   b = _gvn.transform( new SubLNode(longcon(0),a));
3052   push_pair(b);
3053   break;
3054   case Bytecodes::_l2i:
3055   a = pop_pair();
3056   push( _gvn.transform( new ConvL2INode(a)));
3057   break;
3058   case Bytecodes::_i2l:
3059   a = pop();
3060   b = _gvn.transform( new ConvI2LNode(a));
3061   push_pair(b);
3062   break;
3063   case Bytecodes::_i2b:
3064   // Sign extend
3065   a = pop();
3066   a = _gvn.transform( new LShiftINode(a,_gvn.intcon(24)) );
3067   a = _gvn.transform( new RShiftINode(a,_gvn.intcon(24)) );
3068   push( a );
3069   break;
3070   case Bytecodes::_i2s:
3071   a = pop();
3072   a = _gvn.transform( new LShiftINode(a,_gvn.intcon(16)) );
3073   a = _gvn.transform( new RShiftINode(a,_gvn.intcon(16)) );
3074   push( a );
3075   break;
3076   case Bytecodes::_i2c:
3077   a = pop();
3078   push( _gvn.transform( new AndINode(a,_gvn.intcon(0xFFFF)) ) );
3079   break;
3080
3081   case Bytecodes::_i2f:
3082   a = pop();
3083   b = _gvn.transform( new ConvI2FNode(a) ) ;
3084   c = precision_rounding(b);
3085   push(c); // push the rounded result, not the raw conversion
3086   break;
3087
3088   case Bytecodes::_i2d:
3089   a = pop();
3090   b = _gvn.transform( new ConvI2DNode(a));
3091   push_pair(b);
3092   break;
3093
3094   case Bytecodes::_iinc: // Increment local
3095   i = iter().get_index(); // Get local index
3096   set_local( i, _gvn.transform( new AddINode( _gvn.intcon(iter().get_iinc_con()), local(i) ) ) );
3097   break;
3098
3099   // Exit points of synchronized methods must have an unlock node
3100   case Bytecodes::_return:
3101   return_current(NULL);
3102   break;
3103
3104   case Bytecodes::_ireturn:
3105   case Bytecodes::_areturn:
3106   case Bytecodes::_freturn:
3107   return_current(pop());
3108   break;
3109   case Bytecodes::_lreturn:
3110   return_current(pop_pair());
3111   break;
3112   case Bytecodes::_dreturn:
3113   return_current(pop_pair());
3114   break;
3115
3116   case Bytecodes::_athrow:
3117   // throwing a null exception oop results in a NullPointerException
3118   null_check(peek());
3119   if (stopped()) return;
3120   // Hook the thrown exception directly to subsequent handlers.
3121   if (BailoutToInterpreterForThrows) {
3122     // Keep method interpreted from now on.
3123     uncommon_trap(Deoptimization::Reason_unhandled,
3124                   Deoptimization::Action_make_not_compilable);
3125     return;
3126   }
3127   if (env()->jvmti_can_post_on_exceptions()) {
3128     // check if we must post exception events, take uncommon trap if so (with must_throw = false)
3129     uncommon_trap_if_should_post_on_exceptions(Deoptimization::Reason_unhandled, false);
3130   }
3131   // Here if either can_post_on_exceptions or should_post_on_exceptions is false
3132   add_exception_state(make_exception_state(peek()));
3133   break;
3134
3135   case Bytecodes::_goto: // fall through
3136   case Bytecodes::_goto_w: {
3137     int target_bci = (bc() == Bytecodes::_goto) ?
iter().get_dest() : iter().get_far_dest(); 3138 3139 // If this is a backwards branch in the bytecodes, add Safepoint 3140 maybe_add_safepoint(target_bci); 3141 3142 // Update method data 3143 profile_taken_branch(target_bci); 3144 3145 // Merge the current control into the target basic block 3146 merge(target_bci); 3147 3148 // See if we can get some profile data and hand it off to the next block 3149 Block *target_block = block()->successor_for_bci(target_bci); 3150 if (target_block->pred_count() != 1) break; 3151 ciMethodData* methodData = method()->method_data(); 3152 if (!methodData->is_mature()) break; 3153 ciProfileData* data = methodData->bci_to_data(bci()); 3154 assert(data != NULL && data->is_JumpData(), "need JumpData for taken branch"); 3155 int taken = ((ciJumpData*)data)->taken(); 3156 taken = method()->scale_count(taken); 3157 target_block->set_count(taken); 3158 break; 3159 } 3160 3161 case Bytecodes::_ifnull: btest = BoolTest::eq; goto handle_if_null; 3162 case Bytecodes::_ifnonnull: btest = BoolTest::ne; goto handle_if_null; 3163 handle_if_null: 3164 // If this is a backwards branch in the bytecodes, add Safepoint 3165 maybe_add_safepoint(iter().get_dest()); 3166 a = null(); 3167 b = pop(); 3168 if (b->is_ValueType()) { 3169 // Return constant false because 'b' is always non-null 3170 c = _gvn.makecon(TypeInt::CC_GT); 3171 } else { 3172 if (!_gvn.type(b)->speculative_maybe_null() && 3173 !too_many_traps(Deoptimization::Reason_speculate_null_check)) { 3174 inc_sp(1); 3175 Node* null_ctl = top(); 3176 b = null_check_oop(b, &null_ctl, true, true, true); 3177 assert(null_ctl->is_top(), "no null control here"); 3178 dec_sp(1); 3179 } else if (_gvn.type(b)->speculative_always_null() && 3180 !too_many_traps(Deoptimization::Reason_speculate_null_assert)) { 3181 inc_sp(1); 3182 b = null_assert(b); 3183 dec_sp(1); 3184 } 3185 c = _gvn.transform( new CmpPNode(b, a) ); 3186 } 3187 do_ifnull(btest, c); 3188 break; 3189 3190 case Bytecodes::_if_acmpeq: btest = BoolTest::eq; goto handle_if_acmp; 3191 case Bytecodes::_if_acmpne: btest = BoolTest::ne; goto handle_if_acmp; 3192 handle_if_acmp: 3193 // If this is a backwards branch in the bytecodes, add Safepoint 3194 maybe_add_safepoint(iter().get_dest()); 3195 a = access_resolve(pop(), 0); 3196 b = access_resolve(pop(), 0); 3197 do_acmp(btest, a, b); 3198 break; 3199 3200 case Bytecodes::_ifeq: btest = BoolTest::eq; goto handle_ifxx; 3201 case Bytecodes::_ifne: btest = BoolTest::ne; goto handle_ifxx; 3202 case Bytecodes::_iflt: btest = BoolTest::lt; goto handle_ifxx; 3203 case Bytecodes::_ifle: btest = BoolTest::le; goto handle_ifxx; 3204 case Bytecodes::_ifgt: btest = BoolTest::gt; goto handle_ifxx; 3205 case Bytecodes::_ifge: btest = BoolTest::ge; goto handle_ifxx; 3206 handle_ifxx: 3207 // If this is a backwards branch in the bytecodes, add Safepoint 3208 maybe_add_safepoint(iter().get_dest()); 3209 a = _gvn.intcon(0); 3210 b = pop(); 3211 c = _gvn.transform( new CmpINode(b, a) ); 3212 do_if(btest, c); 3213 break; 3214 3215 case Bytecodes::_if_icmpeq: btest = BoolTest::eq; goto handle_if_icmp; 3216 case Bytecodes::_if_icmpne: btest = BoolTest::ne; goto handle_if_icmp; 3217 case Bytecodes::_if_icmplt: btest = BoolTest::lt; goto handle_if_icmp; 3218 case Bytecodes::_if_icmple: btest = BoolTest::le; goto handle_if_icmp; 3219 case Bytecodes::_if_icmpgt: btest = BoolTest::gt; goto handle_if_icmp; 3220 case Bytecodes::_if_icmpge: btest = BoolTest::ge; goto handle_if_icmp; 3221 handle_if_icmp: 3222 // If this is a backwards branch in the 
bytecodes, add Safepoint 3223 maybe_add_safepoint(iter().get_dest()); 3224 a = pop(); 3225 b = pop(); 3226 c = _gvn.transform( new CmpINode( b, a ) ); 3227 do_if(btest, c); 3228 break; 3229 3230 case Bytecodes::_tableswitch: 3231 do_tableswitch(); 3232 break; 3233 3234 case Bytecodes::_lookupswitch: 3235 do_lookupswitch(); 3236 break; 3237 3238 case Bytecodes::_invokestatic: 3239 case Bytecodes::_invokedynamic: 3240 case Bytecodes::_invokespecial: 3241 case Bytecodes::_invokevirtual: 3242 case Bytecodes::_invokeinterface: 3243 do_call(); 3244 break; 3245 case Bytecodes::_checkcast: 3246 do_checkcast(); 3247 break; 3248 case Bytecodes::_instanceof: 3249 do_instanceof(); 3250 break; 3251 case Bytecodes::_anewarray: 3252 do_newarray(); 3253 break; 3254 case Bytecodes::_newarray: 3255 do_newarray((BasicType)iter().get_index()); 3256 break; 3257 case Bytecodes::_multianewarray: 3258 do_multianewarray(); 3259 break; 3260 case Bytecodes::_new: 3261 do_new(); 3262 break; 3263 case Bytecodes::_defaultvalue: 3264 do_defaultvalue(); 3265 break; 3266 case Bytecodes::_withfield: 3267 do_withfield(); 3268 break; 3269 3270 case Bytecodes::_jsr: 3271 case Bytecodes::_jsr_w: 3272 do_jsr(); 3273 break; 3274 3275 case Bytecodes::_ret: 3276 do_ret(); 3277 break; 3278 3279 3280 case Bytecodes::_monitorenter: 3281 do_monitor_enter(); 3282 break; 3283 3284 case Bytecodes::_monitorexit: 3285 do_monitor_exit(); 3286 break; 3287 3288 case Bytecodes::_breakpoint: 3289 // Breakpoint set concurrently to compile 3290 // %%% use an uncommon trap? 3291 C->record_failure("breakpoint in method"); 3292 return; 3293 3294 default: 3295 #ifndef PRODUCT 3296 map()->dump(99); 3297 #endif 3298 tty->print("\nUnhandled bytecode %s\n", Bytecodes::name(bc()) ); 3299 ShouldNotReachHere(); 3300 } 3301 3302 #ifndef PRODUCT 3303 IdealGraphPrinter *printer = C->printer(); 3304 if (printer && printer->should_print(1)) { 3305 char buffer[256]; 3306 jio_snprintf(buffer, sizeof(buffer), "Bytecode %d: %s", bci(), Bytecodes::name(bc())); 3307 bool old = printer->traverse_outs(); 3308 printer->set_traverse_outs(true); 3309 printer->print_method(buffer, 4); 3310 printer->set_traverse_outs(old); 3311 } 3312 #endif 3313 }