/*
 * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/ciMethodData.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "compiler/compileLog.hpp"
#include "interpreter/linkResolver.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "opto/addnode.hpp"
#include "opto/castnode.hpp"
#include "opto/convertnode.hpp"
#include "opto/divnode.hpp"
#include "opto/idealGraphPrinter.hpp"
#include "opto/idealKit.hpp"
#include "opto/matcher.hpp"
#include "opto/memnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/opaquenode.hpp"
#include "opto/parse.hpp"
#include "opto/runtime.hpp"
#include "opto/valuetypenode.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/sharedRuntime.hpp"

#ifndef PRODUCT
extern int explicit_null_checks_inserted,
           explicit_null_checks_elided;
#endif

//---------------------------------array_load----------------------------------
void Parse::array_load(BasicType bt) {
  const Type* elemtype = Type::TOP;
  Node* adr = array_addressing(bt, 0, &elemtype);
  if (stopped())  return;     // guaranteed null or range check

  Node* idx = pop();
  Node* ary = pop();

  // Handle value type arrays
  const TypeOopPtr* elemptr = elemtype->make_oopptr();
  const TypeAryPtr* ary_t = _gvn.type(ary)->is_aryptr();
  if (elemtype->isa_valuetype() != NULL) {
    // Load from flattened value type array
    ciValueKlass* vk = elemtype->is_valuetype()->value_klass();
    Node* vt = ValueTypeNode::make_from_flattened(this, vk, ary, adr);
    push(vt);
    return;
  } else if (elemptr != NULL && elemptr->is_valuetypeptr() && !elemptr->maybe_null()) {
    // Load from non-flattened value type array (elements can never be null)
    bt = T_VALUETYPE;
  } else if (ValueArrayFlatten && elemptr != NULL && elemptr->can_be_value_type() && !ary_t->klass_is_exact()) {
    // Cannot statically determine if array is flattened, emit runtime check
    assert(!elemptr->is_valuetypeptr(), "we know the exact type");
    IdealKit ideal(this);
    IdealVariable res(ideal);
    ideal.declarations_done();
    Node* kls = load_object_klass(ary);
    Node* tag = load_lh_array_tag(kls);
    ideal.if_then(tag, BoolTest::ne, intcon(Klass::_lh_array_tag_vt_value)); {
      // non flattened
      sync_kit(ideal);
      const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
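      // Note: the load below is emitted with C2_CONTROL_DEPENDENT_LOAD so it
      // stays pinned under the layout tag check above and cannot float past
      // it during loop optimizations.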
      elemtype = ary_t->elem()->make_oopptr();
      Node* ld = access_load_at(ary, adr, adr_type, elemtype, bt,
                                IN_HEAP | IS_ARRAY | C2_CONTROL_DEPENDENT_LOAD);
      ideal.sync_kit(this);
      ideal.set(res, ld);
    } ideal.else_(); {
      // flattened
      sync_kit(ideal);
      Node* k_adr = basic_plus_adr(kls, in_bytes(ArrayKlass::element_klass_offset()));
      Node* elem_klass = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), k_adr, TypeInstPtr::KLASS));
      Node* obj_size = NULL;
      kill_dead_locals();
      inc_sp(2);
      Node* alloc_obj = new_instance(elem_klass, NULL, &obj_size, /*deoptimize_on_exception=*/true);
      dec_sp(2);

      AllocateNode* alloc = AllocateNode::Ideal_allocation(alloc_obj, &_gvn);
      assert(alloc->maybe_set_complete(&_gvn), "");
      alloc->initialization()->set_complete_with_arraycopy();
      BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
      // Unknown value type so might have reference fields
      if (!bs->array_copy_requires_gc_barriers(false, T_OBJECT, false, BarrierSetC2::Parsing)) {
        int base_off = sizeof(instanceOopDesc);
        Node* dst_base = basic_plus_adr(alloc_obj, base_off);
        Node* countx = obj_size;
        countx = _gvn.transform(new SubXNode(countx, MakeConX(base_off)));
        countx = _gvn.transform(new URShiftXNode(countx, intcon(LogBytesPerLong)));

        assert(Klass::_lh_log2_element_size_shift == 0, "use shift in place");
        Node* lhp = basic_plus_adr(kls, in_bytes(Klass::layout_helper_offset()));
        Node* elem_shift = make_load(NULL, lhp, TypeInt::INT, T_INT, MemNode::unordered);
        uint header = arrayOopDesc::base_offset_in_bytes(T_VALUETYPE);
        Node* base = basic_plus_adr(ary, header);
        idx = Compile::conv_I2X_index(&_gvn, idx, TypeInt::POS, control());
        Node* scale = _gvn.transform(new LShiftXNode(idx, elem_shift));
        Node* adr = basic_plus_adr(ary, base, scale);

        access_clone(adr, dst_base, countx, false);
      } else {
        ideal.sync_kit(this);
        ideal.make_leaf_call(OptoRuntime::load_unknown_value_Type(),
                             CAST_FROM_FN_PTR(address, OptoRuntime::load_unknown_value),
                             "load_unknown_value",
                             ary, idx, alloc_obj);
        sync_kit(ideal);
      }

      insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));

      ideal.sync_kit(this);
      ideal.set(res, alloc_obj);
    } ideal.end_if();
    sync_kit(ideal);
    push_node(bt, ideal.value(res));
    return;
  }

  if (elemtype == TypeInt::BOOL) {
    bt = T_BOOLEAN;
  } else if (bt == T_OBJECT) {
    elemtype = ary_t->elem()->make_oopptr();
  }

  const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
  Node* ld = access_load_at(ary, adr, adr_type, elemtype, bt,
                            IN_HEAP | IS_ARRAY | C2_CONTROL_DEPENDENT_LOAD);
  if (bt == T_VALUETYPE) {
    // Loading a non-flattened (but flattenable) value type from an array
    assert(!gvn().type(ld)->maybe_null(), "value type array elements should never be null");
    if (elemptr->value_klass()->is_scalarizable()) {
      ld = ValueTypeNode::make_from_oop(this, ld, elemptr->value_klass());
    }
  }

  push_node(bt, ld);
}


//--------------------------------array_store----------------------------------
void Parse::array_store(BasicType bt) {
  const Type* elemtype = Type::TOP;
  Node* adr = array_addressing(bt, type2size[bt], &elemtype);
  if (stopped())  return;     // guaranteed null or range check
  Node* cast_val = NULL;
  if (bt == T_OBJECT) {
    cast_val = array_store_check();
    if (stopped())  return;
  }
  Node* val = pop_node(bt); // Value to store
  Node* idx = pop();        // Index in the array
  Node* ary = pop();        // The array itself

  const TypeAryPtr* ary_t = _gvn.type(ary)->is_aryptr();
  if (bt == T_OBJECT) {
    const TypeOopPtr* elemptr = elemtype->make_oopptr();
    const Type* val_t = _gvn.type(val);
    if (elemtype->isa_valuetype() != NULL) {
      // Store to flattened value type array
      if (!cast_val->is_ValueType()) {
        inc_sp(3);
        cast_val = null_check(cast_val);
        if (stopped())  return;
        dec_sp(3);
        cast_val = ValueTypeNode::make_from_oop(this, cast_val, elemtype->is_valuetype()->value_klass());
      }
      cast_val->as_ValueType()->store_flattened(this, ary, adr);
      return;
    } else if (elemptr->is_valuetypeptr() && !elemptr->maybe_null()) {
      // Store to non-flattened value type array
      if (!cast_val->is_ValueType()) {
        // Cannot store null into a value type array
        inc_sp(3);
        cast_val = null_check(cast_val);
        if (stopped())  return;
        dec_sp(3);
      }
    } else if (elemptr->can_be_value_type() && !ary_t->klass_is_exact() &&
               (val->is_ValueType() || val_t == TypePtr::NULL_PTR || val_t->is_oopptr()->can_be_value_type())) {
      if (ValueArrayFlatten) {
        IdealKit ideal(this);
        Node* kls = load_object_klass(ary);
        Node* layout_val = load_lh_array_tag(kls);
        ideal.if_then(layout_val, BoolTest::ne, intcon(Klass::_lh_array_tag_vt_value)); {
          // non flattened
          sync_kit(ideal);

          if (!val->is_ValueType() && TypePtr::NULL_PTR->higher_equal(val_t)) {
            gen_value_type_array_guard(ary, val, 3);
          }

          const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
          elemtype = ary_t->elem()->make_oopptr();
          access_store_at(ary, adr, adr_type, val, elemtype, bt, MO_UNORDERED | IN_HEAP | IS_ARRAY);
          ideal.sync_kit(this);
        } ideal.else_(); {
          // flattened
          // Object/interface array must be flattened, cast it
          if (val->is_ValueType()) {
            sync_kit(ideal);
            const TypeValueType* vt = _gvn.type(val)->is_valuetype();
            ciArrayKlass* array_klass = ciArrayKlass::make(vt->value_klass(), true);
            const TypeAryPtr* arytype = TypeOopPtr::make_from_klass(array_klass)->isa_aryptr();
            ary = _gvn.transform(new CheckCastPPNode(control(), ary, arytype));
            adr = array_element_address(ary, idx, T_OBJECT, arytype->size(), control());
            val->as_ValueType()->store_flattened(this, ary, adr);
            ideal.sync_kit(this);
          } else {
            if (TypePtr::NULL_PTR->higher_equal(val_t)) {
              sync_kit(ideal);
              Node* null_ctl = top();
              val = null_check_oop(val, &null_ctl);
              if (null_ctl != top()) {
                PreserveJVMState pjvms(this);
                inc_sp(3);
                set_control(null_ctl);
                uncommon_trap(Deoptimization::Reason_null_check, Deoptimization::Action_none);
                dec_sp(3);
              }
              ideal.sync_kit(this);
            }
            if (!ideal.ctrl()->is_top()) {
              ideal.make_leaf_call(OptoRuntime::store_unknown_value_Type(),
                                   CAST_FROM_FN_PTR(address, OptoRuntime::store_unknown_value),
                                   "store_unknown_value",
                                   val, ary, idx);
            }
          }
        } ideal.end_if();
        sync_kit(ideal);
        return;
      } else {
        if (!val->is_ValueType() && TypePtr::NULL_PTR->higher_equal(val_t)) {
          gen_value_type_array_guard(ary, val, 3);
        }
      }
    }
  }

  if (elemtype == TypeInt::BOOL) {
    bt = T_BOOLEAN;
  } else if (bt == T_OBJECT) {
    elemtype = ary_t->elem()->make_oopptr();
  }
  const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);

  access_store_at(ary, adr, adr_type, val, elemtype, bt, MO_UNORDERED | IN_HEAP | IS_ARRAY);
}


//------------------------------array_addressing-------------------------------
// Pull array and index from the stack.  Compute pointer-to-element.
Node* Parse::array_addressing(BasicType type, int vals, const Type* *result2) {
  Node *idx   = peek(0+vals);   // Get from stack without popping
  Node *ary   = peek(1+vals);   // in case of exception

  // Null check the array base, with correct stack contents
  ary = null_check(ary, T_ARRAY);
  // Compile-time detect of null-exception?
  if (stopped())  return top();

  const TypeAryPtr* arytype  = _gvn.type(ary)->is_aryptr();
  const TypeInt*    sizetype = arytype->size();
  const Type*       elemtype = arytype->elem();

  if (UseUniqueSubclasses && result2 != NULL) {
    const Type* el = elemtype->make_ptr();
    if (el && el->isa_instptr()) {
      const TypeInstPtr* toop = el->is_instptr();
      if (toop->klass()->as_instance_klass()->unique_concrete_subklass()) {
        // If we load from "AbstractClass[]" we must see "ConcreteSubClass".
        const Type* subklass = Type::get_const_type(toop->klass());
        elemtype = subklass->join_speculative(el);
      }
    }
  }

  // Check for big class initializers with all constant offsets
  // feeding into a known-size array.
  const TypeInt* idxtype = _gvn.type(idx)->is_int();
  // See if the highest idx value is less than the lowest array bound,
  // and if the idx value cannot be negative:
  bool need_range_check = true;
  if (idxtype->_hi < sizetype->_lo && idxtype->_lo >= 0) {
    need_range_check = false;
    if (C->log() != NULL)   C->log()->elem("observe that='!need_range_check'");
  }

  ciKlass * arytype_klass = arytype->klass();
  if ((arytype_klass != NULL) && (!arytype_klass->is_loaded())) {
    // Only fails for some -Xcomp runs
    // The class is unloaded.  We have to run this bytecode in the interpreter.
    uncommon_trap(Deoptimization::Reason_unloaded,
                  Deoptimization::Action_reinterpret,
                  arytype->klass(), "!loaded array");
    return top();
  }

  // Do the range check
  if (GenerateRangeChecks && need_range_check) {
    Node* tst;
    if (sizetype->_hi <= 0) {
      // The greatest array bound is negative, so we can conclude that we're
      // compiling unreachable code, but the unsigned compare trick used below
      // only works with non-negative lengths.  Instead, hack "tst" to be zero so
      // the uncommon_trap path will always be taken.
      tst = _gvn.intcon(0);
    } else {
      // Range is constant in array-oop, so we can use the original state of mem
      Node* len = load_array_length(ary);

      // Test length vs index (standard trick using unsigned compare)
      Node* chk = _gvn.transform( new CmpUNode(idx, len) );
      BoolTest::mask btest = BoolTest::lt;
      tst = _gvn.transform( new BoolNode(chk, btest) );
    }
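    // The single unsigned compare covers both bounds: a negative idx wraps
    // to a huge unsigned value, so e.g. with len == 10, idx == -1 compares
    // as 0xFFFFFFFF u< 10 and fails the unsigned "lt" test exactly like
    // idx == 10 does.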
    RangeCheckNode* rc = new RangeCheckNode(control(), tst, PROB_MAX, COUNT_UNKNOWN);
    _gvn.set_type(rc, rc->Value(&_gvn));
    if (!tst->is_Con()) {
      record_for_igvn(rc);
    }
    set_control(_gvn.transform(new IfTrueNode(rc)));
    // Branch to failure if out of bounds
    {
      PreserveJVMState pjvms(this);
      set_control(_gvn.transform(new IfFalseNode(rc)));
      if (C->allow_range_check_smearing()) {
        // Do not use builtin_throw, since range checks are sometimes
        // made more stringent by an optimistic transformation.
        // This creates "tentative" range checks at this point,
        // which are not guaranteed to throw exceptions.
        // See IfNode::Ideal, is_range_check, adjust_check.
        uncommon_trap(Deoptimization::Reason_range_check,
                      Deoptimization::Action_make_not_entrant,
                      NULL, "range_check");
      } else {
        // If we have already recompiled with the range-check-widening
        // heroic optimization turned off, then we must really be throwing
        // range check exceptions.
        builtin_throw(Deoptimization::Reason_range_check, idx);
      }
    }
  }
  // Check for always knowing you are throwing a range-check exception
  if (stopped())  return top();

  // Make array address computation control dependent to prevent it
  // from floating above the range check during loop optimizations.
  Node* ptr = array_element_address(ary, idx, type, sizetype, control());

  if (result2 != NULL)  *result2 = elemtype;

  assert(ptr != top(), "top should go hand-in-hand with stopped");

  return ptr;
}


// returns IfNode
IfNode* Parse::jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask, float prob, float cnt) {
  Node   *cmp = _gvn.transform(new CmpINode(a, b)); // two cases: shiftcount > 32 and shiftcount <= 32
  Node   *tst = _gvn.transform(new BoolNode(cmp, mask));
  IfNode *iff = create_and_map_if(control(), tst, prob, cnt);
  return iff;
}

// return Region node
Node* Parse::jump_if_join(Node* iffalse, Node* iftrue) {
  Node *region  = new RegionNode(3); // 2 results
  record_for_igvn(region);
  region->init_req(1, iffalse);
  region->init_req(2, iftrue );
  _gvn.set_type(region, Type::CONTROL);
  region = _gvn.transform(region);
  set_control (region);
  return region;
}

// sentinel value for the target bci to mark never taken branches
// (according to profiling)
static const int never_reached = INT_MAX;

//------------------------------helper for tableswitch-------------------------
void Parse::jump_if_true_fork(IfNode *iff, int dest_bci_if_true, int prof_table_index, bool unc) {
  // True branch, use existing map info
  { PreserveJVMState pjvms(this);
    Node *iftrue  = _gvn.transform( new IfTrueNode (iff) );
    set_control( iftrue );
    if (unc) {
      repush_if_args();
      uncommon_trap(Deoptimization::Reason_unstable_if,
                    Deoptimization::Action_reinterpret,
                    NULL,
                    "taken always");
    } else {
      assert(dest_bci_if_true != never_reached, "inconsistent dest");
      profile_switch_case(prof_table_index);
      merge_new_path(dest_bci_if_true);
    }
  }

  // False branch
  Node *iffalse = _gvn.transform( new IfFalseNode(iff) );
  set_control( iffalse );
}

void Parse::jump_if_false_fork(IfNode *iff, int dest_bci_if_true, int prof_table_index, bool unc) {
  // False branch (the taken path here), use existing map info
  { PreserveJVMState pjvms(this);
    Node *iffalse = _gvn.transform( new IfFalseNode (iff) );
    set_control( iffalse );
    if (unc) {
      repush_if_args();
      uncommon_trap(Deoptimization::Reason_unstable_if,
                    Deoptimization::Action_reinterpret,
                    NULL,
                    "taken never");
    } else {
      assert(dest_bci_if_true != never_reached, "inconsistent dest");
      profile_switch_case(prof_table_index);
      merge_new_path(dest_bci_if_true);
    }
  }

  // True branch (fall through)
  Node *iftrue = _gvn.transform( new IfTrueNode(iff) );
  set_control( iftrue );
}
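// Note: the three fork helpers above and below each emit one edge of the
// switch decision tree; with unc == true a destination that profiling says
// was never reached is replaced by an unstable_if uncommon trap, so code
// for a never-seen case is only compiled if it is eventually taken.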
void Parse::jump_if_always_fork(int dest_bci, int prof_table_index, bool unc) {
  // False branch, use existing map and control()
  if (unc) {
    repush_if_args();
    uncommon_trap(Deoptimization::Reason_unstable_if,
                  Deoptimization::Action_reinterpret,
                  NULL,
                  "taken never");
  } else {
    assert(dest_bci != never_reached, "inconsistent dest");
    profile_switch_case(prof_table_index);
    merge_new_path(dest_bci);
  }
}


extern "C" {
  static int jint_cmp(const void *i, const void *j) {
    int a = *(jint *)i;
    int b = *(jint *)j;
    return a > b ? 1 : a < b ? -1 : 0;
  }
}


// Default value for methodData switch indexing. Must be a negative value to avoid
// conflict with any legal switch index.
#define NullTableIndex -1

class SwitchRange : public StackObj {
  // a range of integers coupled with a bci destination
  jint _lo;                     // inclusive lower limit
  jint _hi;                     // inclusive upper limit
  int _dest;
  int _table_index;             // index into method data table
  float _cnt;                   // how many times this range was hit according to profiling

public:
  jint lo() const              { return _lo;   }
  jint hi() const              { return _hi;   }
  int  dest() const            { return _dest; }
  int  table_index() const     { return _table_index; }
  bool is_singleton() const    { return _lo == _hi; }
  float cnt() const            { return _cnt; }

  void setRange(jint lo, jint hi, int dest, int table_index, float cnt) {
    assert(lo <= hi, "must be a non-empty range");
    _lo = lo, _hi = hi; _dest = dest; _table_index = table_index; _cnt = cnt;
    assert(_cnt >= 0, "");
  }
  bool adjoinRange(jint lo, jint hi, int dest, int table_index, float cnt, bool trim_ranges) {
    assert(lo <= hi, "must be a non-empty range");
    if (lo == _hi+1 && table_index == _table_index) {
      // see merge_ranges() comment below
      if (trim_ranges) {
        if (cnt == 0) {
          if (_cnt != 0) {
            return false;
          }
          if (dest != _dest) {
            _dest = never_reached;
          }
        } else {
          if (_cnt == 0) {
            return false;
          }
          if (dest != _dest) {
            return false;
          }
        }
      } else {
        if (dest != _dest) {
          return false;
        }
      }
      _hi = hi;
      _cnt += cnt;
      return true;
    }
    return false;
  }

  void set (jint value, int dest, int table_index, float cnt) {
    setRange(value, value, dest, table_index, cnt);
  }
  bool adjoin(jint value, int dest, int table_index, float cnt, bool trim_ranges) {
    return adjoinRange(value, value, dest, table_index, cnt, trim_ranges);
  }
  bool adjoin(SwitchRange& other) {
    return adjoinRange(other._lo, other._hi, other._dest, other._table_index, other._cnt, false);
  }

  void print() {
    if (is_singleton())
      tty->print(" {%d}=>%d (cnt=%f)", lo(), dest(), cnt());
    else if (lo() == min_jint)
      tty->print(" {..%d}=>%d (cnt=%f)", hi(), dest(), cnt());
    else if (hi() == max_jint)
      tty->print(" {%d..}=>%d (cnt=%f)", lo(), dest(), cnt());
    else
      tty->print(" {%d..%d}=>%d (cnt=%f)", lo(), hi(), dest(), cnt());
  }
};

// We try to minimize the number of ranges and the size of the taken
// ones using profiling data. When ranges are created,
// SwitchRange::adjoinRange() only allows 2 adjoining ranges to merge
// if both were never hit or both were hit to build longer unreached
// ranges. Here, we now merge adjoining ranges with the same
// destination and finally set destination of unreached ranges to the
// special value never_reached because it can help minimize the number
// of tests that are necessary.
//
// For instance:
// [0, 1] to target1 sometimes taken
// [1, 2] to target1 never taken
// [2, 3] to target2 never taken
// would lead to:
// [0, 1] to target1 sometimes taken
// [1, 3] never taken
//
// (first 2 ranges to target1 are not merged)
static void merge_ranges(SwitchRange* ranges, int& rp) {
  if (rp == 0) {
    return;
  }
  int shift = 0;
  for (int j = 0; j < rp; j++) {
    SwitchRange& r1 = ranges[j-shift];
    SwitchRange& r2 = ranges[j+1];
    if (r1.adjoin(r2)) {
      shift++;
    } else if (shift > 0) {
      ranges[j+1-shift] = r2;
    }
  }
  rp -= shift;
  for (int j = 0; j <= rp; j++) {
    SwitchRange& r = ranges[j];
    if (r.cnt() == 0 && r.dest() != never_reached) {
      r.setRange(r.lo(), r.hi(), never_reached, r.table_index(), r.cnt());
    }
  }
}

//-------------------------------do_tableswitch--------------------------------
void Parse::do_tableswitch() {
  Node* lookup = pop();
  // Get information about tableswitch
  int default_dest = iter().get_dest_table(0);
  int lo_index     = iter().get_int_table(1);
  int hi_index     = iter().get_int_table(2);
  int len          = hi_index - lo_index + 1;

  if (len < 1) {
    // If this is a backward branch, add safepoint
    maybe_add_safepoint(default_dest);
    merge(default_dest);
    return;
  }

  ciMethodData* methodData = method()->method_data();
  ciMultiBranchData* profile = NULL;
  if (methodData->is_mature() && UseSwitchProfiling) {
    ciProfileData* data = methodData->bci_to_data(bci());
    if (data != NULL && data->is_MultiBranchData()) {
      profile = (ciMultiBranchData*)data;
    }
  }
  bool trim_ranges = !method_data_update() && !C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if);

  // generate decision tree, using trichotomy when possible
  int rnum = len+2;
  bool makes_backward_branch = false;
  SwitchRange* ranges = NEW_RESOURCE_ARRAY(SwitchRange, rnum);
  int rp = -1;
  if (lo_index != min_jint) {
    uint cnt = 1;
    if (profile != NULL) {
      cnt = profile->default_count() / (hi_index != max_jint ? 2 : 1);
    }
    ranges[++rp].setRange(min_jint, lo_index-1, default_dest, NullTableIndex, cnt);
  }
  for (int j = 0; j < len; j++) {
    jint match_int = lo_index+j;
    int  dest      = iter().get_dest_table(j+3);
    makes_backward_branch |= (dest <= bci());
    int  table_index = method_data_update() ? j : NullTableIndex;
    uint cnt = 1;
    if (profile != NULL) {
      cnt = profile->count_at(j);
    }
    if (rp < 0 || !ranges[rp].adjoin(match_int, dest, table_index, cnt, trim_ranges)) {
      ranges[++rp].set(match_int, dest, table_index, cnt);
    }
  }
  jint highest = lo_index+(len-1);
  assert(ranges[rp].hi() == highest, "");
  if (highest != max_jint) {
    uint cnt = 1;
    if (profile != NULL) {
      cnt = profile->default_count() / (lo_index != min_jint ? 2 : 1);
    }
    if (!ranges[rp].adjoinRange(highest+1, max_jint, default_dest, NullTableIndex, cnt, trim_ranges)) {
      ranges[++rp].setRange(highest+1, max_jint, default_dest, NullTableIndex, cnt);
    }
  }
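  // At this point the ranges partition the whole int domain. For example, a
  // tableswitch with lo_index == 0, hi_index == 2 and case targets d0, d1, d2
  // produces (before merging): [min_jint..-1] -> default, {0} -> d0,
  // {1} -> d1, {2} -> d2, [3..max_jint] -> default.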
  assert(rp < len+2, "not too many ranges");

  if (trim_ranges) {
    merge_ranges(ranges, rp);
  }

  // Safepoint in case backward branch observed
  if( makes_backward_branch && UseLoopSafepoints )
    add_safepoint();

  jump_switch_ranges(lookup, &ranges[0], &ranges[rp]);
}


//------------------------------do_lookupswitch--------------------------------
void Parse::do_lookupswitch() {
  Node *lookup = pop();         // lookup value
  // Get information about lookupswitch
  int default_dest = iter().get_dest_table(0);
  int len          = iter().get_int_table(1);

  if (len < 1) {    // If this is a backward branch, add safepoint
    maybe_add_safepoint(default_dest);
    merge(default_dest);
    return;
  }

  ciMethodData* methodData = method()->method_data();
  ciMultiBranchData* profile = NULL;
  if (methodData->is_mature() && UseSwitchProfiling) {
    ciProfileData* data = methodData->bci_to_data(bci());
    if (data != NULL && data->is_MultiBranchData()) {
      profile = (ciMultiBranchData*)data;
    }
  }
  bool trim_ranges = !method_data_update() && !C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if);

  // generate decision tree, using trichotomy when possible
  jint* table = NEW_RESOURCE_ARRAY(jint, len*3);
  {
    for (int j = 0; j < len; j++) {
      table[3*j+0] = iter().get_int_table(2+2*j);
      table[3*j+1] = iter().get_dest_table(2+2*j+1);
      table[3*j+2] = profile == NULL ? 1 : profile->count_at(j);
    }
    qsort(table, len, 3*sizeof(table[0]), jint_cmp);
  }

  float defaults = 0;
  jint prev = min_jint;
  for (int j = 0; j < len; j++) {
    jint match_int = table[3*j+0];
    if (match_int != prev) {
      defaults += (float)match_int - prev;
    }
    prev = match_int+1;
  }
  if (prev-1 != max_jint) {
    defaults += (float)max_jint - prev + 1;
  }
  float default_cnt = 1;
  if (profile != NULL) {
    default_cnt = profile->default_count()/defaults;
  }

  int rnum = len*2+1;
  bool makes_backward_branch = false;
  SwitchRange* ranges = NEW_RESOURCE_ARRAY(SwitchRange, rnum);
  int rp = -1;
  for (int j = 0; j < len; j++) {
    jint match_int   = table[3*j+0];
    int  dest        = table[3*j+1];
    int  cnt         = table[3*j+2];
    int  next_lo     = rp < 0 ? min_jint : ranges[rp].hi()+1;
    int  table_index = method_data_update() ? j : NullTableIndex;
    makes_backward_branch |= (dest <= bci());
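    // Keys in the gap [next_lo..match_int-1] have no case and fall to the
    // default; their frequency is estimated by spreading the profiled
    // default count uniformly, default_cnt per uncovered value.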
    float c = default_cnt * ((float)match_int - next_lo);
    if (match_int != next_lo && (rp < 0 || !ranges[rp].adjoinRange(next_lo, match_int-1, default_dest, NullTableIndex, c, trim_ranges))) {
      assert(default_dest != never_reached, "sentinel value for dead destinations");
      ranges[++rp].setRange(next_lo, match_int-1, default_dest, NullTableIndex, c);
    }
    if (rp < 0 || !ranges[rp].adjoin(match_int, dest, table_index, cnt, trim_ranges)) {
      assert(dest != never_reached, "sentinel value for dead destinations");
      ranges[++rp].set(match_int, dest, table_index, cnt);
    }
  }
  jint highest = table[3*(len-1)];
  assert(ranges[rp].hi() == highest, "");
  if (highest != max_jint &&
      !ranges[rp].adjoinRange(highest+1, max_jint, default_dest, NullTableIndex, default_cnt * ((float)max_jint - highest), trim_ranges)) {
    ranges[++rp].setRange(highest+1, max_jint, default_dest, NullTableIndex, default_cnt * ((float)max_jint - highest));
  }
  assert(rp < rnum, "not too many ranges");

  if (trim_ranges) {
    merge_ranges(ranges, rp);
  }

  // Safepoint in case backward branch observed
  if (makes_backward_branch && UseLoopSafepoints)
    add_safepoint();

  jump_switch_ranges(lookup, &ranges[0], &ranges[rp]);
}

static float if_prob(float taken_cnt, float total_cnt) {
  assert(taken_cnt <= total_cnt, "");
  if (total_cnt == 0) {
    return PROB_FAIR;
  }
  float p = taken_cnt / total_cnt;
  return MIN2(MAX2(p, PROB_MIN), PROB_MAX);
}

static float if_cnt(float cnt) {
  if (cnt == 0) {
    return COUNT_UNKNOWN;
  }
  return cnt;
}

static float sum_of_cnts(SwitchRange *lo, SwitchRange *hi) {
  float total_cnt = 0;
  for (SwitchRange* sr = lo; sr <= hi; sr++) {
    total_cnt += sr->cnt();
  }
  return total_cnt;
}

class SwitchRanges : public ResourceObj {
public:
  SwitchRange* _lo;
  SwitchRange* _hi;
  SwitchRange* _mid;
  float _cost;

  enum {
    Start,
    LeftDone,
    RightDone,
    Done
  } _state;

  SwitchRanges(SwitchRange *lo, SwitchRange *hi)
    : _lo(lo), _hi(hi), _mid(NULL),
      _cost(0), _state(Start) {
  }

  SwitchRanges()
    : _lo(NULL), _hi(NULL), _mid(NULL),
      _cost(0), _state(Start) {}
};
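// The cost computed below is the expected number of comparisons per lookup:
// every tree node contributes the fraction of the total profile count that
// reaches it, so n equally hot singleton ranges in a balanced tree cost
// about log2(n).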
// Estimate cost of performing a binary search on lo..hi
static float compute_tree_cost(SwitchRange *lo, SwitchRange *hi, float total_cnt) {
  GrowableArray<SwitchRanges> tree;
  SwitchRanges root(lo, hi);
  tree.push(root);

  float cost = 0;
  do {
    SwitchRanges& r = *tree.adr_at(tree.length()-1);
    if (r._hi != r._lo) {
      if (r._mid == NULL) {
        float r_cnt = sum_of_cnts(r._lo, r._hi);

        if (r_cnt == 0) {
          tree.pop();
          cost = 0;
          continue;
        }

        SwitchRange* mid = NULL;
        mid = r._lo;
        for (float cnt = 0; ; ) {
          assert(mid <= r._hi, "out of bounds");
          cnt += mid->cnt();
          if (cnt > r_cnt / 2) {
            break;
          }
          mid++;
        }
        assert(mid <= r._hi, "out of bounds");
        r._mid = mid;
        r._cost = r_cnt / total_cnt;
      }
      r._cost += cost;
      if (r._state < SwitchRanges::LeftDone && r._mid > r._lo) {
        cost = 0;
        r._state = SwitchRanges::LeftDone;
        tree.push(SwitchRanges(r._lo, r._mid-1));
      } else if (r._state < SwitchRanges::RightDone) {
        cost = 0;
        r._state = SwitchRanges::RightDone;
        tree.push(SwitchRanges(r._mid == r._lo ? r._mid+1 : r._mid, r._hi));
      } else {
        tree.pop();
        cost = r._cost;
      }
    } else {
      tree.pop();
      cost = r._cost;
    }
  } while (tree.length() > 0);


  return cost;
}

// It sometimes pays off to test most common ranges before the binary search
void Parse::linear_search_switch_ranges(Node* key_val, SwitchRange*& lo, SwitchRange*& hi) {
  uint nr = hi - lo + 1;
  float total_cnt = sum_of_cnts(lo, hi);

  float min = compute_tree_cost(lo, hi, total_cnt);
  float extra = 1;
  float sub = 0;

  SwitchRange* array1 = lo;
  SwitchRange* array2 = NEW_RESOURCE_ARRAY(SwitchRange, nr);

  SwitchRange* ranges = NULL;

  while (nr >= 2) {
    assert(lo == array1 || lo == array2, "one of the 2 already allocated arrays");
    ranges = (lo == array1) ? array2 : array1;

    // Find highest frequency range
    SwitchRange* candidate = lo;
    for (SwitchRange* sr = lo+1; sr <= hi; sr++) {
      if (sr->cnt() > candidate->cnt()) {
        candidate = sr;
      }
    }
    SwitchRange most_freq = *candidate;
    if (most_freq.cnt() == 0) {
      break;
    }

    // Copy remaining ranges into another array
    int shift = 0;
    for (uint i = 0; i < nr; i++) {
      SwitchRange* sr = &lo[i];
      if (sr != candidate) {
        ranges[i-shift] = *sr;
      } else {
        shift++;
        if (i > 0 && i < nr-1) {
          SwitchRange prev = lo[i-1];
          prev.setRange(prev.lo(), sr->hi(), prev.dest(), prev.table_index(), prev.cnt());
          if (prev.adjoin(lo[i+1])) {
            shift++;
            i++;
          }
          ranges[i-shift] = prev;
        }
      }
    }
    nr -= shift;

    // Evaluate cost of testing the most common range and performing a
    // binary search on the other ranges
    float cost = extra + compute_tree_cost(&ranges[0], &ranges[nr-1], total_cnt);
    if (cost >= min) {
      break;
    }
    // swap arrays
    lo = &ranges[0];
    hi = &ranges[nr-1];

    // It pays off: emit the test for the most common range
    assert(most_freq.cnt() > 0, "must be taken");
    Node* val = _gvn.transform(new SubINode(key_val, _gvn.intcon(most_freq.lo())));
    Node* cmp = _gvn.transform(new CmpUNode(val, _gvn.intcon(most_freq.hi() - most_freq.lo())));
    Node* tst = _gvn.transform(new BoolNode(cmp, BoolTest::le));
    IfNode* iff = create_and_map_if(control(), tst, if_prob(most_freq.cnt(), total_cnt), if_cnt(most_freq.cnt()));
    jump_if_true_fork(iff, most_freq.dest(), most_freq.table_index(), false);

    sub += most_freq.cnt() / total_cnt;
    extra += 1 - sub;
    min = cost;
  }
}

//----------------------------create_jump_tables-------------------------------
bool Parse::create_jump_tables(Node* key_val, SwitchRange* lo, SwitchRange* hi) {
  // Are jumptables enabled
  if (!UseJumpTables)  return false;

  // Are jumptables supported
  if (!Matcher::has_match_rule(Op_Jump))  return false;

  // Don't make jump table if profiling
  if (method_data_update())  return false;

  bool trim_ranges = !C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if);

  // Decide if a guard is needed to lop off big ranges at either (or
  // both) end(s) of the input set. We'll call this the default target
  // even though we can't be sure that it is the true "default".
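  // For example, [min_jint..-1] -> default, {0} -> A, {1} -> B,
  // [2..max_jint] -> default: both outlier ranges go to the default, so one
  // unsigned guard on the normalized key can branch there and leave a dense
  // two-entry table for the rest.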
  bool needs_guard = false;
  int default_dest;
  int64_t total_outlier_size = 0;
  int64_t hi_size = ((int64_t)hi->hi()) - ((int64_t)hi->lo()) + 1;
  int64_t lo_size = ((int64_t)lo->hi()) - ((int64_t)lo->lo()) + 1;

  if (lo->dest() == hi->dest()) {
    total_outlier_size = hi_size + lo_size;
    default_dest = lo->dest();
  } else if (lo_size > hi_size) {
    total_outlier_size = lo_size;
    default_dest = lo->dest();
  } else {
    total_outlier_size = hi_size;
    default_dest = hi->dest();
  }

  float total = sum_of_cnts(lo, hi);
  float cost = compute_tree_cost(lo, hi, total);

  // If a guard test will eliminate very sparse end ranges, then
  // it is worth the cost of an extra jump.
  float trimmed_cnt = 0;
  if (total_outlier_size > (MaxJumpTableSparseness * 4)) {
    needs_guard = true;
    if (default_dest == lo->dest()) {
      trimmed_cnt += lo->cnt();
      lo++;
    }
    if (default_dest == hi->dest()) {
      trimmed_cnt += hi->cnt();
      hi--;
    }
  }

  // Find the total number of cases and ranges
  int64_t num_cases = ((int64_t)hi->hi()) - ((int64_t)lo->lo()) + 1;
  int num_range = hi - lo + 1;

  // Don't create table if: too large, too small, or too sparse.
  if (num_cases > MaxJumpTableSize)
    return false;
  if (UseSwitchProfiling) {
    // MinJumpTableSize is set so with a well balanced binary tree,
    // when the number of ranges is MinJumpTableSize, it's cheaper to
    // go through a JumpNode than a tree of IfNodes. Average cost of a
    // tree of IfNodes with MinJumpTableSize is
    // log2f(MinJumpTableSize) comparisons. So if the cost computed
    // from profile data is less than log2f(MinJumpTableSize) then
    // going with the binary search is cheaper.
    if (cost < log2f(MinJumpTableSize)) {
      return false;
    }
  } else {
    if (num_cases < MinJumpTableSize)
      return false;
  }
  if (num_cases > (MaxJumpTableSparseness * num_range))
    return false;

  // Normalize table lookups to zero
  int lowval = lo->lo();
  key_val = _gvn.transform( new SubINode(key_val, _gvn.intcon(lowval)) );

  // Generate a guard to protect against input keyvals that aren't
  // in the switch domain.
  if (needs_guard) {
    Node*   size = _gvn.intcon(num_cases);
    Node*   cmp = _gvn.transform(new CmpUNode(key_val, size));
    Node*   tst = _gvn.transform(new BoolNode(cmp, BoolTest::ge));
    IfNode* iff = create_and_map_if(control(), tst, if_prob(trimmed_cnt, total), if_cnt(trimmed_cnt));
    jump_if_true_fork(iff, default_dest, NullTableIndex, trim_ranges && trimmed_cnt == 0);

    total -= trimmed_cnt;
  }

  // Create an ideal node JumpTable that has projections
  // of all possible ranges for a switch statement
  // The key_val input must be converted to a pointer offset and scaled.
  // Compare Parse::array_addressing above.

  // Clean the 32-bit int into a real 64-bit offset.
  // Otherwise, the jint value 0 might turn into an offset of 0x0800000000.
  const TypeInt* ikeytype = TypeInt::make(0, num_cases, Type::WidenMin);
  // Make I2L conversion control dependent to prevent it from
  // floating above the range check during loop optimizations.
  key_val = C->conv_I2X_index(&_gvn, key_val, ikeytype, control());
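  // key_val is now a machine-word index; the multiply below by wordSize
  // turns it into a byte offset into the jump table (e.g. key 3 becomes
  // offset 24 on a 64-bit VM).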
  // Shift the value by wordsize so we have an index into the table, rather
  // than a switch value
  Node *shiftWord = _gvn.MakeConX(wordSize);
  key_val = _gvn.transform( new MulXNode( key_val, shiftWord));

  // Create the JumpNode
  Arena* arena = C->comp_arena();
  float* probs = (float*)arena->Amalloc(sizeof(float)*num_cases);
  int i = 0;
  if (total == 0) {
    for (SwitchRange* r = lo; r <= hi; r++) {
      for (int64_t j = r->lo(); j <= r->hi(); j++, i++) {
        probs[i] = 1.0F / num_cases;
      }
    }
  } else {
    for (SwitchRange* r = lo; r <= hi; r++) {
      float prob = r->cnt()/total;
      for (int64_t j = r->lo(); j <= r->hi(); j++, i++) {
        probs[i] = prob / (r->hi() - r->lo() + 1);
      }
    }
  }

  ciMethodData* methodData = method()->method_data();
  ciMultiBranchData* profile = NULL;
  if (methodData->is_mature()) {
    ciProfileData* data = methodData->bci_to_data(bci());
    if (data != NULL && data->is_MultiBranchData()) {
      profile = (ciMultiBranchData*)data;
    }
  }

  Node* jtn = _gvn.transform(new JumpNode(control(), key_val, num_cases, probs, profile == NULL ? COUNT_UNKNOWN : total));

  // These are the switch destinations hanging off the jumpnode
  i = 0;
  for (SwitchRange* r = lo; r <= hi; r++) {
    for (int64_t j = r->lo(); j <= r->hi(); j++, i++) {
      Node* input = _gvn.transform(new JumpProjNode(jtn, i, r->dest(), (int)(j - lowval)));
      {
        PreserveJVMState pjvms(this);
        set_control(input);
        jump_if_always_fork(r->dest(), r->table_index(), trim_ranges && r->cnt() == 0);
      }
    }
  }
  assert(i == num_cases, "miscount of cases");
  stop_and_kill_map();  // no more uses for this JVMS
  return true;
}

//----------------------------jump_switch_ranges-------------------------------
void Parse::jump_switch_ranges(Node* key_val, SwitchRange *lo, SwitchRange *hi, int switch_depth) {
  Block* switch_block = block();
  bool trim_ranges = !method_data_update() && !C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if);

  if (switch_depth == 0) {
    // Do special processing for the top-level call.
    assert(lo->lo() == min_jint, "initial range must exhaust Type::INT");
    assert(hi->hi() == max_jint, "initial range must exhaust Type::INT");

    // Decrement pred-numbers for the unique set of nodes.
#ifdef ASSERT
    if (!trim_ranges) {
      // Ensure that the block's successors are a (duplicate-free) set.
      int successors_counted = 0;  // block occurrences in [hi..lo]
      int unique_successors = switch_block->num_successors();
      for (int i = 0; i < unique_successors; i++) {
        Block* target = switch_block->successor_at(i);

        // Check that the set of successors is the same in both places.
        int successors_found = 0;
        for (SwitchRange* p = lo; p <= hi; p++) {
          if (p->dest() == target->start())  successors_found++;
        }
        assert(successors_found > 0, "successor must be known");
        successors_counted += successors_found;
      }
      assert(successors_counted == (hi-lo)+1, "no unexpected successors");
    }
#endif

    // Maybe prune the inputs, based on the type of key_val.
    jint min_val = min_jint;
    jint max_val = max_jint;
    const TypeInt* ti = key_val->bottom_type()->isa_int();
    if (ti != NULL) {
      min_val = ti->_lo;
      max_val = ti->_hi;
      assert(min_val <= max_val, "invalid int type");
    }
    while (lo->hi() < min_val) {
      lo++;
    }
    if (lo->lo() < min_val) {
      lo->setRange(min_val, lo->hi(), lo->dest(), lo->table_index(), lo->cnt());
    }
    while (hi->lo() > max_val) {
      hi--;
    }
    if (hi->hi() > max_val) {
      hi->setRange(hi->lo(), max_val, hi->dest(), hi->table_index(), hi->cnt());
    }

    linear_search_switch_ranges(key_val, lo, hi);
  }

#ifndef PRODUCT
  if (switch_depth == 0) {
    _max_switch_depth = 0;
    _est_switch_depth = log2_intptr((hi-lo+1)-1)+1;
  }
#endif

  assert(lo <= hi, "must be a non-empty set of ranges");
  if (lo == hi) {
    jump_if_always_fork(lo->dest(), lo->table_index(), trim_ranges && lo->cnt() == 0);
  } else {
    assert(lo->hi() == (lo+1)->lo()-1, "contiguous ranges");
    assert(hi->lo() == (hi-1)->hi()+1, "contiguous ranges");

    if (create_jump_tables(key_val, lo, hi)) return;

    SwitchRange* mid = NULL;
    float total_cnt = sum_of_cnts(lo, hi);

    int nr = hi - lo + 1;
    if (UseSwitchProfiling) {
      // Don't keep the binary search tree balanced: pick the mid point
      // that splits frequencies in half.
      float cnt = 0;
      for (SwitchRange* sr = lo; sr <= hi; sr++) {
        cnt += sr->cnt();
        if (cnt >= total_cnt / 2) {
          mid = sr;
          break;
        }
      }
    } else {
      mid = lo + nr/2;

      // if there is an easy choice, pivot at a singleton:
      if (nr > 3 && !mid->is_singleton() && (mid-1)->is_singleton())  mid--;

      assert(lo < mid && mid <= hi, "good pivot choice");
      assert(nr != 2 || mid == hi,   "should pick higher of 2");
      assert(nr != 3 || mid == hi-1, "should pick middle of 3");
    }
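    // The pivot is tested first: a singleton mid is checked for equality and
    // the remainder split with an le/gt compare of the same two values (GVN
    // commons the CmpI nodes, so the hardware comparison is done only once).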

    Node *test_val = _gvn.intcon(mid == lo ? mid->hi() : mid->lo());

    if (mid->is_singleton()) {
      IfNode *iff_ne = jump_if_fork_int(key_val, test_val, BoolTest::ne, 1-if_prob(mid->cnt(), total_cnt), if_cnt(mid->cnt()));
      jump_if_false_fork(iff_ne, mid->dest(), mid->table_index(), trim_ranges && mid->cnt() == 0);

      // Special Case:  If there are exactly three ranges, and the high
      // and low range each go to the same place, omit the "gt" test,
      // since it will not discriminate anything.
      bool eq_test_only = (hi == lo+2 && hi->dest() == lo->dest() && mid == hi-1) || mid == lo;

      // if there is a higher range, test for it and process it:
      if (mid < hi && !eq_test_only) {
        // two comparisons of same values--should enable 1 test for 2 branches
        // Use BoolTest::le instead of BoolTest::gt
        float cnt = sum_of_cnts(lo, mid-1);
        IfNode *iff_le  = jump_if_fork_int(key_val, test_val, BoolTest::le, if_prob(cnt, total_cnt), if_cnt(cnt));
        Node   *iftrue  = _gvn.transform( new IfTrueNode(iff_le) );
        Node   *iffalse = _gvn.transform( new IfFalseNode(iff_le) );
        { PreserveJVMState pjvms(this);
          set_control(iffalse);
          jump_switch_ranges(key_val, mid+1, hi, switch_depth+1);
        }
        set_control(iftrue);
      }

    } else {
      // mid is a range, not a singleton, so treat mid..hi as a unit
      float cnt = sum_of_cnts(mid == lo ? mid+1 : mid, hi);
      IfNode *iff_ge = jump_if_fork_int(key_val, test_val, mid == lo ? BoolTest::gt : BoolTest::ge, if_prob(cnt, total_cnt), if_cnt(cnt));

      // if there is a higher range, test for it and process it:
      if (mid == hi) {
        jump_if_true_fork(iff_ge, mid->dest(), mid->table_index(), trim_ranges && cnt == 0);
      } else {
        Node *iftrue  = _gvn.transform( new IfTrueNode(iff_ge) );
        Node *iffalse = _gvn.transform( new IfFalseNode(iff_ge) );
        { PreserveJVMState pjvms(this);
          set_control(iftrue);
          jump_switch_ranges(key_val, mid == lo ? mid+1 : mid, hi, switch_depth+1);
        }
        set_control(iffalse);
      }
    }

    // in any case, process the lower range
    if (mid == lo) {
      if (mid->is_singleton()) {
        jump_switch_ranges(key_val, lo+1, hi, switch_depth+1);
      } else {
        jump_if_always_fork(lo->dest(), lo->table_index(), trim_ranges && lo->cnt() == 0);
      }
    } else {
      jump_switch_ranges(key_val, lo, mid-1, switch_depth+1);
    }
  }

  // Decrease pred_count for each successor after all is done.
  if (switch_depth == 0) {
    int unique_successors = switch_block->num_successors();
    for (int i = 0; i < unique_successors; i++) {
      Block* target = switch_block->successor_at(i);
      // Throw away the pre-allocated path for each unique successor.
      target->next_path_num();
    }
  }

#ifndef PRODUCT
  _max_switch_depth = MAX2(switch_depth, _max_switch_depth);
  if (TraceOptoParse && Verbose && WizardMode && switch_depth == 0) {
    SwitchRange* r;
    int nsing = 0;
    for( r = lo; r <= hi; r++ ) {
      if( r->is_singleton() )  nsing++;
    }
    tty->print(">>> ");
    _method->print_short_name();
    tty->print_cr(" switch decision tree");
    tty->print_cr("    %d ranges (%d singletons), max_depth=%d, est_depth=%d",
                  (int) (hi-lo+1), nsing, _max_switch_depth, _est_switch_depth);
    if (_max_switch_depth > _est_switch_depth) {
      tty->print_cr("******** BAD SWITCH DEPTH ********");
    }
    tty->print("   ");
    for( r = lo; r <= hi; r++ ) {
      r->print();
    }
    tty->cr();
  }
#endif
}

void Parse::modf() {
  Node *f2 = pop();
  Node *f1 = pop();
  Node* c = make_runtime_call(RC_LEAF, OptoRuntime::modf_Type(),
                              CAST_FROM_FN_PTR(address, SharedRuntime::frem),
                              "frem", NULL, //no memory effects
                              f1, f2);
  Node* res = _gvn.transform(new ProjNode(c, TypeFunc::Parms + 0));

  push(res);
}

void Parse::modd() {
  Node *d2 = pop_pair();
  Node *d1 = pop_pair();
  Node* c = make_runtime_call(RC_LEAF, OptoRuntime::Math_DD_D_Type(),
                              CAST_FROM_FN_PTR(address, SharedRuntime::drem),
                              "drem", NULL, //no memory effects
                              d1, top(), d2, top());
  Node* res_d = _gvn.transform(new ProjNode(c, TypeFunc::Parms + 0));

#ifdef ASSERT
  Node* res_top = _gvn.transform(new ProjNode(c, TypeFunc::Parms + 1));
  assert(res_top == top(), "second value must be top");
#endif

  push_pair(res_d);
}

void Parse::l2f() {
  Node* f2 = pop();
  Node* f1 = pop();
  Node* c = make_runtime_call(RC_LEAF, OptoRuntime::l2f_Type(),
                              CAST_FROM_FN_PTR(address, SharedRuntime::l2f),
                              "l2f", NULL, //no memory effects
                              f1, f2);
  Node* res = _gvn.transform(new ProjNode(c, TypeFunc::Parms + 0));

  push(res);
}
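// For a constant positive power-of-2 divisor, x % d is strength-reduced
// below to masking: non-negative x uses x & (d-1) directly, negative x is
// negated, masked and negated back so the sign of the result matches Java
// semantics (e.g. -7 % 8 == -(7 & 7) == -7).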
void Parse::do_irem() {
  // Must keep both values on the expression-stack during null-check
  zero_check_int(peek());
  // Compile-time detect of null-exception?
  if (stopped())  return;

  Node* b = pop();
  Node* a = pop();

  const Type *t = _gvn.type(b);
  if (t != Type::TOP) {
    const TypeInt *ti = t->is_int();
    if (ti->is_con()) {
      int divisor = ti->get_con();
      // check for positive power of 2
      if (divisor > 0 &&
          (divisor & ~(divisor-1)) == divisor) {
        // yes !
        Node *mask = _gvn.intcon((divisor - 1));
        // Sigh, must handle negative dividends
        Node *zero = _gvn.intcon(0);
        IfNode *ifff = jump_if_fork_int(a, zero, BoolTest::lt, PROB_FAIR, COUNT_UNKNOWN);
        Node *iff = _gvn.transform( new IfFalseNode(ifff) );
        Node *ift = _gvn.transform( new IfTrueNode (ifff) );
        Node *reg = jump_if_join(ift, iff);
        Node *phi = PhiNode::make(reg, NULL, TypeInt::INT);
        // Negative path; negate/and/negate
        Node *neg = _gvn.transform( new SubINode(zero, a) );
        Node *andn = _gvn.transform( new AndINode(neg, mask) );
        Node *negn = _gvn.transform( new SubINode(zero, andn) );
        phi->init_req(1, negn);
        // Fast positive case
        Node *andx = _gvn.transform( new AndINode(a, mask) );
        phi->init_req(2, andx);
        // Push the merge
        push( _gvn.transform(phi) );
        return;
      }
    }
  }
  // Default case
  push( _gvn.transform( new ModINode(control(), a, b) ) );
}

// Handle jsr and jsr_w bytecode
void Parse::do_jsr() {
  assert(bc() == Bytecodes::_jsr || bc() == Bytecodes::_jsr_w, "wrong bytecode");

  // Store information about current state, tagged with new _jsr_bci
  int return_bci = iter().next_bci();
  int jsr_bci    = (bc() == Bytecodes::_jsr) ? iter().get_dest() : iter().get_far_dest();

  // Update method data
  profile_taken_branch(jsr_bci);

  // The way we do things now, there is only one successor block
  // for the jsr, because the target code is cloned by ciTypeFlow.
  Block* target = successor_for_bci(jsr_bci);

  // What got pushed?
  const Type* ret_addr = target->peek();
  assert(ret_addr->singleton(), "must be a constant (cloned jsr body)");

  // Effect of jsr on stack
  push(_gvn.makecon(ret_addr));

  // Flow to the jsr.
  merge(jsr_bci);
}

// Handle ret bytecode
void Parse::do_ret() {
  // Find to whom we return.
  assert(block()->num_successors() == 1, "a ret can only go one place now");
  Block* target = block()->successor_at(0);
  assert(!target->is_ready(), "our arrival must be expected");
  profile_ret(target->flow()->start());
  int pnum = target->next_path_num();
  merge_common(target, pnum);
}

static bool has_injected_profile(BoolTest::mask btest, Node* test, int& taken, int& not_taken) {
  if (btest != BoolTest::eq && btest != BoolTest::ne) {
    // Only ::eq and ::ne are supported for profile injection.
    return false;
  }
  if (test->is_Cmp() &&
      test->in(1)->Opcode() == Op_ProfileBoolean) {
    ProfileBooleanNode* profile = (ProfileBooleanNode*)test->in(1);
    int false_cnt = profile->false_count();
    int true_cnt  = profile->true_count();

    // Counts matching depends on the actual test operation (::eq or ::ne).
    // No need to scale the counts because profile injection was designed
    // to feed exact counts into VM.
    taken     = (btest == BoolTest::eq) ? false_cnt : true_cnt;
    not_taken = (btest == BoolTest::eq) ? true_cnt : false_cnt;

    profile->consume();
    return true;
  }
  return false;
}
//--------------------------dynamic_branch_prediction--------------------------
// Try to gather dynamic branch prediction behavior.  Return a probability
// of the branch being taken and set the "cnt" field.  Returns a -1.0
// if we need to use static prediction for some reason.
float Parse::dynamic_branch_prediction(float &cnt, BoolTest::mask btest, Node* test) {
  ResourceMark rm;

  cnt  = COUNT_UNKNOWN;

  int     taken = 0;
  int not_taken = 0;

  bool use_mdo = !has_injected_profile(btest, test, taken, not_taken);

  if (use_mdo) {
    // Use MethodData information if it is available
    // FIXME: free the ProfileData structure
    ciMethodData* methodData = method()->method_data();
    if (!methodData->is_mature())  return PROB_UNKNOWN;
    ciProfileData* data = methodData->bci_to_data(bci());
    if (data == NULL) {
      return PROB_UNKNOWN;
    }
    if (!data->is_JumpData())  return PROB_UNKNOWN;

    // get taken and not taken values
    taken = data->as_JumpData()->taken();
    not_taken = 0;
    if (data->is_BranchData()) {
      not_taken = data->as_BranchData()->not_taken();
    }

    // scale the counts to be commensurate with invocation counts:
    taken = method()->scale_count(taken);
    not_taken = method()->scale_count(not_taken);
  }

  // Give up if too few (or too many, in which case the sum will overflow) counts to be meaningful.
  // We check the individual counters for negative values first, since an
  // overflowed (negative) counter could otherwise still produce a positive sum.
  if (taken < 0 || not_taken < 0 || taken + not_taken < 40) {
    if (C->log() != NULL) {
      C->log()->elem("branch target_bci='%d' taken='%d' not_taken='%d'", iter().get_dest(), taken, not_taken);
    }
    return PROB_UNKNOWN;
  }

  // Compute frequency that we arrive here
  float sum = taken + not_taken;
  // Adjust, if this block is a cloned private block but the
  // Jump counts are shared.  Take the private counts for
  // just this path instead of the shared counts.
  if( block()->count() > 0 )
    sum = block()->count();
  cnt = sum / FreqCountInvocations;

  // Pin probability to sane limits
  float prob;
  if( !taken )
    prob = (0+PROB_MIN) / 2;
  else if( !not_taken )
    prob = (1+PROB_MAX) / 2;
  else {                         // Compute probability of true path
    prob = (float)taken / (float)(taken + not_taken);
    if (prob > PROB_MAX)  prob = PROB_MAX;
    if (prob < PROB_MIN)  prob = PROB_MIN;
  }

  assert((cnt > 0.0f) && (prob > 0.0f),
         "Bad frequency assignment in if");

  if (C->log() != NULL) {
    const char* prob_str = NULL;
    if (prob >= PROB_MAX)  prob_str = (prob == PROB_MAX) ? "max" : "always";
    if (prob <= PROB_MIN)  prob_str = (prob == PROB_MIN) ? "min" : "never";
"min" : "never"; 1514 char prob_str_buf[30]; 1515 if (prob_str == NULL) { 1516 jio_snprintf(prob_str_buf, sizeof(prob_str_buf), "%20.2f", prob); 1517 prob_str = prob_str_buf; 1518 } 1519 C->log()->elem("branch target_bci='%d' taken='%d' not_taken='%d' cnt='%f' prob='%s'", 1520 iter().get_dest(), taken, not_taken, cnt, prob_str); 1521 } 1522 return prob; 1523 } 1524 1525 //-----------------------------branch_prediction------------------------------- 1526 float Parse::branch_prediction(float& cnt, 1527 BoolTest::mask btest, 1528 int target_bci, 1529 Node* test) { 1530 float prob = dynamic_branch_prediction(cnt, btest, test); 1531 // If prob is unknown, switch to static prediction 1532 if (prob != PROB_UNKNOWN) return prob; 1533 1534 prob = PROB_FAIR; // Set default value 1535 if (btest == BoolTest::eq) // Exactly equal test? 1536 prob = PROB_STATIC_INFREQUENT; // Assume its relatively infrequent 1537 else if (btest == BoolTest::ne) 1538 prob = PROB_STATIC_FREQUENT; // Assume its relatively frequent 1539 1540 // If this is a conditional test guarding a backwards branch, 1541 // assume its a loop-back edge. Make it a likely taken branch. 1542 if (target_bci < bci()) { 1543 if (is_osr_parse()) { // Could be a hot OSR'd loop; force deopt 1544 // Since it's an OSR, we probably have profile data, but since 1545 // branch_prediction returned PROB_UNKNOWN, the counts are too small. 1546 // Let's make a special check here for completely zero counts. 1547 ciMethodData* methodData = method()->method_data(); 1548 if (!methodData->is_empty()) { 1549 ciProfileData* data = methodData->bci_to_data(bci()); 1550 // Only stop for truly zero counts, which mean an unknown part 1551 // of the OSR-ed method, and we want to deopt to gather more stats. 1552 // If you have ANY counts, then this loop is simply 'cold' relative 1553 // to the OSR loop. 1554 if (data == NULL || 1555 (data->as_BranchData()->taken() + data->as_BranchData()->not_taken() == 0)) { 1556 // This is the only way to return PROB_UNKNOWN: 1557 return PROB_UNKNOWN; 1558 } 1559 } 1560 } 1561 prob = PROB_STATIC_FREQUENT; // Likely to take backwards branch 1562 } 1563 1564 assert(prob != PROB_UNKNOWN, "must have some guess at this point"); 1565 return prob; 1566 } 1567 1568 // The magic constants are chosen so as to match the output of 1569 // branch_prediction() when the profile reports a zero taken count. 1570 // It is important to distinguish zero counts unambiguously, because 1571 // some branches (e.g., _213_javac.Assembler.eliminate) validly produce 1572 // very small but nonzero probabilities, which if confused with zero 1573 // counts would keep the program recompiling indefinitely. 1574 bool Parse::seems_never_taken(float prob) const { 1575 return prob < PROB_MIN; 1576 } 1577 1578 // True if the comparison seems to be the kind that will not change its 1579 // statistics from true to false. See comments in adjust_map_after_if. 1580 // This question is only asked along paths which are already 1581 // classifed as untaken (by seems_never_taken), so really, 1582 // if a path is never taken, its controlling comparison is 1583 // already acting in a stable fashion. If the comparison 1584 // seems stable, we will put an expensive uncommon trap 1585 // on the untaken path. 

// True if the comparison seems to be the kind that will not change its
// statistics from true to false.  See comments in adjust_map_after_if.
// This question is only asked along paths which are already
// classified as untaken (by seems_never_taken), so really,
// if a path is never taken, its controlling comparison is
// already acting in a stable fashion.  If the comparison
// seems stable, we will put an expensive uncommon trap
// on the untaken path.
bool Parse::seems_stable_comparison() const {
  if (C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if)) {
    return false;
  }
  return true;
}

//-------------------------------repush_if_args--------------------------------
// Push arguments of an "if" bytecode back onto the stack by adjusting _sp.
inline int Parse::repush_if_args() {
  if (PrintOpto && WizardMode) {
    tty->print("defending against excessive implicit null exceptions on %s @%d in ",
               Bytecodes::name(iter().cur_bc()), iter().cur_bci());
    method()->print_name(); tty->cr();
  }
  int bc_depth = - Bytecodes::depth(iter().cur_bc());
  assert(bc_depth == 1 || bc_depth == 2, "only two kinds of branches");
  DEBUG_ONLY(sync_jvms());   // argument(n) requires a synced jvms
  assert(argument(0) != NULL, "must exist");
  assert(bc_depth == 1 || argument(1) != NULL, "two must exist");
  inc_sp(bc_depth);
  return bc_depth;
}
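// Java-level example (illustrative): a source test like 'x == null' reaches
// do_ifnull() below as an ifnull or ifnonnull bytecode (which one depends on
// how javac lays out the branches); do_one_bytecode's handle_if_null pushes
// the null constant and builds the CmpP before dispatching here.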

//----------------------------------do_ifnull----------------------------------
void Parse::do_ifnull(BoolTest::mask btest, Node *c) {
  int target_bci = iter().get_dest();

  Block* branch_block = successor_for_bci(target_bci);
  Block* next_block   = successor_for_bci(iter().next_bci());

  float cnt;
  float prob = branch_prediction(cnt, btest, target_bci, c);
  if (prob == PROB_UNKNOWN) {
    // (An earlier version of do_ifnull omitted this trap for OSR methods.)
    if (PrintOpto && Verbose) {
      tty->print_cr("Never-taken edge stops compilation at bci %d", bci());
    }
    repush_if_args(); // to gather stats on loop
    // We need to mark this branch as taken so that if we recompile we will
    // see that it is possible.  In the tiered system the interpreter doesn't
    // do profiling and by the time we get to the lower tier from the interpreter
    // the path may be cold again.  Make sure it doesn't look untaken.
    profile_taken_branch(target_bci, !ProfileInterpreter);
    uncommon_trap(Deoptimization::Reason_unreached,
                  Deoptimization::Action_reinterpret,
                  NULL, "cold");
    if (C->eliminate_boxing()) {
      // Mark the successor blocks as parsed
      branch_block->next_path_num();
      next_block->next_path_num();
    }
    return;
  }

  NOT_PRODUCT(explicit_null_checks_inserted++);

  // Generate real control flow
  Node   *tst = _gvn.transform( new BoolNode( c, btest ) );

  // Sanity check the probability value
  assert(prob > 0.0f,"Bad probability in Parser");
  // Need xform to put node in hash table
  IfNode *iff = create_and_xform_if( control(), tst, prob, cnt );
  assert(iff->_prob > 0.0f,"Optimizer made bad probability in parser");
  // True branch
  { PreserveJVMState pjvms(this);
    Node* iftrue  = _gvn.transform( new IfTrueNode (iff) );
    set_control(iftrue);

    if (stopped()) {            // Path is dead?
      NOT_PRODUCT(explicit_null_checks_elided++);
      if (C->eliminate_boxing()) {
        // Mark the successor block as parsed
        branch_block->next_path_num();
      }
    } else {                    // Path is live.
      // Update method data
      profile_taken_branch(target_bci);
      adjust_map_after_if(btest, c, prob, branch_block);
      if (!stopped()) {
        merge(target_bci);
      }
    }
  }

  // False branch
  Node* iffalse = _gvn.transform( new IfFalseNode(iff) );
  set_control(iffalse);

  if (stopped()) {              // Path is dead?
    NOT_PRODUCT(explicit_null_checks_elided++);
    if (C->eliminate_boxing()) {
      // Mark the successor block as parsed
      next_block->next_path_num();
    }
  } else  {                     // Path is live.
    // Update method data
    profile_not_taken_branch();
    adjust_map_after_if(BoolTest(btest).negate(), c, 1.0-prob, next_block);
  }
}

//------------------------------------do_if------------------------------------
void Parse::do_if(BoolTest::mask btest, Node* c, bool new_path, Node** ctrl_taken) {
  int target_bci = iter().get_dest();

  Block* branch_block = successor_for_bci(target_bci);
  Block* next_block   = successor_for_bci(iter().next_bci());

  float cnt;
  float prob = branch_prediction(cnt, btest, target_bci, c);
  float untaken_prob = 1.0 - prob;

  if (prob == PROB_UNKNOWN) {
    if (PrintOpto && Verbose) {
      tty->print_cr("Never-taken edge stops compilation at bci %d", bci());
    }
    repush_if_args(); // to gather stats on loop
    // We need to mark this branch as taken so that if we recompile we will
    // see that it is possible.  In the tiered system the interpreter doesn't
    // do profiling and by the time we get to the lower tier from the interpreter
    // the path may be cold again.  Make sure it doesn't look untaken.
    profile_taken_branch(target_bci, !ProfileInterpreter);
    uncommon_trap(Deoptimization::Reason_unreached,
                  Deoptimization::Action_reinterpret,
                  NULL, "cold");
    if (C->eliminate_boxing()) {
      // Mark the successor blocks as parsed
      branch_block->next_path_num();
      next_block->next_path_num();
    }
    return;
  }

  // Sanity check the probability value
  assert(0.0f < prob && prob < 1.0f,"Bad probability in Parser");

  bool taken_if_true = true;
  // Convert BoolTest to canonical form:
  if (!BoolTest(btest).is_canonical()) {
    btest         = BoolTest(btest).negate();
    taken_if_true = false;
    // prob is NOT updated here; it remains the probability of the taken
    // path (as opposed to the prob of the path guarded by an 'IfTrueNode').
  }
  assert(btest != BoolTest::eq, "!= is the only canonical exact test");

  Node* tst0 = new BoolNode(c, btest);
  Node* tst = _gvn.transform(tst0);
  BoolTest::mask taken_btest   = BoolTest::illegal;
  BoolTest::mask untaken_btest = BoolTest::illegal;

  if (tst->is_Bool()) {
    // Refresh c from the transformed bool node, since it may be
    // simpler than the original c.  Also re-canonicalize btest.
    // This wins when (Bool ne (Conv2B p) 0) => (Bool ne (CmpP p NULL)).
    // That can arise from statements like: if (x instanceof C) ...
    if (tst != tst0) {
      // Canonicalize one more time since transform can change it.
      btest = tst->as_Bool()->_test._test;
      if (!BoolTest(btest).is_canonical()) {
        // Reverse edges one more time...
        tst = _gvn.transform( tst->as_Bool()->negate(&_gvn) );
        btest = tst->as_Bool()->_test._test;
        assert(BoolTest(btest).is_canonical(), "sanity");
        taken_if_true = !taken_if_true;
      }
      c = tst->in(1);
    }
    BoolTest::mask neg_btest = BoolTest(btest).negate();
    taken_btest   = taken_if_true ?     btest : neg_btest;
    untaken_btest = taken_if_true ? neg_btest :     btest;
  }
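  // Example (illustrative): for ifge the test is not canonical and is
  // negated to lt with taken_if_true == false, so after the projections
  // are swapped below, the branch target hangs off the IfFalse projection
  // and the fall-through path off the IfTrue projection.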

  // Generate real control flow
  float true_prob = (taken_if_true ? prob : untaken_prob);
  IfNode* iff = create_and_map_if(control(), tst, true_prob, cnt);
  assert(iff->_prob > 0.0f,"Optimizer made bad probability in parser");
  Node* taken_branch   = new IfTrueNode(iff);
  Node* untaken_branch = new IfFalseNode(iff);
  if (!taken_if_true) {  // Finish conversion to canonical form
    Node* tmp      = taken_branch;
    taken_branch   = untaken_branch;
    untaken_branch = tmp;
  }

  // Branch is taken:
  { PreserveJVMState pjvms(this);
    taken_branch = _gvn.transform(taken_branch);
    set_control(taken_branch);

    if (stopped()) {
      if (C->eliminate_boxing() && !new_path) {
        // Mark the successor block as parsed (if we haven't created a new path)
        branch_block->next_path_num();
      }
    } else {
      // Update method data
      profile_taken_branch(target_bci);
      adjust_map_after_if(taken_btest, c, prob, branch_block);
      if (!stopped()) {
        if (new_path) {
          // Merge by using a new path
          merge_new_path(target_bci);
        } else if (ctrl_taken != NULL) {
          // Don't merge but save taken branch to be wired by caller
          *ctrl_taken = control();
        } else {
          merge(target_bci);
        }
      }
    }
  }

  untaken_branch = _gvn.transform(untaken_branch);
  set_control(untaken_branch);

  // Branch not taken.
  if (stopped() && ctrl_taken == NULL) {
    if (C->eliminate_boxing()) {
      // Mark the successor block as parsed (if caller does not re-wire control flow)
      next_block->next_path_num();
    }
  } else {
    // Update method data
    profile_not_taken_branch();
    adjust_map_after_if(untaken_btest, c, untaken_prob, next_block);
  }
}

void Parse::do_acmp(BoolTest::mask btest, Node* a, Node* b) {
  ciMethod* subst_method = ciEnv::current()->ValueBootstrapMethods_klass()->find_method(ciSymbol::isSubstitutable_name(), ciSymbol::object_object_boolean_signature());
  // If the current method is ValueBootstrapMethods::isSubstitutable(),
  // compile the acmp as a regular pointer comparison; otherwise we
  // could call ValueBootstrapMethods::isSubstitutable() back.
  if (ACmpOnValues == 0 || method() == subst_method) {
    Node* cmp = CmpP(a, b);
    cmp = optimize_cmp_with_klass(cmp);
    do_if(btest, cmp);
    return;
  }

  if (ACmpOnValues == 3) {
    // Substitutability test
    if (a->is_ValueType()) {
      inc_sp(2);
      a = a->as_ValueType()->allocate(this, true)->get_oop();
      dec_sp(2);
    }
    if (b->is_ValueType()) {
      inc_sp(2);
      b = b->as_ValueType()->allocate(this, true)->get_oop();
      dec_sp(2);
    }

    const TypeOopPtr* ta = _gvn.type(a)->isa_oopptr();
    const TypeOopPtr* tb = _gvn.type(b)->isa_oopptr();

    if (ta == NULL || !ta->can_be_value_type_raw() ||
        tb == NULL || !tb->can_be_value_type_raw()) {
      Node* cmp = CmpP(a, b);
      cmp = optimize_cmp_with_klass(cmp);
      do_if(btest, cmp);
      return;
    }

    Node* cmp = CmpP(a, b);
    cmp = optimize_cmp_with_klass(cmp);
    Node* eq_region = NULL;
    if (btest == BoolTest::eq) {
      do_if(btest, cmp, true);
      if (stopped()) {
        return;
      }
    } else {
      assert(btest == BoolTest::ne, "only eq or ne");
      Node* is_not_equal = NULL;
      eq_region = new RegionNode(3);
      {
        PreserveJVMState pjvms(this);
        do_if(btest, cmp, false, &is_not_equal);
        if (!stopped()) {
          eq_region->init_req(1, control());
        }
      }
      if (is_not_equal == NULL || is_not_equal->is_top()) {
        record_for_igvn(eq_region);
        set_control(_gvn.transform(eq_region));
        return;
      }
      set_control(is_not_equal);
    }
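    // For orientation (summary of the control flow below, no new logic):
    // ne_region collects the five ways the operands can be decided unequal:
    //   1: 'a' is null        2: 'a' is not a value type
    //   3: 'b' is null        4: the two klasses differ
    //   5: isSubstitutable() returned false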
    // Pointers not equal, check for values
    Node* ne_region = new RegionNode(6);
    inc_sp(2);
    Node* null_ctl = top();
    Node* not_null_a = null_check_oop(a, &null_ctl, !too_many_traps(Deoptimization::Reason_null_check), false, false);
    dec_sp(2);
    ne_region->init_req(1, null_ctl);
    if (stopped()) {
      record_for_igvn(ne_region);
      set_control(_gvn.transform(ne_region));
      if (btest == BoolTest::ne) {
        {
          PreserveJVMState pjvms(this);
          int target_bci = iter().get_dest();
          merge(target_bci);
        }
        record_for_igvn(eq_region);
        set_control(_gvn.transform(eq_region));
      }
      return;
    }

    Node* is_value = is_always_locked(not_null_a);
    Node* value_mask = _gvn.MakeConX(markOopDesc::always_locked_pattern);
    Node* is_value_cmp = _gvn.transform(new CmpXNode(is_value, value_mask));
    Node* is_value_bol = _gvn.transform(new BoolNode(is_value_cmp, BoolTest::ne));
    IfNode* is_value_iff = create_and_map_if(control(), is_value_bol, PROB_FAIR, COUNT_UNKNOWN);
    Node* not_value = _gvn.transform(new IfTrueNode(is_value_iff));
    set_control(_gvn.transform(new IfFalseNode(is_value_iff)));
    ne_region->init_req(2, not_value);

    // One of the 2 pointers refers to a value, check if both are of
    // the same class
    inc_sp(2);
    null_ctl = top();
    Node* not_null_b = null_check_oop(b, &null_ctl, !too_many_traps(Deoptimization::Reason_null_check), false, false);
    dec_sp(2);
    ne_region->init_req(3, null_ctl);
    if (stopped()) {
      record_for_igvn(ne_region);
      set_control(_gvn.transform(ne_region));
      if (btest == BoolTest::ne) {
        {
          PreserveJVMState pjvms(this);
          int target_bci = iter().get_dest();
          merge(target_bci);
        }
        record_for_igvn(eq_region);
        set_control(_gvn.transform(eq_region));
      }
      return;
    }
    Node* kls_a = load_object_klass(not_null_a);
    Node* kls_b = load_object_klass(not_null_b);
    Node* kls_cmp = CmpP(kls_a, kls_b);
    Node* kls_bol = _gvn.transform(new BoolNode(kls_cmp, BoolTest::ne));
    IfNode* kls_iff = create_and_map_if(control(), kls_bol, PROB_FAIR, COUNT_UNKNOWN);
    Node* kls_ne = _gvn.transform(new IfTrueNode(kls_iff));
    set_control(_gvn.transform(new IfFalseNode(kls_iff)));
    ne_region->init_req(4, kls_ne);

    if (stopped()) {
      record_for_igvn(ne_region);
      set_control(_gvn.transform(ne_region));
      if (btest == BoolTest::ne) {
        {
          PreserveJVMState pjvms(this);
          int target_bci = iter().get_dest();
          merge(target_bci);
        }
        record_for_igvn(eq_region);
        set_control(_gvn.transform(eq_region));
      }
      return;
    }
    // Both are values of the same class, we need to perform a
    // substitutability test. Delegate to
    // ValueBootstrapMethods::isSubstitutable().
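    // Java-level semantics being implemented (illustrative, assuming a
    // hypothetical inline/value class Point):
    //   Point.of(1, 2) == Point.of(1, 2)   // true: same class, equal fields
    //   Point.of(1, 2) == Point.of(1, 3)   // false
    // The runtime call built below performs this field-wise comparison.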

    Node* ne_io_phi = PhiNode::make(ne_region, i_o());
    Node* mem = reset_memory();
    Node* ne_mem_phi = PhiNode::make(ne_region, mem);

    Node* eq_io_phi = NULL;
    Node* eq_mem_phi = NULL;
    if (eq_region != NULL) {
      eq_io_phi = PhiNode::make(eq_region, i_o());
      eq_mem_phi = PhiNode::make(eq_region, mem);
    }

    set_all_memory(mem);

    kill_dead_locals();
    CallStaticJavaNode *call = new CallStaticJavaNode(C, TypeFunc::make(subst_method), SharedRuntime::get_resolve_static_call_stub(), subst_method, bci());
    call->set_override_symbolic_info(true);
    call->init_req(TypeFunc::Parms, not_null_a);
    call->init_req(TypeFunc::Parms+1, not_null_b);
    inc_sp(2);
    set_edges_for_java_call(call, false, false);
    Node* ret = set_results_for_java_call(call, false, true);
    dec_sp(2);

    // Test the return value of ValueBootstrapMethods::isSubstitutable()
    Node* subst_cmp = _gvn.transform(new CmpINode(ret, intcon(1)));
    if (btest == BoolTest::eq) {
      do_if(btest, subst_cmp);
    } else {
      assert(btest == BoolTest::ne, "only eq or ne");
      Node* is_not_equal = NULL;
      {
        PreserveJVMState pjvms(this);
        do_if(btest, subst_cmp, false, &is_not_equal);
        if (!stopped()) {
          eq_region->init_req(2, control());
          eq_io_phi->init_req(2, i_o());
          eq_mem_phi->init_req(2, reset_memory());
        }
      }
      set_control(is_not_equal);
    }
    ne_region->init_req(5, control());
    ne_io_phi->init_req(5, i_o());
    ne_mem_phi->init_req(5, reset_memory());

    record_for_igvn(ne_region);
    set_control(_gvn.transform(ne_region));
    set_i_o(_gvn.transform(ne_io_phi));
    set_all_memory(_gvn.transform(ne_mem_phi));

    if (btest == BoolTest::ne) {
      {
        PreserveJVMState pjvms(this);
        int target_bci = iter().get_dest();
        merge(target_bci);
      }

      record_for_igvn(eq_region);
      set_control(_gvn.transform(eq_region));
      set_i_o(_gvn.transform(eq_io_phi));
      set_all_memory(_gvn.transform(eq_mem_phi));
    }

    return;
  }
  // In the case where both operands might be value types, we need to
  // use the new acmp implementation. Otherwise, i.e. if one operand
  // is not a value type, we can use the old acmp implementation.
  Node* cmp = C->optimize_acmp(&_gvn, a, b);
  if (cmp != NULL) {
    // Use optimized/old acmp
    cmp = optimize_cmp_with_klass(_gvn.transform(cmp));
    do_if(btest, cmp);
    return;
  }

  Node* ctrl = NULL;
  bool safe_for_replace = true;
  if (ACmpOnValues != 1) {
    // Emit old acmp before new acmp for quick a != b check
    cmp = CmpP(a, b);
    cmp = optimize_cmp_with_klass(_gvn.transform(cmp));
    if (btest == BoolTest::ne) {
      do_if(btest, cmp, true);
      if (stopped()) {
        return; // Never equal
      }
    } else if (btest == BoolTest::eq) {
      Node* is_equal = NULL;
      {
        PreserveJVMState pjvms(this);
        do_if(btest, cmp, false, &is_equal);
        if (!stopped()) {
          // Not equal, skip valuetype check
          ctrl = new RegionNode(3);
          ctrl->init_req(1, control());
          _gvn.set_type(ctrl, Type::CONTROL);
          record_for_igvn(ctrl);
          safe_for_replace = false;
        }
      }
      if (is_equal == NULL) {
        assert(ctrl != NULL, "no control left");
        set_control(_gvn.transform(ctrl));
        return; // Never equal
      }
      set_control(is_equal);
    }
  }

  // Null check operand before loading the is_value bit
  bool speculate = false;
  if (!TypePtr::NULL_PTR->higher_equal(_gvn.type(b))) {
    // Operand 'b' is never null, swap operands to avoid null check
    swap(a, b);
  } else if (!too_many_traps(Deoptimization::Reason_speculate_null_check)) {
    // Speculate on non-nullness of one operand
    if (!_gvn.type(a)->speculative_maybe_null()) {
      speculate = true;
    } else if (!_gvn.type(b)->speculative_maybe_null()) {
      speculate = true;
      swap(a, b);
    }
  }
  inc_sp(2);
  Node* null_ctl = top();
  Node* not_null_a = null_check_oop(a, &null_ctl, speculate, safe_for_replace, speculate);
  assert(!stopped(), "operand is always null");
  dec_sp(2);
  Node* region = new RegionNode(2);
  Node* is_value = new PhiNode(region, TypeX_X);
  if (null_ctl != top()) {
    assert(!speculate, "should never be null");
    region->add_req(null_ctl);
    is_value->add_req(_gvn.MakeConX(0));
  }

  Node* value_mask = _gvn.MakeConX(markOopDesc::always_locked_pattern);
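  // Sketch of the bit trick below (assuming a 64-bit mark word): when the
  // mark contains always_locked_pattern, (~mark & value_mask) == 0, the
  // subtraction gives 0 - 1 == -1, and the arithmetic shift by 63 smears
  // that to all ones.  Otherwise (~mark & value_mask) != 0, the subtraction
  // stays non-negative, and the shift yields 0.  So is_value is -1 for a
  // value type and 0 for an ordinary object; in the ACmpOnValues == 1 mode
  // the AddP then offsets the oop by -1 so the pointer comparison must fail.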
  if (ACmpOnValues == 1) {
    Node* mark_addr = basic_plus_adr(not_null_a, oopDesc::mark_offset_in_bytes());
    Node* mark = make_load(NULL, mark_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
    Node* not_mark = _gvn.transform(new XorXNode(mark, _gvn.MakeConX(-1)));
    Node* andn = _gvn.transform(new AndXNode(not_mark, value_mask));
    Node* neg_if_value = _gvn.transform(new SubXNode(andn, _gvn.MakeConX(1)));
    is_value->init_req(1, _gvn.transform(new RShiftXNode(neg_if_value, _gvn.intcon(63))));
  } else {
    is_value->init_req(1, is_always_locked(not_null_a));
  }
  region->init_req(1, control());

  set_control(_gvn.transform(region));
  is_value = _gvn.transform(is_value);

  if (ACmpOnValues == 1) {
    // Perturb oop if operand is a value type to make comparison fail
    Node* pert = _gvn.transform(new AddPNode(a, a, is_value));
    cmp = _gvn.transform(new CmpPNode(pert, b));
  } else {
    // Check for a value type because we already know that operands are equal
    cmp = _gvn.transform(new CmpXNode(is_value, value_mask));
    btest = (btest == BoolTest::eq) ? BoolTest::ne : BoolTest::eq;
  }
  cmp = optimize_cmp_with_klass(cmp);
  do_if(btest, cmp);

  if (ctrl != NULL) {
    ctrl->init_req(2, control());
    set_control(_gvn.transform(ctrl));
  }
}

bool Parse::path_is_suitable_for_uncommon_trap(float prob) const {
  // Don't want to speculate on uncommon traps when running with -Xcomp
  if (!UseInterpreter) {
    return false;
  }
  return (seems_never_taken(prob) && seems_stable_comparison());
}

void Parse::maybe_add_predicate_after_if(Block* path) {
  if (path->is_SEL_head() && path->preds_parsed() == 0) {
    // Add predicates at bci of if dominating the loop so traps can be
    // recorded on the if's profile data
    int bc_depth = repush_if_args();
    add_predicate();
    dec_sp(bc_depth);
    path->set_has_predicates();
  }
}


//----------------------------adjust_map_after_if------------------------------
// Adjust the JVM state to reflect the result of taking this path.
// Basically, it means inspecting the CmpNode controlling this
// branch, seeing how it constrains a tested value, and then
// deciding if it's worth our while to encode this constraint
// as graph nodes in the current abstract interpretation map.
void Parse::adjust_map_after_if(BoolTest::mask btest, Node* c, float prob, Block* path) {
  if (!c->is_Cmp()) {
    maybe_add_predicate_after_if(path);
    return;
  }

  if (stopped() || btest == BoolTest::illegal) {
    return;                             // nothing to do
  }

  bool is_fallthrough = (path == successor_for_bci(iter().next_bci()));

  if (path_is_suitable_for_uncommon_trap(prob)) {
    repush_if_args();
    uncommon_trap(Deoptimization::Reason_unstable_if,
                  Deoptimization::Action_reinterpret,
                  NULL,
                  (is_fallthrough ? "taken always" : "taken never"));
    return;
  }

  Node* val = c->in(1);
  Node* con = c->in(2);
  const Type* tcon = _gvn.type(con);
  const Type* tval = _gvn.type(val);
  bool have_con = tcon->singleton();
  if (tval->singleton()) {
    if (!have_con) {
      // Swap, so constant is in con.
      con  = val;
      tcon = tval;
      val  = c->in(2);
      tval = _gvn.type(val);
      btest = BoolTest(btest).commute();
      have_con = true;
    } else {
      // Do we have two constants?  Then leave well enough alone.
      have_con = false;
    }
  }
  if (!have_con) {                        // remaining adjustments need a con
    maybe_add_predicate_after_if(path);
    return;
  }

  sharpen_type_after_if(btest, con, tcon, val, tval);
  maybe_add_predicate_after_if(path);
}


static Node* extract_obj_from_klass_load(PhaseGVN* gvn, Node* n) {
  Node* ldk;
  if (n->is_DecodeNKlass()) {
    if (n->in(1)->Opcode() != Op_LoadNKlass) {
      return NULL;
    } else {
      ldk = n->in(1);
    }
  } else if (n->Opcode() != Op_LoadKlass) {
    return NULL;
  } else {
    ldk = n;
  }
  assert(ldk != NULL && ldk->is_Load(), "should have found a LoadKlass or LoadNKlass node");

  Node* adr = ldk->in(MemNode::Address);
  intptr_t off = 0;
  Node* obj = AddPNode::Ideal_base_and_offset(adr, gvn, off);
  if (obj == NULL || off != oopDesc::klass_offset_in_bytes()) // loading oopDesc::_klass?
    return NULL;
  const TypePtr* tp = gvn->type(obj)->is_ptr();
  if (tp == NULL || !(tp->isa_instptr() || tp->isa_aryptr())) // is obj a Java object ptr?
    return NULL;

  return obj;
}
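// Example of the sharpening performed below (illustrative): after a test
// such as
//   if (x.getClass() == Foo.class) { ... }
// the true path replaces 'x' in the JVM state with a CheckCastPP to the
// exact type Foo, so later accesses along that path see the sharper type.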

void Parse::sharpen_type_after_if(BoolTest::mask btest,
                                  Node* con, const Type* tcon,
                                  Node* val, const Type* tval) {
  // Look for opportunities to sharpen the type of a node
  // whose klass is compared with a constant klass.
  if (btest == BoolTest::eq && tcon->isa_klassptr()) {
    Node* obj = extract_obj_from_klass_load(&_gvn, val);
    const TypeOopPtr* con_type = tcon->isa_klassptr()->as_instance_type();
    if (obj != NULL && (con_type->isa_instptr() || con_type->isa_aryptr())) {
       // Found:
       //   Bool(CmpP(LoadKlass(obj._klass), ConP(Foo.klass)), [eq])
       // or the narrowOop equivalent.
       const Type* obj_type = _gvn.type(obj);
       const TypeOopPtr* tboth = obj_type->join_speculative(con_type)->isa_oopptr();
       if (tboth != NULL && tboth->klass_is_exact() && tboth != obj_type &&
           tboth->higher_equal(obj_type)) {
          // obj has to be of the exact type Foo if the CmpP succeeds.
          int obj_in_map = map()->find_edge(obj);
          JVMState* jvms = this->jvms();
          if (obj_in_map >= 0 &&
              (jvms->is_loc(obj_in_map) || jvms->is_stk(obj_in_map))) {
            TypeNode* ccast = new CheckCastPPNode(control(), obj, tboth);
            const Type* tcc = ccast->as_Type()->type();
            assert(tcc != obj_type && tcc->higher_equal(obj_type), "must improve");
            // Delay transform() call to allow recovery of pre-cast value
            // at the control merge.
            _gvn.set_type_bottom(ccast);
            record_for_igvn(ccast);
            // Here's the payoff.
            replace_in_map(obj, ccast);
          }
       }
    }
  }

  int val_in_map = map()->find_edge(val);
  if (val_in_map < 0)  return;          // replace_in_map would be useless
  {
    JVMState* jvms = this->jvms();
    if (!(jvms->is_loc(val_in_map) ||
          jvms->is_stk(val_in_map)))
      return;                           // again, it would be useless
  }

  // Check for a comparison to a constant, and "know" that the compared
  // value is constrained on this path.
  assert(tcon->singleton(), "");
  ConstraintCastNode* ccast = NULL;
  Node* cast = NULL;

  switch (btest) {
  case BoolTest::eq:                    // Constant test?
    {
      const Type* tboth = tcon->join_speculative(tval);
      if (tboth == tval)  break;        // Nothing to gain.
      if (tcon->isa_int()) {
        ccast = new CastIINode(val, tboth);
      } else if (tcon == TypePtr::NULL_PTR) {
        // Cast to null, but keep the pointer identity temporarily live.
        ccast = new CastPPNode(val, tboth);
      } else {
        const TypeF* tf = tcon->isa_float_constant();
        const TypeD* td = tcon->isa_double_constant();
        // Exclude tests vs float/double 0 as these could be
        // either +0 or -0.  Just because you are equal to +0
        // doesn't mean you ARE +0!
        // Note, following code also replaces Long and Oop values.
        if ((!tf || tf->_f != 0.0) &&
            (!td || td->_d != 0.0))
          cast = con;                   // Replace non-constant val by con.
      }
    }
    break;

  case BoolTest::ne:
    if (tcon == TypePtr::NULL_PTR) {
      cast = cast_not_null(val, false);
    }
    break;

  default:
    // (At this point we could record int range types with CastII.)
    break;
  }

  if (ccast != NULL) {
    const Type* tcc = ccast->as_Type()->type();
    assert(tcc != tval && tcc->higher_equal(tval), "must improve");
    // Delay transform() call to allow recovery of pre-cast value
    // at the control merge.
    ccast->set_req(0, control());
    _gvn.set_type_bottom(ccast);
    record_for_igvn(ccast);
    cast = ccast;
  }

  if (cast != NULL) {                   // Here's the payoff.
    replace_in_map(val, cast);
  }
}

/**
 * Use speculative type to optimize CmpP node: if comparison is
 * against the low level class, cast the object to the speculative
 * type if any. CmpP should then go away.
 *
 * @param c  expected CmpP node
 * @return   result of CmpP on object cast to speculative type
 *
 */
Node* Parse::optimize_cmp_with_klass(Node* c) {
  // If this is transformed by the _gvn to a comparison with the low
  // level klass then we may be able to use speculation
  if (c->Opcode() == Op_CmpP &&
      (c->in(1)->Opcode() == Op_LoadKlass || c->in(1)->Opcode() == Op_DecodeNKlass) &&
      c->in(2)->is_Con()) {
    Node* load_klass = NULL;
    Node* decode = NULL;
    if (c->in(1)->Opcode() == Op_DecodeNKlass) {
      decode = c->in(1);
      load_klass = c->in(1)->in(1);
    } else {
      load_klass = c->in(1);
    }
    if (load_klass->in(2)->is_AddP()) {
      Node* addp = load_klass->in(2);
      Node* obj = addp->in(AddPNode::Address);
      const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
      if (obj_type->speculative_type_not_null() != NULL) {
        ciKlass* k = obj_type->speculative_type();
        inc_sp(2);
        obj = maybe_cast_profiled_obj(obj, k);
        dec_sp(2);
        if (obj->is_ValueType()) {
          assert(obj->as_ValueType()->is_allocated(&_gvn), "must be allocated");
          obj = obj->as_ValueType()->get_oop();
        }
        // Make the CmpP use the cast obj
        addp = basic_plus_adr(obj, addp->in(AddPNode::Offset));
        load_klass = load_klass->clone();
        load_klass->set_req(2, addp);
        load_klass = _gvn.transform(load_klass);
        if (decode != NULL) {
          decode = decode->clone();
          decode->set_req(1, load_klass);
          load_klass = _gvn.transform(decode);
        }
        c = c->clone();
        c->set_req(1, load_klass);
        c = _gvn.transform(c);
      }
    }
  }
  return c;
}

//------------------------------do_one_bytecode--------------------------------
// Parse this bytecode, and alter the Parser's JVM->Node mapping
void Parse::do_one_bytecode() {
  Node *a, *b, *c, *d;          // Handy temps
  BoolTest::mask btest;
  int i;

  assert(!has_exceptions(), "bytecode entry state must be clear of throws");

  if (C->check_node_count(NodeLimitFudgeFactor * 5,
                          "out of nodes parsing method")) {
    return;
  }

#ifdef ASSERT
  // for setting breakpoints
  if (TraceOptoParse) {
    tty->print(" @");
    dump_bci(bci());
    tty->cr();
  }
#endif

  switch (bc()) {
  case Bytecodes::_nop:
    // do nothing
    break;
  case Bytecodes::_lconst_0:
    push_pair(longcon(0));
    break;

  case Bytecodes::_lconst_1:
    push_pair(longcon(1));
    break;

  case Bytecodes::_fconst_0:
    push(zerocon(T_FLOAT));
    break;

  case Bytecodes::_fconst_1:
    push(makecon(TypeF::ONE));
    break;

  case Bytecodes::_fconst_2:
    push(makecon(TypeF::make(2.0f)));
    break;

  case Bytecodes::_dconst_0:
    push_pair(zerocon(T_DOUBLE));
    break;

  case Bytecodes::_dconst_1:
    push_pair(makecon(TypeD::ONE));
    break;

  case Bytecodes::_iconst_m1:push(intcon(-1)); break;
  case Bytecodes::_iconst_0: push(intcon( 0)); break;
  case Bytecodes::_iconst_1: push(intcon( 1)); break;
  case Bytecodes::_iconst_2: push(intcon( 2)); break;
  case Bytecodes::_iconst_3: push(intcon( 3)); break;
  case Bytecodes::_iconst_4: push(intcon( 4)); break;
  case Bytecodes::_iconst_5: push(intcon( 5)); break;
  case Bytecodes::_bipush:   push(intcon(iter().get_constant_u1())); break;
  case Bytecodes::_sipush:   push(intcon(iter().get_constant_u2())); break;
  case Bytecodes::_aconst_null: push(null());  break;
  case Bytecodes::_ldc:
  case Bytecodes::_ldc_w:
  case Bytecodes::_ldc2_w:
    // If the constant is unresolved, run this BC once in the interpreter.
    {
      ciConstant constant = iter().get_constant();
      if (!constant.is_valid() ||
          (constant.basic_type() == T_OBJECT &&
           !constant.as_object()->is_loaded())) {
        int index = iter().get_constant_pool_index();
        constantTag tag = iter().get_constant_pool_tag(index);
        uncommon_trap(Deoptimization::make_trap_request
                      (Deoptimization::Reason_unloaded,
                       Deoptimization::Action_reinterpret,
                       index),
                      NULL, tag.internal_name());
        break;
      }
      assert(constant.basic_type() != T_OBJECT || constant.as_object()->is_instance(),
             "must be java_mirror of klass");
      const Type* con_type = Type::make_from_constant(constant);
      if (con_type != NULL) {
        push_node(con_type->basic_type(), makecon(con_type));
      }
    }

    break;

  case Bytecodes::_aload_0:
    push( local(0) );
    break;
  case Bytecodes::_aload_1:
    push( local(1) );
    break;
  case Bytecodes::_aload_2:
    push( local(2) );
    break;
  case Bytecodes::_aload_3:
    push( local(3) );
    break;
  case Bytecodes::_aload:
    push( local(iter().get_index()) );
    break;

  case Bytecodes::_fload_0:
  case Bytecodes::_iload_0:
    push( local(0) );
    break;
  case Bytecodes::_fload_1:
  case Bytecodes::_iload_1:
    push( local(1) );
    break;
  case Bytecodes::_fload_2:
  case Bytecodes::_iload_2:
    push( local(2) );
    break;
  case Bytecodes::_fload_3:
  case Bytecodes::_iload_3:
    push( local(3) );
    break;
  case Bytecodes::_fload:
  case Bytecodes::_iload:
    push( local(iter().get_index()) );
    break;
  case Bytecodes::_lload_0:
    push_pair_local( 0 );
    break;
  case Bytecodes::_lload_1:
    push_pair_local( 1 );
    break;
  case Bytecodes::_lload_2:
    push_pair_local( 2 );
    break;
  case Bytecodes::_lload_3:
    push_pair_local( 3 );
    break;
  case Bytecodes::_lload:
    push_pair_local( iter().get_index() );
    break;

  case Bytecodes::_dload_0:
    push_pair_local(0);
    break;
  case Bytecodes::_dload_1:
    push_pair_local(1);
    break;
  case Bytecodes::_dload_2:
    push_pair_local(2);
    break;
  case Bytecodes::_dload_3:
    push_pair_local(3);
    break;
  case Bytecodes::_dload:
    push_pair_local(iter().get_index());
    break;
  case Bytecodes::_fstore_0:
  case Bytecodes::_istore_0:
  case Bytecodes::_astore_0:
    set_local( 0, pop() );
    break;
  case Bytecodes::_fstore_1:
  case Bytecodes::_istore_1:
  case Bytecodes::_astore_1:
    set_local( 1, pop() );
    break;
  case Bytecodes::_fstore_2:
  case Bytecodes::_istore_2:
  case Bytecodes::_astore_2:
    set_local( 2, pop() );
    break;
  case Bytecodes::_fstore_3:
  case Bytecodes::_istore_3:
  case Bytecodes::_astore_3:
    set_local( 3, pop() );
    break;
  case Bytecodes::_fstore:
  case Bytecodes::_istore:
  case Bytecodes::_astore:
    set_local( iter().get_index(), pop() );
    break;
  // long stores
  case Bytecodes::_lstore_0:
    set_pair_local( 0, pop_pair() );
    break;
  case Bytecodes::_lstore_1:
    set_pair_local( 1, pop_pair() );
    break;
  case Bytecodes::_lstore_2:
    set_pair_local( 2, pop_pair() );
    break;
  case Bytecodes::_lstore_3:
    set_pair_local( 3, pop_pair() );
    break;
  case Bytecodes::_lstore:
    set_pair_local( iter().get_index(), pop_pair() );
    break;

  // double stores
  case Bytecodes::_dstore_0:
    set_pair_local( 0, dstore_rounding(pop_pair()) );
    break;
  case Bytecodes::_dstore_1:
    set_pair_local( 1, dstore_rounding(pop_pair()) );
    break;
  case Bytecodes::_dstore_2:
    set_pair_local( 2, dstore_rounding(pop_pair()) );
    break;
  case Bytecodes::_dstore_3:
    set_pair_local( 3, dstore_rounding(pop_pair()) );
    break;
  case Bytecodes::_dstore:
    set_pair_local( iter().get_index(), dstore_rounding(pop_pair()) );
    break;

  case Bytecodes::_pop:  dec_sp(1);   break;
  case Bytecodes::_pop2: dec_sp(2);   break;
  case Bytecodes::_swap:
    a = pop();
    b = pop();
    push(a);
    push(b);
    break;
  case Bytecodes::_dup:
    a = pop();
    push(a);
    push(a);
    break;
  case Bytecodes::_dup_x1:
    a = pop();
    b = pop();
    push( a );
    push( b );
    push( a );
    break;
  case Bytecodes::_dup_x2:
    a = pop();
    b = pop();
    c = pop();
    push( a );
    push( c );
    push( b );
    push( a );
    break;
  case Bytecodes::_dup2:
    a = pop();
    b = pop();
    push( b );
    push( a );
    push( b );
    push( a );
    break;

  case Bytecodes::_dup2_x1:
    // before: .. c, b, a
    // after:  .. b, a, c, b, a
    // not tested
    a = pop();
    b = pop();
    c = pop();
    push( b );
    push( a );
    push( c );
    push( b );
    push( a );
    break;
  case Bytecodes::_dup2_x2:
    // before: .. d, c, b, a
    // after:  .. b, a, d, c, b, a
    // not tested
    a = pop();
    b = pop();
    c = pop();
    d = pop();
    push( b );
    push( a );
    push( d );
    push( c );
    push( b );
    push( a );
    break;

  case Bytecodes::_arraylength: {
    // Must do null-check with value on expression stack
    Node *ary = null_check(peek(), T_ARRAY);
    // Compile-time detect of null-exception?
    if (stopped())  return;
    a = pop();
    push(load_array_length(a));
    break;
  }

  case Bytecodes::_baload:  array_load(T_BYTE);    break;
  case Bytecodes::_caload:  array_load(T_CHAR);    break;
  case Bytecodes::_iaload:  array_load(T_INT);     break;
  case Bytecodes::_saload:  array_load(T_SHORT);   break;
  case Bytecodes::_faload:  array_load(T_FLOAT);   break;
  case Bytecodes::_aaload:  array_load(T_OBJECT);  break;
  case Bytecodes::_laload:  array_load(T_LONG);    break;
  case Bytecodes::_daload:  array_load(T_DOUBLE);  break;
  case Bytecodes::_bastore: array_store(T_BYTE);   break;
  case Bytecodes::_castore: array_store(T_CHAR);   break;
  case Bytecodes::_iastore: array_store(T_INT);    break;
  case Bytecodes::_sastore: array_store(T_SHORT);  break;
  case Bytecodes::_fastore: array_store(T_FLOAT);  break;
  case Bytecodes::_aastore: array_store(T_OBJECT); break;
  case Bytecodes::_lastore: array_store(T_LONG);   break;
  case Bytecodes::_dastore: array_store(T_DOUBLE); break;

  case Bytecodes::_getfield:
    do_getfield();
    break;

  case Bytecodes::_getstatic:
    do_getstatic();
    break;

  case Bytecodes::_putfield:
    do_putfield();
    break;

  case Bytecodes::_putstatic:
    do_putstatic();
    break;

  case Bytecodes::_irem:
    do_irem();
    break;
  case Bytecodes::_idiv:
    // Must keep both values on the expression-stack during zero-check
    zero_check_int(peek());
    // Compile-time detect of division by zero?
    if (stopped())  return;
    b = pop();
    a = pop();
    push( _gvn.transform( new DivINode(control(),a,b) ) );
    break;
  case Bytecodes::_imul:
    b = pop(); a = pop();
    push( _gvn.transform( new MulINode(a,b) ) );
    break;
  case Bytecodes::_iadd:
    b = pop(); a = pop();
    push( _gvn.transform( new AddINode(a,b) ) );
    break;
  case Bytecodes::_ineg:
    a = pop();
    push( _gvn.transform( new SubINode(_gvn.intcon(0),a)) );
    break;
  case Bytecodes::_isub:
    b = pop(); a = pop();
    push( _gvn.transform( new SubINode(a,b) ) );
    break;
  case Bytecodes::_iand:
    b = pop(); a = pop();
    push( _gvn.transform( new AndINode(a,b) ) );
    break;
  case Bytecodes::_ior:
    b = pop(); a = pop();
    push( _gvn.transform( new OrINode(a,b) ) );
    break;
  case Bytecodes::_ixor:
    b = pop(); a = pop();
    push( _gvn.transform( new XorINode(a,b) ) );
    break;
  case Bytecodes::_ishl:
    b = pop(); a = pop();
    push( _gvn.transform( new LShiftINode(a,b) ) );
    break;
  case Bytecodes::_ishr:
    b = pop(); a = pop();
    push( _gvn.transform( new RShiftINode(a,b) ) );
    break;
  case Bytecodes::_iushr:
    b = pop(); a = pop();
    push( _gvn.transform( new URShiftINode(a,b) ) );
    break;

  case Bytecodes::_fneg:
    a = pop();
    b = _gvn.transform(new NegFNode (a));
    push(b);
    break;

  case Bytecodes::_fsub:
    b = pop();
    a = pop();
    c = _gvn.transform( new SubFNode(a,b) );
    d = precision_rounding(c);
    push( d );
    break;

  case Bytecodes::_fadd:
    b = pop();
    a = pop();
    c = _gvn.transform( new AddFNode(a,b) );
    d = precision_rounding(c);
    push( d );
    break;

  case Bytecodes::_fmul:
    b = pop();
    a = pop();
    c = _gvn.transform( new MulFNode(a,b) );
    d = precision_rounding(c);
    push( d );
    break;

  case Bytecodes::_fdiv:
    b = pop();
    a = pop();
    c = _gvn.transform( new DivFNode(0,a,b) );
    d = precision_rounding(c);
    push( d );
    break;

  case Bytecodes::_frem:
    if (Matcher::has_match_rule(Op_ModF)) {
      // Generate a ModF node.
      b = pop();
      a = pop();
      c = _gvn.transform( new ModFNode(0,a,b) );
      d = precision_rounding(c);
      push( d );
    }
    else {
      // Generate a call.
      modf();
    }
    break;

  case Bytecodes::_fcmpl:
    b = pop();
    a = pop();
    c = _gvn.transform( new CmpF3Node( a, b));
    push(c);
    break;
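  // Worked example for the fcmpg flip below (illustrative): with a = 1.0f
  // and b = 2.0f, CmpF3(b, a) yields +1 (b > a), which negates to -1, the
  // correct fcmpg answer for a < b; for an unordered pair CmpF3 yields -1
  // (unordered-lesser), which negates to +1, exactly what fcmpg requires.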
  case Bytecodes::_fcmpg:
    b = pop();
    a = pop();

    // Same as fcmpl but need to flip the unordered case.  Swap the inputs,
    // which negates the result sign except for unordered.  Flip the unordered
    // as well by using CmpF3 which implements unordered-lesser instead of
    // unordered-greater semantics.  Finally, commute the result bits.  Result
    // is same as using a CmpF3Greater except we did it with CmpF3 alone.
    c = _gvn.transform( new CmpF3Node( b, a));
    c = _gvn.transform( new SubINode(_gvn.intcon(0),c) );
    push(c);
    break;

  case Bytecodes::_f2i:
    a = pop();
    push(_gvn.transform(new ConvF2INode(a)));
    break;

  case Bytecodes::_d2i:
    a = pop_pair();
    b = _gvn.transform(new ConvD2INode(a));
    push( b );
    break;

  case Bytecodes::_f2d:
    a = pop();
    b = _gvn.transform( new ConvF2DNode(a));
    push_pair( b );
    break;

  case Bytecodes::_d2f:
    a = pop_pair();
    b = _gvn.transform( new ConvD2FNode(a));
    // This breaks _227_mtrt (speed & correctness) and _222_mpegaudio (speed)
    //b = _gvn.transform(new RoundFloatNode(0, b) );
    push( b );
    break;

  case Bytecodes::_l2f:
    if (Matcher::convL2FSupported()) {
      a = pop_pair();
      b = _gvn.transform( new ConvL2FNode(a));
      // For i486.ad, FILD doesn't restrict precision to 24 or 53 bits.
      // Rather than storing the result into an FP register then pushing
      // out to memory to round, the machine instruction that implements
      // ConvL2D is responsible for rounding.
      // c = precision_rounding(b);
      c = _gvn.transform(b);
      push(c);
    } else {
      l2f();
    }
    break;

  case Bytecodes::_l2d:
    a = pop_pair();
    b = _gvn.transform( new ConvL2DNode(a));
    // For i486.ad, rounding is always necessary (see _l2f above).
    // c = dprecision_rounding(b);
    c = _gvn.transform(b);
    push_pair(c);
    break;

  case Bytecodes::_f2l:
    a = pop();
    b = _gvn.transform( new ConvF2LNode(a));
    push_pair(b);
    break;

  case Bytecodes::_d2l:
    a = pop_pair();
    b = _gvn.transform( new ConvD2LNode(a));
    push_pair(b);
    break;

  case Bytecodes::_dsub:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new SubDNode(a,b) );
    d = dprecision_rounding(c);
    push_pair( d );
    break;

  case Bytecodes::_dadd:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new AddDNode(a,b) );
    d = dprecision_rounding(c);
    push_pair( d );
    break;

  case Bytecodes::_dmul:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new MulDNode(a,b) );
    d = dprecision_rounding(c);
    push_pair( d );
    break;

  case Bytecodes::_ddiv:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new DivDNode(0,a,b) );
    d = dprecision_rounding(c);
    push_pair( d );
    break;

  case Bytecodes::_dneg:
    a = pop_pair();
    b = _gvn.transform(new NegDNode (a));
    push_pair(b);
    break;

  case Bytecodes::_drem:
    if (Matcher::has_match_rule(Op_ModD)) {
      // Generate a ModD node.
      b = pop_pair();
      a = pop_pair();
      // a % b

      c = _gvn.transform( new ModDNode(0,a,b) );
      d = dprecision_rounding(c);
      push_pair( d );
    }
    else {
      // Generate a call.
      modd();
    }
    break;

  case Bytecodes::_dcmpl:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new CmpD3Node( a, b));
    push(c);
    break;

  case Bytecodes::_dcmpg:
    b = pop_pair();
    a = pop_pair();
    // Same as dcmpl but need to flip the unordered case.
    // Commute the inputs, which negates the result sign except for unordered.
    // Flip the unordered as well by using CmpD3 which implements
    // unordered-lesser instead of unordered-greater semantics.
    // Finally, negate the result bits.  Result is same as using a
    // CmpD3Greater except we did it with CmpD3 alone.
    c = _gvn.transform( new CmpD3Node( b, a));
    c = _gvn.transform( new SubINode(_gvn.intcon(0),c) );
    push(c);
    break;


  // Note for longs -> lo word is on TOS, hi word is on TOS - 1
  case Bytecodes::_land:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new AndLNode(a,b) );
    push_pair(c);
    break;
  case Bytecodes::_lor:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new OrLNode(a,b) );
    push_pair(c);
    break;
  case Bytecodes::_lxor:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new XorLNode(a,b) );
    push_pair(c);
    break;

  case Bytecodes::_lshl:
    b = pop();                  // the shift count
    a = pop_pair();             // value to be shifted
    c = _gvn.transform( new LShiftLNode(a,b) );
    push_pair(c);
    break;
  case Bytecodes::_lshr:
    b = pop();                  // the shift count
    a = pop_pair();             // value to be shifted
    c = _gvn.transform( new RShiftLNode(a,b) );
    push_pair(c);
    break;
  case Bytecodes::_lushr:
    b = pop();                  // the shift count
    a = pop_pair();             // value to be shifted
    c = _gvn.transform( new URShiftLNode(a,b) );
    push_pair(c);
    break;
  case Bytecodes::_lmul:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new MulLNode(a,b) );
    push_pair(c);
    break;

  case Bytecodes::_lrem:
    // Must keep both values on the expression-stack during zero-check
    assert(peek(0) == top(), "long word order");
    zero_check_long(peek(1));
    // Compile-time detect of division by zero?
    if (stopped())  return;
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new ModLNode(control(),a,b) );
    push_pair(c);
    break;

  case Bytecodes::_ldiv:
    // Must keep both values on the expression-stack during zero-check
    assert(peek(0) == top(), "long word order");
    zero_check_long(peek(1));
    // Compile-time detect of division by zero?
    if (stopped())  return;
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new DivLNode(control(),a,b) );
    push_pair(c);
    break;

  case Bytecodes::_ladd:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new AddLNode(a,b) );
    push_pair(c);
    break;
  case Bytecodes::_lsub:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new SubLNode(a,b) );
    push_pair(c);
    break;
  case Bytecodes::_lcmp:
    // Safepoints are now inserted _before_ branches.  The long-compare
    // bytecode painfully produces a 3-way value (-1,0,+1) which requires a
    // slew of control flow.  These are usually followed by a CmpI vs zero and
    // a branch; this pattern then optimizes to the obvious long-compare and
    // branch.  However, if the branch is backwards there's a Safepoint
    // inserted.  The inserted Safepoint captures the JVM state at the
    // pre-branch point, i.e. it captures the 3-way value.  Thus if a
    // long-compare is used to control a loop the debug info will force
    // computation of the 3-way value, even though the generated code uses a
    // long-compare and branch.  We try to rectify the situation by inserting
    // a SafePoint here and have it dominate and kill the safepoint added at a
    // following backwards branch.  At this point the JVM state merely holds 2
    // longs but not the 3-way value.
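    // Pattern example (illustrative): 'lcmp; ifle L' normally strength-
    // reduces to a single long-compare plus branch; the safepoint placed
    // here keeps the loop's debug info from forcing materialization of the
    // intermediate -1/0/+1 value.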
    if( UseLoopSafepoints ) {
      switch( iter().next_bc() ) {
      case Bytecodes::_ifgt:
      case Bytecodes::_iflt:
      case Bytecodes::_ifge:
      case Bytecodes::_ifle:
      case Bytecodes::_ifne:
      case Bytecodes::_ifeq:
        // If this is a backwards branch in the bytecodes, add Safepoint
        maybe_add_safepoint(iter().next_get_dest());
      default:
        break;
      }
    }
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new CmpL3Node( a, b ));
    push(c);
    break;

  case Bytecodes::_lneg:
    a = pop_pair();
    b = _gvn.transform( new SubLNode(longcon(0),a));
    push_pair(b);
    break;
  case Bytecodes::_l2i:
    a = pop_pair();
    push( _gvn.transform( new ConvL2INode(a)));
    break;
  case Bytecodes::_i2l:
    a = pop();
    b = _gvn.transform( new ConvI2LNode(a));
    push_pair(b);
    break;
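  // The two-shift idiom below sign-extends the low byte/short in place.
  // Worked example for i2b (illustrative): 0x000000FF << 24 == 0xFF000000,
  // and the arithmetic >> 24 smears the sign bit, giving 0xFFFFFFFF, i.e.
  // -1, the correct (byte)255.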
  case Bytecodes::_i2b:
    // Sign extend
    a = pop();
    a = _gvn.transform( new LShiftINode(a,_gvn.intcon(24)) );
    a = _gvn.transform( new RShiftINode(a,_gvn.intcon(24)) );
    push( a );
    break;
  case Bytecodes::_i2s:
    a = pop();
    a = _gvn.transform( new LShiftINode(a,_gvn.intcon(16)) );
    a = _gvn.transform( new RShiftINode(a,_gvn.intcon(16)) );
    push( a );
    break;
  case Bytecodes::_i2c:
    a = pop();
    push( _gvn.transform( new AndINode(a,_gvn.intcon(0xFFFF)) ) );
    break;

  case Bytecodes::_i2f:
    a = pop();
    b = _gvn.transform( new ConvI2FNode(a) );
    c = precision_rounding(b);
    push(c);
    break;

  case Bytecodes::_i2d:
    a = pop();
    b = _gvn.transform( new ConvI2DNode(a));
    push_pair(b);
    break;

  case Bytecodes::_iinc:        // Increment local
    i = iter().get_index();     // Get local index
    set_local( i, _gvn.transform( new AddINode( _gvn.intcon(iter().get_iinc_con()), local(i) ) ) );
    break;

  // Exit points of synchronized methods must have an unlock node
  case Bytecodes::_return:
    return_current(NULL);
    break;

  case Bytecodes::_ireturn:
  case Bytecodes::_areturn:
  case Bytecodes::_freturn:
    return_current(pop());
    break;
  case Bytecodes::_lreturn:
    return_current(pop_pair());
    break;
  case Bytecodes::_dreturn:
    return_current(pop_pair());
    break;

  case Bytecodes::_athrow:
    // A null exception oop throws a NullPointerException
    null_check(peek());
    if (stopped())  return;
    // Hook the thrown exception directly to subsequent handlers.
    if (BailoutToInterpreterForThrows) {
      // Keep method interpreted from now on.
      uncommon_trap(Deoptimization::Reason_unhandled,
                    Deoptimization::Action_make_not_compilable);
      return;
    }
    if (env()->jvmti_can_post_on_exceptions()) {
      // check if we must post exception events, take uncommon trap if so (with must_throw = false)
      uncommon_trap_if_should_post_on_exceptions(Deoptimization::Reason_unhandled, false);
    }
    // Here if either can_post_on_exceptions or should_post_on_exceptions is false
    add_exception_state(make_exception_state(peek()));
    break;

  case Bytecodes::_goto:   // fall through
  case Bytecodes::_goto_w: {
    int target_bci = (bc() == Bytecodes::_goto) ? iter().get_dest() : iter().get_far_dest();

    // If this is a backwards branch in the bytecodes, add Safepoint
    maybe_add_safepoint(target_bci);

    // Update method data
    profile_taken_branch(target_bci);

    // Merge the current control into the target basic block
    merge(target_bci);

    // See if we can get some profile data and hand it off to the next block
    Block *target_block = block()->successor_for_bci(target_bci);
    if (target_block->pred_count() != 1)  break;
    ciMethodData* methodData = method()->method_data();
    if (!methodData->is_mature())  break;
    ciProfileData* data = methodData->bci_to_data(bci());
    assert(data != NULL && data->is_JumpData(), "need JumpData for taken branch");
    int taken = ((ciJumpData*)data)->taken();
    taken = method()->scale_count(taken);
    target_block->set_count(taken);
    break;
  }

  case Bytecodes::_ifnull:    btest = BoolTest::eq; goto handle_if_null;
  case Bytecodes::_ifnonnull: btest = BoolTest::ne; goto handle_if_null;
  handle_if_null:
    // If this is a backwards branch in the bytecodes, add Safepoint
    maybe_add_safepoint(iter().get_dest());
    a = null();
    b = pop();
    if (b->is_ValueType()) {
      // Return constant false because 'b' is always non-null
      c = _gvn.makecon(TypeInt::CC_GT);
    } else {
      if (!_gvn.type(b)->speculative_maybe_null() &&
          !too_many_traps(Deoptimization::Reason_speculate_null_check)) {
        inc_sp(1);
        Node* null_ctl = top();
        b = null_check_oop(b, &null_ctl, true, true, true);
        assert(null_ctl->is_top(), "no null control here");
        dec_sp(1);
      } else if (_gvn.type(b)->speculative_always_null() &&
                 !too_many_traps(Deoptimization::Reason_speculate_null_assert)) {
        inc_sp(1);
        b = null_assert(b);
        dec_sp(1);
      }
      c = _gvn.transform( new CmpPNode(b, a) );
    }
    do_ifnull(btest, c);
    break;
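  // Java-level example (illustrative): 'if (a == b)' on references compiles
  // to if_acmpeq; with value types possible it funnels into do_acmp() above,
  // which may expand into the substitutability test.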
  case Bytecodes::_if_acmpeq: btest = BoolTest::eq; goto handle_if_acmp;
  case Bytecodes::_if_acmpne: btest = BoolTest::ne; goto handle_if_acmp;
  handle_if_acmp:
    // If this is a backwards branch in the bytecodes, add Safepoint
    maybe_add_safepoint(iter().get_dest());
    a = access_resolve(pop(), 0);
    b = access_resolve(pop(), 0);
    do_acmp(btest, a, b);
    break;

  case Bytecodes::_ifeq: btest = BoolTest::eq; goto handle_ifxx;
  case Bytecodes::_ifne: btest = BoolTest::ne; goto handle_ifxx;
  case Bytecodes::_iflt: btest = BoolTest::lt; goto handle_ifxx;
  case Bytecodes::_ifle: btest = BoolTest::le; goto handle_ifxx;
  case Bytecodes::_ifgt: btest = BoolTest::gt; goto handle_ifxx;
  case Bytecodes::_ifge: btest = BoolTest::ge; goto handle_ifxx;
  handle_ifxx:
    // If this is a backwards branch in the bytecodes, add Safepoint
    maybe_add_safepoint(iter().get_dest());
    a = _gvn.intcon(0);
    b = pop();
    c = _gvn.transform( new CmpINode(b, a) );
    do_if(btest, c);
    break;

  case Bytecodes::_if_icmpeq: btest = BoolTest::eq; goto handle_if_icmp;
  case Bytecodes::_if_icmpne: btest = BoolTest::ne; goto handle_if_icmp;
  case Bytecodes::_if_icmplt: btest = BoolTest::lt; goto handle_if_icmp;
  case Bytecodes::_if_icmple: btest = BoolTest::le; goto handle_if_icmp;
  case Bytecodes::_if_icmpgt: btest = BoolTest::gt; goto handle_if_icmp;
  case Bytecodes::_if_icmpge: btest = BoolTest::ge; goto handle_if_icmp;
  handle_if_icmp:
    // If this is a backwards branch in the bytecodes, add Safepoint
    maybe_add_safepoint(iter().get_dest());
    a = pop();
    b = pop();
    c = _gvn.transform( new CmpINode( b, a ) );
    do_if(btest, c);
    break;

  case Bytecodes::_tableswitch:
    do_tableswitch();
    break;

  case Bytecodes::_lookupswitch:
    do_lookupswitch();
    break;

  case Bytecodes::_invokestatic:
  case Bytecodes::_invokedynamic:
  case Bytecodes::_invokespecial:
  case Bytecodes::_invokevirtual:
  case Bytecodes::_invokeinterface:
    do_call();
    break;
  case Bytecodes::_checkcast:
    do_checkcast();
    break;
  case Bytecodes::_instanceof:
    do_instanceof();
    break;
  case Bytecodes::_anewarray:
    do_newarray();
    break;
  case Bytecodes::_newarray:
    do_newarray((BasicType)iter().get_index());
    break;
  case Bytecodes::_multianewarray:
    do_multianewarray();
    break;
  case Bytecodes::_new:
    do_new();
    break;
  case Bytecodes::_defaultvalue:
    do_defaultvalue();
    break;
  case Bytecodes::_withfield:
    do_withfield();
    break;

  case Bytecodes::_jsr:
  case Bytecodes::_jsr_w:
    do_jsr();
    break;

  case Bytecodes::_ret:
    do_ret();
    break;


  case Bytecodes::_monitorenter:
    do_monitor_enter();
    break;

  case Bytecodes::_monitorexit:
    do_monitor_exit();
    break;

  case Bytecodes::_breakpoint:
    // Breakpoint set concurrently to compile
    // %%% use an uncommon trap?
    C->record_failure("breakpoint in method");
    return;

  default:
#ifndef PRODUCT
    map()->dump(99);
#endif
    tty->print("\nUnhandled bytecode %s\n", Bytecodes::name(bc()) );
    ShouldNotReachHere();
  }

#ifndef PRODUCT
  IdealGraphPrinter *printer = C->printer();
  if (printer && printer->should_print(1)) {
    char buffer[256];
    jio_snprintf(buffer, sizeof(buffer), "Bytecode %d: %s", bci(), Bytecodes::name(bc()));
    bool old = printer->traverse_outs();
    printer->set_traverse_outs(true);
    printer->print_method(buffer, 4);
    printer->set_traverse_outs(old);
  }
#endif
}