1 /* 2 * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "ci/ciMethodData.hpp" 27 #include "classfile/systemDictionary.hpp" 28 #include "classfile/vmSymbols.hpp" 29 #include "compiler/compileLog.hpp" 30 #include "interpreter/linkResolver.hpp" 31 #include "memory/resourceArea.hpp" 32 #include "memory/universe.hpp" 33 #include "oops/oop.inline.hpp" 34 #include "opto/addnode.hpp" 35 #include "opto/castnode.hpp" 36 #include "opto/convertnode.hpp" 37 #include "opto/divnode.hpp" 38 #include "opto/idealGraphPrinter.hpp" 39 #include "opto/idealKit.hpp" 40 #include "opto/matcher.hpp" 41 #include "opto/memnode.hpp" 42 #include "opto/mulnode.hpp" 43 #include "opto/opaquenode.hpp" 44 #include "opto/parse.hpp" 45 #include "opto/runtime.hpp" 46 #include "opto/valuetypenode.hpp" 47 #include "runtime/deoptimization.hpp" 48 #include "runtime/sharedRuntime.hpp" 49 50 #ifndef PRODUCT 51 extern int explicit_null_checks_inserted, 52 explicit_null_checks_elided; 53 #endif 54 55 //---------------------------------array_load---------------------------------- 56 void Parse::array_load(BasicType bt) { 57 const Type* elemtype = Type::TOP; 58 Node* adr = array_addressing(bt, 0, &elemtype); 59 if (stopped()) return; // guaranteed null or range check 60 61 Node* idx = pop(); 62 Node* ary = pop(); 63 64 // Handle value type arrays 65 const TypeOopPtr* elemptr = elemtype->make_oopptr(); 66 const TypeAryPtr* ary_t = _gvn.type(ary)->is_aryptr(); 67 if (elemtype->isa_valuetype() != NULL) { 68 // Load from flattened value type array 69 ciValueKlass* vk = elemtype->is_valuetype()->value_klass(); 70 ValueTypeNode* vt = ValueTypeNode::make_from_flattened(this, vk, ary, adr); 71 push(vt); 72 return; 73 } else if (elemptr != NULL && elemptr->is_valuetypeptr()) { 74 // Load from non-flattened value type array (elements can never be null) 75 bt = T_VALUETYPE; 76 assert(elemptr->meet(TypePtr::NULL_PTR) != elemptr, "value type array elements should never be null"); 77 } else if (ValueArrayFlatten && elemptr != NULL && elemptr->can_be_value_type() && 78 !ary_t->klass_is_exact()) { 79 // Cannot statically determine if array is flattened, emit runtime check 80 IdealKit ideal(this); 81 IdealVariable res(ideal); 82 ideal.declarations_done(); 83 Node* kls = load_object_klass(ary); 84 Node* tag = load_lh_array_tag(kls); 85 ideal.if_then(tag, BoolTest::ne, intcon(Klass::_lh_array_tag_vt_value)); { 86 // non flattened 87 sync_kit(ideal); 88 const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt); 89 elemtype = ary_t->elem()->make_oopptr(); 90 Node* ld = access_load_at(ary, adr, adr_type, elemtype, bt, 91 IN_HEAP | IS_ARRAY | C2_CONTROL_DEPENDENT_LOAD); 92 ideal.sync_kit(this); 93 ideal.set(res, ld); 94 } ideal.else_(); { 95 // flattened 96 sync_kit(ideal); 97 Node* k_adr = basic_plus_adr(kls, in_bytes(ArrayKlass::element_klass_offset())); 98 Node* elem_klass = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), k_adr, TypeInstPtr::KLASS)); 99 Node* obj_size = NULL; 100 kill_dead_locals(); 101 inc_sp(2); 102 Node* alloc_obj = new_instance(elem_klass, NULL, &obj_size, /*deoptimize_on_exception=*/true); 103 dec_sp(2); 104 105 AllocateNode* alloc = AllocateNode::Ideal_allocation(alloc_obj, &_gvn); 106 assert(alloc->maybe_set_complete(&_gvn), ""); 107 alloc->initialization()->set_complete_with_arraycopy(); 108 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2(); 109 // Unknown value type so might have reference fields 110 if (!bs->array_copy_requires_gc_barriers(T_OBJECT)) { 111 int base_off = sizeof(instanceOopDesc); 112 Node* dst_base = basic_plus_adr(alloc_obj, base_off); 113 Node* countx = obj_size; 114 countx = _gvn.transform(new SubXNode(countx, MakeConX(base_off))); 115 countx = _gvn.transform(new URShiftXNode(countx, intcon(LogBytesPerLong))); 116 117 assert(Klass::_lh_log2_element_size_shift == 0, "use shift in place"); 118 Node* lhp = basic_plus_adr(kls, in_bytes(Klass::layout_helper_offset())); 119 Node* elem_shift = make_load(NULL, lhp, TypeInt::INT, T_INT, MemNode::unordered); 120 uint header = arrayOopDesc::base_offset_in_bytes(T_VALUETYPE); 121 Node* base = basic_plus_adr(ary, header); 122 idx = Compile::conv_I2X_index(&_gvn, idx, TypeInt::POS, control()); 123 Node* scale = _gvn.transform(new LShiftXNode(idx, elem_shift)); 124 Node* adr = basic_plus_adr(ary, base, scale); 125 126 access_clone(control(), adr, dst_base, countx, false); 127 } else { 128 ideal.sync_kit(this); 129 ideal.make_leaf_call(OptoRuntime::load_unknown_value_Type(), 130 CAST_FROM_FN_PTR(address, OptoRuntime::load_unknown_value), 131 "load_unknown_value", 132 ary, idx, alloc_obj); 133 sync_kit(ideal); 134 } 135 136 insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress)); 137 138 ideal.sync_kit(this); 139 ideal.set(res, alloc_obj); 140 } ideal.end_if(); 141 sync_kit(ideal); 142 push_node(bt, ideal.value(res)); 143 return; 144 } 145 146 if (elemtype == TypeInt::BOOL) { 147 bt = T_BOOLEAN; 148 } else if (bt == T_OBJECT) { 149 elemtype = ary_t->elem()->make_oopptr(); 150 } 151 152 const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt); 153 Node* ld = access_load_at(ary, adr, adr_type, elemtype, bt, 154 IN_HEAP | IS_ARRAY | C2_CONTROL_DEPENDENT_LOAD); 155 if (bt == T_VALUETYPE) { 156 // Loading a non-flattened (but flattenable) value type from an array 157 assert(!gvn().type(ld)->is_ptr()->maybe_null(), "value type array elements should never be null"); 158 ld = ValueTypeNode::make_from_oop(this, ld, elemptr->value_klass()); 159 } 160 161 push_node(bt, ld); 162 } 163 164 165 //--------------------------------array_store---------------------------------- 166 void Parse::array_store(BasicType bt) { 167 const Type* elemtype = Type::TOP; 168 Node* adr = array_addressing(bt, type2size[bt], &elemtype); 169 if (stopped()) return; // guaranteed null or range check 170 Node* cast_val = NULL; 171 if (bt == T_OBJECT) { 172 cast_val = array_store_check(); 173 if (stopped()) return; 174 } 175 Node* val = pop_node(bt); // Value to store 176 Node* idx = pop(); // Index in the array 177 Node* ary = pop(); // The array itself 178 179 const TypeAryPtr* ary_t = _gvn.type(ary)->is_aryptr(); 180 if (bt == T_OBJECT) { 181 const TypeOopPtr* elemptr = elemtype->make_oopptr(); 182 const Type* val_t = _gvn.type(val); 183 if (elemtype->isa_valuetype() != NULL) { 184 // Store to flattened value type array 185 if (!val->is_ValueType() && val_t == TypePtr::NULL_PTR) { 186 // Can not store null into a value type array 187 inc_sp(3); 188 uncommon_trap(Deoptimization::Reason_null_check, Deoptimization::Action_none); 189 return; 190 } 191 cast_val->as_ValueType()->store_flattened(this, ary, adr); 192 return; 193 } else if (elemptr->is_valuetypeptr()) { 194 // Store to non-flattened value type array 195 if (!val->is_ValueType() && val_t == TypePtr::NULL_PTR) { 196 // Can not store null into a value type array 197 inc_sp(3); 198 uncommon_trap(Deoptimization::Reason_null_check, Deoptimization::Action_none); 199 return; 200 } 201 } else if (elemptr->can_be_value_type() && !ary_t->klass_is_exact() && 202 (val->is_ValueType() || val_t == TypePtr::NULL_PTR || val_t->is_oopptr()->can_be_value_type())) { 203 if (ValueArrayFlatten) { 204 IdealKit ideal(this); 205 Node* kls = load_object_klass(ary); 206 Node* layout_val = load_lh_array_tag(kls); 207 ideal.if_then(layout_val, BoolTest::ne, intcon(Klass::_lh_array_tag_vt_value)); { 208 // non flattened 209 sync_kit(ideal); 210 211 if (!val->is_ValueType() && TypePtr::NULL_PTR->higher_equal(val_t)) { 212 gen_value_type_array_guard(ary, val, 3); 213 } 214 215 const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt); 216 elemtype = ary_t->elem()->make_oopptr(); 217 access_store_at(control(), ary, adr, adr_type, val, elemtype, bt, MO_UNORDERED | IN_HEAP | IS_ARRAY); 218 ideal.sync_kit(this); 219 } ideal.else_(); { 220 // flattened 221 // Object/interface array must be flattened, cast it 222 if (val->is_ValueType()) { 223 sync_kit(ideal); 224 const TypeValueType* vt = _gvn.type(val)->is_valuetype(); 225 ciArrayKlass* array_klass = ciArrayKlass::make(vt->value_klass()); 226 const TypeAryPtr* arytype = TypeOopPtr::make_from_klass(array_klass)->isa_aryptr(); 227 ary = _gvn.transform(new CheckCastPPNode(control(), ary, arytype)); 228 adr = array_element_address(ary, idx, T_OBJECT, arytype->size(), control()); 229 val->as_ValueType()->store_flattened(this, ary, adr); 230 ideal.sync_kit(this); 231 } else { 232 if (TypePtr::NULL_PTR->higher_equal(val_t)) { 233 sync_kit(ideal); 234 Node* null_ctl = top(); 235 val = null_check_oop(val, &null_ctl); 236 { 237 assert(null_ctl != top(), "expected to possibly be null"); 238 PreserveJVMState pjvms(this); 239 set_control(null_ctl); 240 inc_sp(3); 241 uncommon_trap(Deoptimization::Reason_null_check, Deoptimization::Action_none); 242 } 243 ideal.sync_kit(this); 244 } 245 246 if (!ideal.ctrl()->is_top()) { 247 ideal.make_leaf_call(OptoRuntime::store_unknown_value_Type(), 248 CAST_FROM_FN_PTR(address, OptoRuntime::store_unknown_value), 249 "store_unknown_value", 250 val, ary, idx); 251 } 252 } 253 } ideal.end_if(); 254 sync_kit(ideal); 255 return; 256 } else { 257 if (!val->is_ValueType() && TypePtr::NULL_PTR->higher_equal(val_t)) { 258 gen_value_type_array_guard(ary, val, 3); 259 } 260 } 261 } 262 } 263 264 if (elemtype == TypeInt::BOOL) { 265 bt = T_BOOLEAN; 266 } else if (bt == T_OBJECT) { 267 elemtype = ary_t->elem()->make_oopptr(); 268 } 269 270 const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt); 271 272 access_store_at(control(), ary, adr, adr_type, val, elemtype, bt, MO_UNORDERED | IN_HEAP | IS_ARRAY); 273 } 274 275 276 //------------------------------array_addressing------------------------------- 277 // Pull array and index from the stack. Compute pointer-to-element. 278 Node* Parse::array_addressing(BasicType type, int vals, const Type* *result2) { 279 Node *idx = peek(0+vals); // Get from stack without popping 280 Node *ary = peek(1+vals); // in case of exception 281 282 // Null check the array base, with correct stack contents 283 ary = null_check(ary, T_ARRAY); 284 // Compile-time detect of null-exception? 285 if (stopped()) return top(); 286 287 const TypeAryPtr* arytype = _gvn.type(ary)->is_aryptr(); 288 const TypeInt* sizetype = arytype->size(); 289 const Type* elemtype = arytype->elem(); 290 291 if (UseUniqueSubclasses && result2 != NULL) { 292 const Type* el = elemtype->make_ptr(); 293 if (el && el->isa_instptr()) { 294 const TypeInstPtr* toop = el->is_instptr(); 295 if (toop->klass()->as_instance_klass()->unique_concrete_subklass()) { 296 // If we load from "AbstractClass[]" we must see "ConcreteSubClass". 297 const Type* subklass = Type::get_const_type(toop->klass()); 298 elemtype = subklass->join_speculative(el); 299 } 300 } 301 } 302 303 // Check for big class initializers with all constant offsets 304 // feeding into a known-size array. 305 const TypeInt* idxtype = _gvn.type(idx)->is_int(); 306 // See if the highest idx value is less than the lowest array bound, 307 // and if the idx value cannot be negative: 308 bool need_range_check = true; 309 if (idxtype->_hi < sizetype->_lo && idxtype->_lo >= 0) { 310 need_range_check = false; 311 if (C->log() != NULL) C->log()->elem("observe that='!need_range_check'"); 312 } 313 314 ciKlass * arytype_klass = arytype->klass(); 315 if ((arytype_klass != NULL) && (!arytype_klass->is_loaded())) { 316 // Only fails for some -Xcomp runs 317 // The class is unloaded. We have to run this bytecode in the interpreter. 318 uncommon_trap(Deoptimization::Reason_unloaded, 319 Deoptimization::Action_reinterpret, 320 arytype->klass(), "!loaded array"); 321 return top(); 322 } 323 324 // Do the range check 325 if (GenerateRangeChecks && need_range_check) { 326 Node* tst; 327 if (sizetype->_hi <= 0) { 328 // The greatest array bound is negative, so we can conclude that we're 329 // compiling unreachable code, but the unsigned compare trick used below 330 // only works with non-negative lengths. Instead, hack "tst" to be zero so 331 // the uncommon_trap path will always be taken. 332 tst = _gvn.intcon(0); 333 } else { 334 // Range is constant in array-oop, so we can use the original state of mem 335 Node* len = load_array_length(ary); 336 337 // Test length vs index (standard trick using unsigned compare) 338 Node* chk = _gvn.transform( new CmpUNode(idx, len) ); 339 BoolTest::mask btest = BoolTest::lt; 340 tst = _gvn.transform( new BoolNode(chk, btest) ); 341 } 342 RangeCheckNode* rc = new RangeCheckNode(control(), tst, PROB_MAX, COUNT_UNKNOWN); 343 _gvn.set_type(rc, rc->Value(&_gvn)); 344 if (!tst->is_Con()) { 345 record_for_igvn(rc); 346 } 347 set_control(_gvn.transform(new IfTrueNode(rc))); 348 // Branch to failure if out of bounds 349 { 350 PreserveJVMState pjvms(this); 351 set_control(_gvn.transform(new IfFalseNode(rc))); 352 if (C->allow_range_check_smearing()) { 353 // Do not use builtin_throw, since range checks are sometimes 354 // made more stringent by an optimistic transformation. 355 // This creates "tentative" range checks at this point, 356 // which are not guaranteed to throw exceptions. 357 // See IfNode::Ideal, is_range_check, adjust_check. 358 uncommon_trap(Deoptimization::Reason_range_check, 359 Deoptimization::Action_make_not_entrant, 360 NULL, "range_check"); 361 } else { 362 // If we have already recompiled with the range-check-widening 363 // heroic optimization turned off, then we must really be throwing 364 // range check exceptions. 365 builtin_throw(Deoptimization::Reason_range_check, idx); 366 } 367 } 368 } 369 // Check for always knowing you are throwing a range-check exception 370 if (stopped()) return top(); 371 372 // Make array address computation control dependent to prevent it 373 // from floating above the range check during loop optimizations. 374 Node* ptr = array_element_address(ary, idx, type, sizetype, control()); 375 376 if (result2 != NULL) *result2 = elemtype; 377 378 assert(ptr != top(), "top should go hand-in-hand with stopped"); 379 380 return ptr; 381 } 382 383 384 // returns IfNode 385 IfNode* Parse::jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask, float prob, float cnt) { 386 Node *cmp = _gvn.transform(new CmpINode(a, b)); // two cases: shiftcount > 32 and shiftcount <= 32 387 Node *tst = _gvn.transform(new BoolNode(cmp, mask)); 388 IfNode *iff = create_and_map_if(control(), tst, prob, cnt); 389 return iff; 390 } 391 392 // return Region node 393 Node* Parse::jump_if_join(Node* iffalse, Node* iftrue) { 394 Node *region = new RegionNode(3); // 2 results 395 record_for_igvn(region); 396 region->init_req(1, iffalse); 397 region->init_req(2, iftrue ); 398 _gvn.set_type(region, Type::CONTROL); 399 region = _gvn.transform(region); 400 set_control (region); 401 return region; 402 } 403 404 // sentinel value for the target bci to mark never taken branches 405 // (according to profiling) 406 static const int never_reached = INT_MAX; 407 408 //------------------------------helper for tableswitch------------------------- 409 void Parse::jump_if_true_fork(IfNode *iff, int dest_bci_if_true, int prof_table_index, bool unc) { 410 // True branch, use existing map info 411 { PreserveJVMState pjvms(this); 412 Node *iftrue = _gvn.transform( new IfTrueNode (iff) ); 413 set_control( iftrue ); 414 if (unc) { 415 repush_if_args(); 416 uncommon_trap(Deoptimization::Reason_unstable_if, 417 Deoptimization::Action_reinterpret, 418 NULL, 419 "taken always"); 420 } else { 421 assert(dest_bci_if_true != never_reached, "inconsistent dest"); 422 profile_switch_case(prof_table_index); 423 merge_new_path(dest_bci_if_true); 424 } 425 } 426 427 // False branch 428 Node *iffalse = _gvn.transform( new IfFalseNode(iff) ); 429 set_control( iffalse ); 430 } 431 432 void Parse::jump_if_false_fork(IfNode *iff, int dest_bci_if_true, int prof_table_index, bool unc) { 433 // True branch, use existing map info 434 { PreserveJVMState pjvms(this); 435 Node *iffalse = _gvn.transform( new IfFalseNode (iff) ); 436 set_control( iffalse ); 437 if (unc) { 438 repush_if_args(); 439 uncommon_trap(Deoptimization::Reason_unstable_if, 440 Deoptimization::Action_reinterpret, 441 NULL, 442 "taken never"); 443 } else { 444 assert(dest_bci_if_true != never_reached, "inconsistent dest"); 445 profile_switch_case(prof_table_index); 446 merge_new_path(dest_bci_if_true); 447 } 448 } 449 450 // False branch 451 Node *iftrue = _gvn.transform( new IfTrueNode(iff) ); 452 set_control( iftrue ); 453 } 454 455 void Parse::jump_if_always_fork(int dest_bci, int prof_table_index, bool unc) { 456 // False branch, use existing map and control() 457 if (unc) { 458 repush_if_args(); 459 uncommon_trap(Deoptimization::Reason_unstable_if, 460 Deoptimization::Action_reinterpret, 461 NULL, 462 "taken never"); 463 } else { 464 assert(dest_bci != never_reached, "inconsistent dest"); 465 profile_switch_case(prof_table_index); 466 merge_new_path(dest_bci); 467 } 468 } 469 470 471 extern "C" { 472 static int jint_cmp(const void *i, const void *j) { 473 int a = *(jint *)i; 474 int b = *(jint *)j; 475 return a > b ? 1 : a < b ? -1 : 0; 476 } 477 } 478 479 480 // Default value for methodData switch indexing. Must be a negative value to avoid 481 // conflict with any legal switch index. 482 #define NullTableIndex -1 483 484 class SwitchRange : public StackObj { 485 // a range of integers coupled with a bci destination 486 jint _lo; // inclusive lower limit 487 jint _hi; // inclusive upper limit 488 int _dest; 489 int _table_index; // index into method data table 490 float _cnt; // how many times this range was hit according to profiling 491 492 public: 493 jint lo() const { return _lo; } 494 jint hi() const { return _hi; } 495 int dest() const { return _dest; } 496 int table_index() const { return _table_index; } 497 bool is_singleton() const { return _lo == _hi; } 498 float cnt() const { return _cnt; } 499 500 void setRange(jint lo, jint hi, int dest, int table_index, float cnt) { 501 assert(lo <= hi, "must be a non-empty range"); 502 _lo = lo, _hi = hi; _dest = dest; _table_index = table_index; _cnt = cnt; 503 assert(_cnt >= 0, ""); 504 } 505 bool adjoinRange(jint lo, jint hi, int dest, int table_index, float cnt, bool trim_ranges) { 506 assert(lo <= hi, "must be a non-empty range"); 507 if (lo == _hi+1 && table_index == _table_index) { 508 // see merge_ranges() comment below 509 if (trim_ranges) { 510 if (cnt == 0) { 511 if (_cnt != 0) { 512 return false; 513 } 514 if (dest != _dest) { 515 _dest = never_reached; 516 } 517 } else { 518 if (_cnt == 0) { 519 return false; 520 } 521 if (dest != _dest) { 522 return false; 523 } 524 } 525 } else { 526 if (dest != _dest) { 527 return false; 528 } 529 } 530 _hi = hi; 531 _cnt += cnt; 532 return true; 533 } 534 return false; 535 } 536 537 void set (jint value, int dest, int table_index, float cnt) { 538 setRange(value, value, dest, table_index, cnt); 539 } 540 bool adjoin(jint value, int dest, int table_index, float cnt, bool trim_ranges) { 541 return adjoinRange(value, value, dest, table_index, cnt, trim_ranges); 542 } 543 bool adjoin(SwitchRange& other) { 544 return adjoinRange(other._lo, other._hi, other._dest, other._table_index, other._cnt, false); 545 } 546 547 void print() { 548 if (is_singleton()) 549 tty->print(" {%d}=>%d (cnt=%f)", lo(), dest(), cnt()); 550 else if (lo() == min_jint) 551 tty->print(" {..%d}=>%d (cnt=%f)", hi(), dest(), cnt()); 552 else if (hi() == max_jint) 553 tty->print(" {%d..}=>%d (cnt=%f)", lo(), dest(), cnt()); 554 else 555 tty->print(" {%d..%d}=>%d (cnt=%f)", lo(), hi(), dest(), cnt()); 556 } 557 }; 558 559 // We try to minimize the number of ranges and the size of the taken 560 // ones using profiling data. When ranges are created, 561 // SwitchRange::adjoinRange() only allows 2 adjoining ranges to merge 562 // if both were never hit or both were hit to build longer unreached 563 // ranges. Here, we now merge adjoining ranges with the same 564 // destination and finally set destination of unreached ranges to the 565 // special value never_reached because it can help minimize the number 566 // of tests that are necessary. 567 // 568 // For instance: 569 // [0, 1] to target1 sometimes taken 570 // [1, 2] to target1 never taken 571 // [2, 3] to target2 never taken 572 // would lead to: 573 // [0, 1] to target1 sometimes taken 574 // [1, 3] never taken 575 // 576 // (first 2 ranges to target1 are not merged) 577 static void merge_ranges(SwitchRange* ranges, int& rp) { 578 if (rp == 0) { 579 return; 580 } 581 int shift = 0; 582 for (int j = 0; j < rp; j++) { 583 SwitchRange& r1 = ranges[j-shift]; 584 SwitchRange& r2 = ranges[j+1]; 585 if (r1.adjoin(r2)) { 586 shift++; 587 } else if (shift > 0) { 588 ranges[j+1-shift] = r2; 589 } 590 } 591 rp -= shift; 592 for (int j = 0; j <= rp; j++) { 593 SwitchRange& r = ranges[j]; 594 if (r.cnt() == 0 && r.dest() != never_reached) { 595 r.setRange(r.lo(), r.hi(), never_reached, r.table_index(), r.cnt()); 596 } 597 } 598 } 599 600 //-------------------------------do_tableswitch-------------------------------- 601 void Parse::do_tableswitch() { 602 Node* lookup = pop(); 603 // Get information about tableswitch 604 int default_dest = iter().get_dest_table(0); 605 int lo_index = iter().get_int_table(1); 606 int hi_index = iter().get_int_table(2); 607 int len = hi_index - lo_index + 1; 608 609 if (len < 1) { 610 // If this is a backward branch, add safepoint 611 maybe_add_safepoint(default_dest); 612 merge(default_dest); 613 return; 614 } 615 616 ciMethodData* methodData = method()->method_data(); 617 ciMultiBranchData* profile = NULL; 618 if (methodData->is_mature() && UseSwitchProfiling) { 619 ciProfileData* data = methodData->bci_to_data(bci()); 620 if (data != NULL && data->is_MultiBranchData()) { 621 profile = (ciMultiBranchData*)data; 622 } 623 } 624 bool trim_ranges = !method_data_update() && !C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if); 625 626 // generate decision tree, using trichotomy when possible 627 int rnum = len+2; 628 bool makes_backward_branch = false; 629 SwitchRange* ranges = NEW_RESOURCE_ARRAY(SwitchRange, rnum); 630 int rp = -1; 631 if (lo_index != min_jint) { 632 uint cnt = 1; 633 if (profile != NULL) { 634 cnt = profile->default_count() / (hi_index != max_jint ? 2 : 1); 635 } 636 ranges[++rp].setRange(min_jint, lo_index-1, default_dest, NullTableIndex, cnt); 637 } 638 for (int j = 0; j < len; j++) { 639 jint match_int = lo_index+j; 640 int dest = iter().get_dest_table(j+3); 641 makes_backward_branch |= (dest <= bci()); 642 int table_index = method_data_update() ? j : NullTableIndex; 643 uint cnt = 1; 644 if (profile != NULL) { 645 cnt = profile->count_at(j); 646 } 647 if (rp < 0 || !ranges[rp].adjoin(match_int, dest, table_index, cnt, trim_ranges)) { 648 ranges[++rp].set(match_int, dest, table_index, cnt); 649 } 650 } 651 jint highest = lo_index+(len-1); 652 assert(ranges[rp].hi() == highest, ""); 653 if (highest != max_jint) { 654 uint cnt = 1; 655 if (profile != NULL) { 656 cnt = profile->default_count() / (lo_index != min_jint ? 2 : 1); 657 } 658 if (!ranges[rp].adjoinRange(highest+1, max_jint, default_dest, NullTableIndex, cnt, trim_ranges)) { 659 ranges[++rp].setRange(highest+1, max_jint, default_dest, NullTableIndex, cnt); 660 } 661 } 662 assert(rp < len+2, "not too many ranges"); 663 664 if (trim_ranges) { 665 merge_ranges(ranges, rp); 666 } 667 668 // Safepoint in case if backward branch observed 669 if( makes_backward_branch && UseLoopSafepoints ) 670 add_safepoint(); 671 672 jump_switch_ranges(lookup, &ranges[0], &ranges[rp]); 673 } 674 675 676 //------------------------------do_lookupswitch-------------------------------- 677 void Parse::do_lookupswitch() { 678 Node *lookup = pop(); // lookup value 679 // Get information about lookupswitch 680 int default_dest = iter().get_dest_table(0); 681 int len = iter().get_int_table(1); 682 683 if (len < 1) { // If this is a backward branch, add safepoint 684 maybe_add_safepoint(default_dest); 685 merge(default_dest); 686 return; 687 } 688 689 ciMethodData* methodData = method()->method_data(); 690 ciMultiBranchData* profile = NULL; 691 if (methodData->is_mature() && UseSwitchProfiling) { 692 ciProfileData* data = methodData->bci_to_data(bci()); 693 if (data != NULL && data->is_MultiBranchData()) { 694 profile = (ciMultiBranchData*)data; 695 } 696 } 697 bool trim_ranges = !method_data_update() && !C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if); 698 699 // generate decision tree, using trichotomy when possible 700 jint* table = NEW_RESOURCE_ARRAY(jint, len*3); 701 { 702 for (int j = 0; j < len; j++) { 703 table[3*j+0] = iter().get_int_table(2+2*j); 704 table[3*j+1] = iter().get_dest_table(2+2*j+1); 705 table[3*j+2] = profile == NULL ? 1 : profile->count_at(j); 706 } 707 qsort(table, len, 3*sizeof(table[0]), jint_cmp); 708 } 709 710 float defaults = 0; 711 jint prev = min_jint; 712 for (int j = 0; j < len; j++) { 713 jint match_int = table[3*j+0]; 714 if (match_int != prev) { 715 defaults += (float)match_int - prev; 716 } 717 prev = match_int+1; 718 } 719 if (prev-1 != max_jint) { 720 defaults += (float)max_jint - prev + 1; 721 } 722 float default_cnt = 1; 723 if (profile != NULL) { 724 default_cnt = profile->default_count()/defaults; 725 } 726 727 int rnum = len*2+1; 728 bool makes_backward_branch = false; 729 SwitchRange* ranges = NEW_RESOURCE_ARRAY(SwitchRange, rnum); 730 int rp = -1; 731 for (int j = 0; j < len; j++) { 732 jint match_int = table[3*j+0]; 733 int dest = table[3*j+1]; 734 int cnt = table[3*j+2]; 735 int next_lo = rp < 0 ? min_jint : ranges[rp].hi()+1; 736 int table_index = method_data_update() ? j : NullTableIndex; 737 makes_backward_branch |= (dest <= bci()); 738 float c = default_cnt * ((float)match_int - next_lo); 739 if (match_int != next_lo && (rp < 0 || !ranges[rp].adjoinRange(next_lo, match_int-1, default_dest, NullTableIndex, c, trim_ranges))) { 740 assert(default_dest != never_reached, "sentinel value for dead destinations"); 741 ranges[++rp].setRange(next_lo, match_int-1, default_dest, NullTableIndex, c); 742 } 743 if (rp < 0 || !ranges[rp].adjoin(match_int, dest, table_index, cnt, trim_ranges)) { 744 assert(dest != never_reached, "sentinel value for dead destinations"); 745 ranges[++rp].set(match_int, dest, table_index, cnt); 746 } 747 } 748 jint highest = table[3*(len-1)]; 749 assert(ranges[rp].hi() == highest, ""); 750 if (highest != max_jint && 751 !ranges[rp].adjoinRange(highest+1, max_jint, default_dest, NullTableIndex, default_cnt * ((float)max_jint - highest), trim_ranges)) { 752 ranges[++rp].setRange(highest+1, max_jint, default_dest, NullTableIndex, default_cnt * ((float)max_jint - highest)); 753 } 754 assert(rp < rnum, "not too many ranges"); 755 756 if (trim_ranges) { 757 merge_ranges(ranges, rp); 758 } 759 760 // Safepoint in case backward branch observed 761 if (makes_backward_branch && UseLoopSafepoints) 762 add_safepoint(); 763 764 jump_switch_ranges(lookup, &ranges[0], &ranges[rp]); 765 } 766 767 static float if_prob(float taken_cnt, float total_cnt) { 768 assert(taken_cnt <= total_cnt, ""); 769 if (total_cnt == 0) { 770 return PROB_FAIR; 771 } 772 float p = taken_cnt / total_cnt; 773 return MIN2(MAX2(p, PROB_MIN), PROB_MAX); 774 } 775 776 static float if_cnt(float cnt) { 777 if (cnt == 0) { 778 return COUNT_UNKNOWN; 779 } 780 return cnt; 781 } 782 783 static float sum_of_cnts(SwitchRange *lo, SwitchRange *hi) { 784 float total_cnt = 0; 785 for (SwitchRange* sr = lo; sr <= hi; sr++) { 786 total_cnt += sr->cnt(); 787 } 788 return total_cnt; 789 } 790 791 class SwitchRanges : public ResourceObj { 792 public: 793 SwitchRange* _lo; 794 SwitchRange* _hi; 795 SwitchRange* _mid; 796 float _cost; 797 798 enum { 799 Start, 800 LeftDone, 801 RightDone, 802 Done 803 } _state; 804 805 SwitchRanges(SwitchRange *lo, SwitchRange *hi) 806 : _lo(lo), _hi(hi), _mid(NULL), 807 _cost(0), _state(Start) { 808 } 809 810 SwitchRanges() 811 : _lo(NULL), _hi(NULL), _mid(NULL), 812 _cost(0), _state(Start) {} 813 }; 814 815 // Estimate cost of performing a binary search on lo..hi 816 static float compute_tree_cost(SwitchRange *lo, SwitchRange *hi, float total_cnt) { 817 GrowableArray<SwitchRanges> tree; 818 SwitchRanges root(lo, hi); 819 tree.push(root); 820 821 float cost = 0; 822 do { 823 SwitchRanges& r = *tree.adr_at(tree.length()-1); 824 if (r._hi != r._lo) { 825 if (r._mid == NULL) { 826 float r_cnt = sum_of_cnts(r._lo, r._hi); 827 828 if (r_cnt == 0) { 829 tree.pop(); 830 cost = 0; 831 continue; 832 } 833 834 SwitchRange* mid = NULL; 835 mid = r._lo; 836 for (float cnt = 0; ; ) { 837 assert(mid <= r._hi, "out of bounds"); 838 cnt += mid->cnt(); 839 if (cnt > r_cnt / 2) { 840 break; 841 } 842 mid++; 843 } 844 assert(mid <= r._hi, "out of bounds"); 845 r._mid = mid; 846 r._cost = r_cnt / total_cnt; 847 } 848 r._cost += cost; 849 if (r._state < SwitchRanges::LeftDone && r._mid > r._lo) { 850 cost = 0; 851 r._state = SwitchRanges::LeftDone; 852 tree.push(SwitchRanges(r._lo, r._mid-1)); 853 } else if (r._state < SwitchRanges::RightDone) { 854 cost = 0; 855 r._state = SwitchRanges::RightDone; 856 tree.push(SwitchRanges(r._mid == r._lo ? r._mid+1 : r._mid, r._hi)); 857 } else { 858 tree.pop(); 859 cost = r._cost; 860 } 861 } else { 862 tree.pop(); 863 cost = r._cost; 864 } 865 } while (tree.length() > 0); 866 867 868 return cost; 869 } 870 871 // It sometimes pays off to test most common ranges before the binary search 872 void Parse::linear_search_switch_ranges(Node* key_val, SwitchRange*& lo, SwitchRange*& hi) { 873 uint nr = hi - lo + 1; 874 float total_cnt = sum_of_cnts(lo, hi); 875 876 float min = compute_tree_cost(lo, hi, total_cnt); 877 float extra = 1; 878 float sub = 0; 879 880 SwitchRange* array1 = lo; 881 SwitchRange* array2 = NEW_RESOURCE_ARRAY(SwitchRange, nr); 882 883 SwitchRange* ranges = NULL; 884 885 while (nr >= 2) { 886 assert(lo == array1 || lo == array2, "one the 2 already allocated arrays"); 887 ranges = (lo == array1) ? array2 : array1; 888 889 // Find highest frequency range 890 SwitchRange* candidate = lo; 891 for (SwitchRange* sr = lo+1; sr <= hi; sr++) { 892 if (sr->cnt() > candidate->cnt()) { 893 candidate = sr; 894 } 895 } 896 SwitchRange most_freq = *candidate; 897 if (most_freq.cnt() == 0) { 898 break; 899 } 900 901 // Copy remaining ranges into another array 902 int shift = 0; 903 for (uint i = 0; i < nr; i++) { 904 SwitchRange* sr = &lo[i]; 905 if (sr != candidate) { 906 ranges[i-shift] = *sr; 907 } else { 908 shift++; 909 if (i > 0 && i < nr-1) { 910 SwitchRange prev = lo[i-1]; 911 prev.setRange(prev.lo(), sr->hi(), prev.dest(), prev.table_index(), prev.cnt()); 912 if (prev.adjoin(lo[i+1])) { 913 shift++; 914 i++; 915 } 916 ranges[i-shift] = prev; 917 } 918 } 919 } 920 nr -= shift; 921 922 // Evaluate cost of testing the most common range and performing a 923 // binary search on the other ranges 924 float cost = extra + compute_tree_cost(&ranges[0], &ranges[nr-1], total_cnt); 925 if (cost >= min) { 926 break; 927 } 928 // swap arrays 929 lo = &ranges[0]; 930 hi = &ranges[nr-1]; 931 932 // It pays off: emit the test for the most common range 933 assert(most_freq.cnt() > 0, "must be taken"); 934 Node* val = _gvn.transform(new SubINode(key_val, _gvn.intcon(most_freq.lo()))); 935 Node* cmp = _gvn.transform(new CmpUNode(val, _gvn.intcon(most_freq.hi() - most_freq.lo()))); 936 Node* tst = _gvn.transform(new BoolNode(cmp, BoolTest::le)); 937 IfNode* iff = create_and_map_if(control(), tst, if_prob(most_freq.cnt(), total_cnt), if_cnt(most_freq.cnt())); 938 jump_if_true_fork(iff, most_freq.dest(), most_freq.table_index(), false); 939 940 sub += most_freq.cnt() / total_cnt; 941 extra += 1 - sub; 942 min = cost; 943 } 944 } 945 946 //----------------------------create_jump_tables------------------------------- 947 bool Parse::create_jump_tables(Node* key_val, SwitchRange* lo, SwitchRange* hi) { 948 // Are jumptables enabled 949 if (!UseJumpTables) return false; 950 951 // Are jumptables supported 952 if (!Matcher::has_match_rule(Op_Jump)) return false; 953 954 // Don't make jump table if profiling 955 if (method_data_update()) return false; 956 957 bool trim_ranges = !C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if); 958 959 // Decide if a guard is needed to lop off big ranges at either (or 960 // both) end(s) of the input set. We'll call this the default target 961 // even though we can't be sure that it is the true "default". 962 963 bool needs_guard = false; 964 int default_dest; 965 int64_t total_outlier_size = 0; 966 int64_t hi_size = ((int64_t)hi->hi()) - ((int64_t)hi->lo()) + 1; 967 int64_t lo_size = ((int64_t)lo->hi()) - ((int64_t)lo->lo()) + 1; 968 969 if (lo->dest() == hi->dest()) { 970 total_outlier_size = hi_size + lo_size; 971 default_dest = lo->dest(); 972 } else if (lo_size > hi_size) { 973 total_outlier_size = lo_size; 974 default_dest = lo->dest(); 975 } else { 976 total_outlier_size = hi_size; 977 default_dest = hi->dest(); 978 } 979 980 float total = sum_of_cnts(lo, hi); 981 float cost = compute_tree_cost(lo, hi, total); 982 983 // If a guard test will eliminate very sparse end ranges, then 984 // it is worth the cost of an extra jump. 985 float trimmed_cnt = 0; 986 if (total_outlier_size > (MaxJumpTableSparseness * 4)) { 987 needs_guard = true; 988 if (default_dest == lo->dest()) { 989 trimmed_cnt += lo->cnt(); 990 lo++; 991 } 992 if (default_dest == hi->dest()) { 993 trimmed_cnt += hi->cnt(); 994 hi--; 995 } 996 } 997 998 // Find the total number of cases and ranges 999 int64_t num_cases = ((int64_t)hi->hi()) - ((int64_t)lo->lo()) + 1; 1000 int num_range = hi - lo + 1; 1001 1002 // Don't create table if: too large, too small, or too sparse. 1003 if (num_cases > MaxJumpTableSize) 1004 return false; 1005 if (UseSwitchProfiling) { 1006 // MinJumpTableSize is set so with a well balanced binary tree, 1007 // when the number of ranges is MinJumpTableSize, it's cheaper to 1008 // go through a JumpNode that a tree of IfNodes. Average cost of a 1009 // tree of IfNodes with MinJumpTableSize is 1010 // log2f(MinJumpTableSize) comparisons. So if the cost computed 1011 // from profile data is less than log2f(MinJumpTableSize) then 1012 // going with the binary search is cheaper. 1013 if (cost < log2f(MinJumpTableSize)) { 1014 return false; 1015 } 1016 } else { 1017 if (num_cases < MinJumpTableSize) 1018 return false; 1019 } 1020 if (num_cases > (MaxJumpTableSparseness * num_range)) 1021 return false; 1022 1023 // Normalize table lookups to zero 1024 int lowval = lo->lo(); 1025 key_val = _gvn.transform( new SubINode(key_val, _gvn.intcon(lowval)) ); 1026 1027 // Generate a guard to protect against input keyvals that aren't 1028 // in the switch domain. 1029 if (needs_guard) { 1030 Node* size = _gvn.intcon(num_cases); 1031 Node* cmp = _gvn.transform(new CmpUNode(key_val, size)); 1032 Node* tst = _gvn.transform(new BoolNode(cmp, BoolTest::ge)); 1033 IfNode* iff = create_and_map_if(control(), tst, if_prob(trimmed_cnt, total), if_cnt(trimmed_cnt)); 1034 jump_if_true_fork(iff, default_dest, NullTableIndex, trim_ranges && trimmed_cnt == 0); 1035 1036 total -= trimmed_cnt; 1037 } 1038 1039 // Create an ideal node JumpTable that has projections 1040 // of all possible ranges for a switch statement 1041 // The key_val input must be converted to a pointer offset and scaled. 1042 // Compare Parse::array_addressing above. 1043 1044 // Clean the 32-bit int into a real 64-bit offset. 1045 // Otherwise, the jint value 0 might turn into an offset of 0x0800000000. 1046 const TypeInt* ikeytype = TypeInt::make(0, num_cases, Type::WidenMin); 1047 // Make I2L conversion control dependent to prevent it from 1048 // floating above the range check during loop optimizations. 1049 key_val = C->conv_I2X_index(&_gvn, key_val, ikeytype, control()); 1050 1051 // Shift the value by wordsize so we have an index into the table, rather 1052 // than a switch value 1053 Node *shiftWord = _gvn.MakeConX(wordSize); 1054 key_val = _gvn.transform( new MulXNode( key_val, shiftWord)); 1055 1056 // Create the JumpNode 1057 Arena* arena = C->comp_arena(); 1058 float* probs = (float*)arena->Amalloc(sizeof(float)*num_cases); 1059 int i = 0; 1060 if (total == 0) { 1061 for (SwitchRange* r = lo; r <= hi; r++) { 1062 for (int64_t j = r->lo(); j <= r->hi(); j++, i++) { 1063 probs[i] = 1.0F / num_cases; 1064 } 1065 } 1066 } else { 1067 for (SwitchRange* r = lo; r <= hi; r++) { 1068 float prob = r->cnt()/total; 1069 for (int64_t j = r->lo(); j <= r->hi(); j++, i++) { 1070 probs[i] = prob / (r->hi() - r->lo() + 1); 1071 } 1072 } 1073 } 1074 1075 ciMethodData* methodData = method()->method_data(); 1076 ciMultiBranchData* profile = NULL; 1077 if (methodData->is_mature()) { 1078 ciProfileData* data = methodData->bci_to_data(bci()); 1079 if (data != NULL && data->is_MultiBranchData()) { 1080 profile = (ciMultiBranchData*)data; 1081 } 1082 } 1083 1084 Node* jtn = _gvn.transform(new JumpNode(control(), key_val, num_cases, probs, profile == NULL ? COUNT_UNKNOWN : total)); 1085 1086 // These are the switch destinations hanging off the jumpnode 1087 i = 0; 1088 for (SwitchRange* r = lo; r <= hi; r++) { 1089 for (int64_t j = r->lo(); j <= r->hi(); j++, i++) { 1090 Node* input = _gvn.transform(new JumpProjNode(jtn, i, r->dest(), (int)(j - lowval))); 1091 { 1092 PreserveJVMState pjvms(this); 1093 set_control(input); 1094 jump_if_always_fork(r->dest(), r->table_index(), trim_ranges && r->cnt() == 0); 1095 } 1096 } 1097 } 1098 assert(i == num_cases, "miscount of cases"); 1099 stop_and_kill_map(); // no more uses for this JVMS 1100 return true; 1101 } 1102 1103 //----------------------------jump_switch_ranges------------------------------- 1104 void Parse::jump_switch_ranges(Node* key_val, SwitchRange *lo, SwitchRange *hi, int switch_depth) { 1105 Block* switch_block = block(); 1106 bool trim_ranges = !method_data_update() && !C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if); 1107 1108 if (switch_depth == 0) { 1109 // Do special processing for the top-level call. 1110 assert(lo->lo() == min_jint, "initial range must exhaust Type::INT"); 1111 assert(hi->hi() == max_jint, "initial range must exhaust Type::INT"); 1112 1113 // Decrement pred-numbers for the unique set of nodes. 1114 #ifdef ASSERT 1115 if (!trim_ranges) { 1116 // Ensure that the block's successors are a (duplicate-free) set. 1117 int successors_counted = 0; // block occurrences in [hi..lo] 1118 int unique_successors = switch_block->num_successors(); 1119 for (int i = 0; i < unique_successors; i++) { 1120 Block* target = switch_block->successor_at(i); 1121 1122 // Check that the set of successors is the same in both places. 1123 int successors_found = 0; 1124 for (SwitchRange* p = lo; p <= hi; p++) { 1125 if (p->dest() == target->start()) successors_found++; 1126 } 1127 assert(successors_found > 0, "successor must be known"); 1128 successors_counted += successors_found; 1129 } 1130 assert(successors_counted == (hi-lo)+1, "no unexpected successors"); 1131 } 1132 #endif 1133 1134 // Maybe prune the inputs, based on the type of key_val. 1135 jint min_val = min_jint; 1136 jint max_val = max_jint; 1137 const TypeInt* ti = key_val->bottom_type()->isa_int(); 1138 if (ti != NULL) { 1139 min_val = ti->_lo; 1140 max_val = ti->_hi; 1141 assert(min_val <= max_val, "invalid int type"); 1142 } 1143 while (lo->hi() < min_val) { 1144 lo++; 1145 } 1146 if (lo->lo() < min_val) { 1147 lo->setRange(min_val, lo->hi(), lo->dest(), lo->table_index(), lo->cnt()); 1148 } 1149 while (hi->lo() > max_val) { 1150 hi--; 1151 } 1152 if (hi->hi() > max_val) { 1153 hi->setRange(hi->lo(), max_val, hi->dest(), hi->table_index(), hi->cnt()); 1154 } 1155 1156 linear_search_switch_ranges(key_val, lo, hi); 1157 } 1158 1159 #ifndef PRODUCT 1160 if (switch_depth == 0) { 1161 _max_switch_depth = 0; 1162 _est_switch_depth = log2_intptr((hi-lo+1)-1)+1; 1163 } 1164 #endif 1165 1166 assert(lo <= hi, "must be a non-empty set of ranges"); 1167 if (lo == hi) { 1168 jump_if_always_fork(lo->dest(), lo->table_index(), trim_ranges && lo->cnt() == 0); 1169 } else { 1170 assert(lo->hi() == (lo+1)->lo()-1, "contiguous ranges"); 1171 assert(hi->lo() == (hi-1)->hi()+1, "contiguous ranges"); 1172 1173 if (create_jump_tables(key_val, lo, hi)) return; 1174 1175 SwitchRange* mid = NULL; 1176 float total_cnt = sum_of_cnts(lo, hi); 1177 1178 int nr = hi - lo + 1; 1179 if (UseSwitchProfiling) { 1180 // Don't keep the binary search tree balanced: pick up mid point 1181 // that split frequencies in half. 1182 float cnt = 0; 1183 for (SwitchRange* sr = lo; sr <= hi; sr++) { 1184 cnt += sr->cnt(); 1185 if (cnt >= total_cnt / 2) { 1186 mid = sr; 1187 break; 1188 } 1189 } 1190 } else { 1191 mid = lo + nr/2; 1192 1193 // if there is an easy choice, pivot at a singleton: 1194 if (nr > 3 && !mid->is_singleton() && (mid-1)->is_singleton()) mid--; 1195 1196 assert(lo < mid && mid <= hi, "good pivot choice"); 1197 assert(nr != 2 || mid == hi, "should pick higher of 2"); 1198 assert(nr != 3 || mid == hi-1, "should pick middle of 3"); 1199 } 1200 1201 1202 Node *test_val = _gvn.intcon(mid == lo ? mid->hi() : mid->lo()); 1203 1204 if (mid->is_singleton()) { 1205 IfNode *iff_ne = jump_if_fork_int(key_val, test_val, BoolTest::ne, 1-if_prob(mid->cnt(), total_cnt), if_cnt(mid->cnt())); 1206 jump_if_false_fork(iff_ne, mid->dest(), mid->table_index(), trim_ranges && mid->cnt() == 0); 1207 1208 // Special Case: If there are exactly three ranges, and the high 1209 // and low range each go to the same place, omit the "gt" test, 1210 // since it will not discriminate anything. 1211 bool eq_test_only = (hi == lo+2 && hi->dest() == lo->dest() && mid == hi-1) || mid == lo; 1212 1213 // if there is a higher range, test for it and process it: 1214 if (mid < hi && !eq_test_only) { 1215 // two comparisons of same values--should enable 1 test for 2 branches 1216 // Use BoolTest::le instead of BoolTest::gt 1217 float cnt = sum_of_cnts(lo, mid-1); 1218 IfNode *iff_le = jump_if_fork_int(key_val, test_val, BoolTest::le, if_prob(cnt, total_cnt), if_cnt(cnt)); 1219 Node *iftrue = _gvn.transform( new IfTrueNode(iff_le) ); 1220 Node *iffalse = _gvn.transform( new IfFalseNode(iff_le) ); 1221 { PreserveJVMState pjvms(this); 1222 set_control(iffalse); 1223 jump_switch_ranges(key_val, mid+1, hi, switch_depth+1); 1224 } 1225 set_control(iftrue); 1226 } 1227 1228 } else { 1229 // mid is a range, not a singleton, so treat mid..hi as a unit 1230 float cnt = sum_of_cnts(mid == lo ? mid+1 : mid, hi); 1231 IfNode *iff_ge = jump_if_fork_int(key_val, test_val, mid == lo ? BoolTest::gt : BoolTest::ge, if_prob(cnt, total_cnt), if_cnt(cnt)); 1232 1233 // if there is a higher range, test for it and process it: 1234 if (mid == hi) { 1235 jump_if_true_fork(iff_ge, mid->dest(), mid->table_index(), trim_ranges && cnt == 0); 1236 } else { 1237 Node *iftrue = _gvn.transform( new IfTrueNode(iff_ge) ); 1238 Node *iffalse = _gvn.transform( new IfFalseNode(iff_ge) ); 1239 { PreserveJVMState pjvms(this); 1240 set_control(iftrue); 1241 jump_switch_ranges(key_val, mid == lo ? mid+1 : mid, hi, switch_depth+1); 1242 } 1243 set_control(iffalse); 1244 } 1245 } 1246 1247 // in any case, process the lower range 1248 if (mid == lo) { 1249 if (mid->is_singleton()) { 1250 jump_switch_ranges(key_val, lo+1, hi, switch_depth+1); 1251 } else { 1252 jump_if_always_fork(lo->dest(), lo->table_index(), trim_ranges && lo->cnt() == 0); 1253 } 1254 } else { 1255 jump_switch_ranges(key_val, lo, mid-1, switch_depth+1); 1256 } 1257 } 1258 1259 // Decrease pred_count for each successor after all is done. 1260 if (switch_depth == 0) { 1261 int unique_successors = switch_block->num_successors(); 1262 for (int i = 0; i < unique_successors; i++) { 1263 Block* target = switch_block->successor_at(i); 1264 // Throw away the pre-allocated path for each unique successor. 1265 target->next_path_num(); 1266 } 1267 } 1268 1269 #ifndef PRODUCT 1270 _max_switch_depth = MAX2(switch_depth, _max_switch_depth); 1271 if (TraceOptoParse && Verbose && WizardMode && switch_depth == 0) { 1272 SwitchRange* r; 1273 int nsing = 0; 1274 for( r = lo; r <= hi; r++ ) { 1275 if( r->is_singleton() ) nsing++; 1276 } 1277 tty->print(">>> "); 1278 _method->print_short_name(); 1279 tty->print_cr(" switch decision tree"); 1280 tty->print_cr(" %d ranges (%d singletons), max_depth=%d, est_depth=%d", 1281 (int) (hi-lo+1), nsing, _max_switch_depth, _est_switch_depth); 1282 if (_max_switch_depth > _est_switch_depth) { 1283 tty->print_cr("******** BAD SWITCH DEPTH ********"); 1284 } 1285 tty->print(" "); 1286 for( r = lo; r <= hi; r++ ) { 1287 r->print(); 1288 } 1289 tty->cr(); 1290 } 1291 #endif 1292 } 1293 1294 void Parse::modf() { 1295 Node *f2 = pop(); 1296 Node *f1 = pop(); 1297 Node* c = make_runtime_call(RC_LEAF, OptoRuntime::modf_Type(), 1298 CAST_FROM_FN_PTR(address, SharedRuntime::frem), 1299 "frem", NULL, //no memory effects 1300 f1, f2); 1301 Node* res = _gvn.transform(new ProjNode(c, TypeFunc::Parms + 0)); 1302 1303 push(res); 1304 } 1305 1306 void Parse::modd() { 1307 Node *d2 = pop_pair(); 1308 Node *d1 = pop_pair(); 1309 Node* c = make_runtime_call(RC_LEAF, OptoRuntime::Math_DD_D_Type(), 1310 CAST_FROM_FN_PTR(address, SharedRuntime::drem), 1311 "drem", NULL, //no memory effects 1312 d1, top(), d2, top()); 1313 Node* res_d = _gvn.transform(new ProjNode(c, TypeFunc::Parms + 0)); 1314 1315 #ifdef ASSERT 1316 Node* res_top = _gvn.transform(new ProjNode(c, TypeFunc::Parms + 1)); 1317 assert(res_top == top(), "second value must be top"); 1318 #endif 1319 1320 push_pair(res_d); 1321 } 1322 1323 void Parse::l2f() { 1324 Node* f2 = pop(); 1325 Node* f1 = pop(); 1326 Node* c = make_runtime_call(RC_LEAF, OptoRuntime::l2f_Type(), 1327 CAST_FROM_FN_PTR(address, SharedRuntime::l2f), 1328 "l2f", NULL, //no memory effects 1329 f1, f2); 1330 Node* res = _gvn.transform(new ProjNode(c, TypeFunc::Parms + 0)); 1331 1332 push(res); 1333 } 1334 1335 void Parse::do_irem() { 1336 // Must keep both values on the expression-stack during null-check 1337 zero_check_int(peek()); 1338 // Compile-time detect of null-exception? 1339 if (stopped()) return; 1340 1341 Node* b = pop(); 1342 Node* a = pop(); 1343 1344 const Type *t = _gvn.type(b); 1345 if (t != Type::TOP) { 1346 const TypeInt *ti = t->is_int(); 1347 if (ti->is_con()) { 1348 int divisor = ti->get_con(); 1349 // check for positive power of 2 1350 if (divisor > 0 && 1351 (divisor & ~(divisor-1)) == divisor) { 1352 // yes ! 1353 Node *mask = _gvn.intcon((divisor - 1)); 1354 // Sigh, must handle negative dividends 1355 Node *zero = _gvn.intcon(0); 1356 IfNode *ifff = jump_if_fork_int(a, zero, BoolTest::lt, PROB_FAIR, COUNT_UNKNOWN); 1357 Node *iff = _gvn.transform( new IfFalseNode(ifff) ); 1358 Node *ift = _gvn.transform( new IfTrueNode (ifff) ); 1359 Node *reg = jump_if_join(ift, iff); 1360 Node *phi = PhiNode::make(reg, NULL, TypeInt::INT); 1361 // Negative path; negate/and/negate 1362 Node *neg = _gvn.transform( new SubINode(zero, a) ); 1363 Node *andn= _gvn.transform( new AndINode(neg, mask) ); 1364 Node *negn= _gvn.transform( new SubINode(zero, andn) ); 1365 phi->init_req(1, negn); 1366 // Fast positive case 1367 Node *andx = _gvn.transform( new AndINode(a, mask) ); 1368 phi->init_req(2, andx); 1369 // Push the merge 1370 push( _gvn.transform(phi) ); 1371 return; 1372 } 1373 } 1374 } 1375 // Default case 1376 push( _gvn.transform( new ModINode(control(),a,b) ) ); 1377 } 1378 1379 // Handle jsr and jsr_w bytecode 1380 void Parse::do_jsr() { 1381 assert(bc() == Bytecodes::_jsr || bc() == Bytecodes::_jsr_w, "wrong bytecode"); 1382 1383 // Store information about current state, tagged with new _jsr_bci 1384 int return_bci = iter().next_bci(); 1385 int jsr_bci = (bc() == Bytecodes::_jsr) ? iter().get_dest() : iter().get_far_dest(); 1386 1387 // Update method data 1388 profile_taken_branch(jsr_bci); 1389 1390 // The way we do things now, there is only one successor block 1391 // for the jsr, because the target code is cloned by ciTypeFlow. 1392 Block* target = successor_for_bci(jsr_bci); 1393 1394 // What got pushed? 1395 const Type* ret_addr = target->peek(); 1396 assert(ret_addr->singleton(), "must be a constant (cloned jsr body)"); 1397 1398 // Effect on jsr on stack 1399 push(_gvn.makecon(ret_addr)); 1400 1401 // Flow to the jsr. 1402 merge(jsr_bci); 1403 } 1404 1405 // Handle ret bytecode 1406 void Parse::do_ret() { 1407 // Find to whom we return. 1408 assert(block()->num_successors() == 1, "a ret can only go one place now"); 1409 Block* target = block()->successor_at(0); 1410 assert(!target->is_ready(), "our arrival must be expected"); 1411 profile_ret(target->flow()->start()); 1412 int pnum = target->next_path_num(); 1413 merge_common(target, pnum); 1414 } 1415 1416 static bool has_injected_profile(BoolTest::mask btest, Node* test, int& taken, int& not_taken) { 1417 if (btest != BoolTest::eq && btest != BoolTest::ne) { 1418 // Only ::eq and ::ne are supported for profile injection. 1419 return false; 1420 } 1421 if (test->is_Cmp() && 1422 test->in(1)->Opcode() == Op_ProfileBoolean) { 1423 ProfileBooleanNode* profile = (ProfileBooleanNode*)test->in(1); 1424 int false_cnt = profile->false_count(); 1425 int true_cnt = profile->true_count(); 1426 1427 // Counts matching depends on the actual test operation (::eq or ::ne). 1428 // No need to scale the counts because profile injection was designed 1429 // to feed exact counts into VM. 1430 taken = (btest == BoolTest::eq) ? false_cnt : true_cnt; 1431 not_taken = (btest == BoolTest::eq) ? true_cnt : false_cnt; 1432 1433 profile->consume(); 1434 return true; 1435 } 1436 return false; 1437 } 1438 //--------------------------dynamic_branch_prediction-------------------------- 1439 // Try to gather dynamic branch prediction behavior. Return a probability 1440 // of the branch being taken and set the "cnt" field. Returns a -1.0 1441 // if we need to use static prediction for some reason. 1442 float Parse::dynamic_branch_prediction(float &cnt, BoolTest::mask btest, Node* test) { 1443 ResourceMark rm; 1444 1445 cnt = COUNT_UNKNOWN; 1446 1447 int taken = 0; 1448 int not_taken = 0; 1449 1450 bool use_mdo = !has_injected_profile(btest, test, taken, not_taken); 1451 1452 if (use_mdo) { 1453 // Use MethodData information if it is available 1454 // FIXME: free the ProfileData structure 1455 ciMethodData* methodData = method()->method_data(); 1456 if (!methodData->is_mature()) return PROB_UNKNOWN; 1457 ciProfileData* data = methodData->bci_to_data(bci()); 1458 if (data == NULL) { 1459 return PROB_UNKNOWN; 1460 } 1461 if (!data->is_JumpData()) return PROB_UNKNOWN; 1462 1463 // get taken and not taken values 1464 taken = data->as_JumpData()->taken(); 1465 not_taken = 0; 1466 if (data->is_BranchData()) { 1467 not_taken = data->as_BranchData()->not_taken(); 1468 } 1469 1470 // scale the counts to be commensurate with invocation counts: 1471 taken = method()->scale_count(taken); 1472 not_taken = method()->scale_count(not_taken); 1473 } 1474 1475 // Give up if too few (or too many, in which case the sum will overflow) counts to be meaningful. 1476 // We also check that individual counters are positive first, otherwise the sum can become positive. 1477 if (taken < 0 || not_taken < 0 || taken + not_taken < 40) { 1478 if (C->log() != NULL) { 1479 C->log()->elem("branch target_bci='%d' taken='%d' not_taken='%d'", iter().get_dest(), taken, not_taken); 1480 } 1481 return PROB_UNKNOWN; 1482 } 1483 1484 // Compute frequency that we arrive here 1485 float sum = taken + not_taken; 1486 // Adjust, if this block is a cloned private block but the 1487 // Jump counts are shared. Taken the private counts for 1488 // just this path instead of the shared counts. 1489 if( block()->count() > 0 ) 1490 sum = block()->count(); 1491 cnt = sum / FreqCountInvocations; 1492 1493 // Pin probability to sane limits 1494 float prob; 1495 if( !taken ) 1496 prob = (0+PROB_MIN) / 2; 1497 else if( !not_taken ) 1498 prob = (1+PROB_MAX) / 2; 1499 else { // Compute probability of true path 1500 prob = (float)taken / (float)(taken + not_taken); 1501 if (prob > PROB_MAX) prob = PROB_MAX; 1502 if (prob < PROB_MIN) prob = PROB_MIN; 1503 } 1504 1505 assert((cnt > 0.0f) && (prob > 0.0f), 1506 "Bad frequency assignment in if"); 1507 1508 if (C->log() != NULL) { 1509 const char* prob_str = NULL; 1510 if (prob >= PROB_MAX) prob_str = (prob == PROB_MAX) ? "max" : "always"; 1511 if (prob <= PROB_MIN) prob_str = (prob == PROB_MIN) ? "min" : "never"; 1512 char prob_str_buf[30]; 1513 if (prob_str == NULL) { 1514 sprintf(prob_str_buf, "%g", prob); 1515 prob_str = prob_str_buf; 1516 } 1517 C->log()->elem("branch target_bci='%d' taken='%d' not_taken='%d' cnt='%f' prob='%s'", 1518 iter().get_dest(), taken, not_taken, cnt, prob_str); 1519 } 1520 return prob; 1521 } 1522 1523 //-----------------------------branch_prediction------------------------------- 1524 float Parse::branch_prediction(float& cnt, 1525 BoolTest::mask btest, 1526 int target_bci, 1527 Node* test) { 1528 float prob = dynamic_branch_prediction(cnt, btest, test); 1529 // If prob is unknown, switch to static prediction 1530 if (prob != PROB_UNKNOWN) return prob; 1531 1532 prob = PROB_FAIR; // Set default value 1533 if (btest == BoolTest::eq) // Exactly equal test? 1534 prob = PROB_STATIC_INFREQUENT; // Assume its relatively infrequent 1535 else if (btest == BoolTest::ne) 1536 prob = PROB_STATIC_FREQUENT; // Assume its relatively frequent 1537 1538 // If this is a conditional test guarding a backwards branch, 1539 // assume its a loop-back edge. Make it a likely taken branch. 1540 if (target_bci < bci()) { 1541 if (is_osr_parse()) { // Could be a hot OSR'd loop; force deopt 1542 // Since it's an OSR, we probably have profile data, but since 1543 // branch_prediction returned PROB_UNKNOWN, the counts are too small. 1544 // Let's make a special check here for completely zero counts. 1545 ciMethodData* methodData = method()->method_data(); 1546 if (!methodData->is_empty()) { 1547 ciProfileData* data = methodData->bci_to_data(bci()); 1548 // Only stop for truly zero counts, which mean an unknown part 1549 // of the OSR-ed method, and we want to deopt to gather more stats. 1550 // If you have ANY counts, then this loop is simply 'cold' relative 1551 // to the OSR loop. 1552 if (data == NULL || 1553 (data->as_BranchData()->taken() + data->as_BranchData()->not_taken() == 0)) { 1554 // This is the only way to return PROB_UNKNOWN: 1555 return PROB_UNKNOWN; 1556 } 1557 } 1558 } 1559 prob = PROB_STATIC_FREQUENT; // Likely to take backwards branch 1560 } 1561 1562 assert(prob != PROB_UNKNOWN, "must have some guess at this point"); 1563 return prob; 1564 } 1565 1566 // The magic constants are chosen so as to match the output of 1567 // branch_prediction() when the profile reports a zero taken count. 1568 // It is important to distinguish zero counts unambiguously, because 1569 // some branches (e.g., _213_javac.Assembler.eliminate) validly produce 1570 // very small but nonzero probabilities, which if confused with zero 1571 // counts would keep the program recompiling indefinitely. 1572 bool Parse::seems_never_taken(float prob) const { 1573 return prob < PROB_MIN; 1574 } 1575 1576 // True if the comparison seems to be the kind that will not change its 1577 // statistics from true to false. See comments in adjust_map_after_if. 1578 // This question is only asked along paths which are already 1579 // classifed as untaken (by seems_never_taken), so really, 1580 // if a path is never taken, its controlling comparison is 1581 // already acting in a stable fashion. If the comparison 1582 // seems stable, we will put an expensive uncommon trap 1583 // on the untaken path. 1584 bool Parse::seems_stable_comparison() const { 1585 if (C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if)) { 1586 return false; 1587 } 1588 return true; 1589 } 1590 1591 //-------------------------------repush_if_args-------------------------------- 1592 // Push arguments of an "if" bytecode back onto the stack by adjusting _sp. 1593 inline int Parse::repush_if_args() { 1594 if (PrintOpto && WizardMode) { 1595 tty->print("defending against excessive implicit null exceptions on %s @%d in ", 1596 Bytecodes::name(iter().cur_bc()), iter().cur_bci()); 1597 method()->print_name(); tty->cr(); 1598 } 1599 int bc_depth = - Bytecodes::depth(iter().cur_bc()); 1600 assert(bc_depth == 1 || bc_depth == 2, "only two kinds of branches"); 1601 DEBUG_ONLY(sync_jvms()); // argument(n) requires a synced jvms 1602 assert(argument(0) != NULL, "must exist"); 1603 assert(bc_depth == 1 || argument(1) != NULL, "two must exist"); 1604 inc_sp(bc_depth); 1605 return bc_depth; 1606 } 1607 1608 //----------------------------------do_ifnull---------------------------------- 1609 void Parse::do_ifnull(BoolTest::mask btest, Node *c) { 1610 int target_bci = iter().get_dest(); 1611 1612 Block* branch_block = successor_for_bci(target_bci); 1613 Block* next_block = successor_for_bci(iter().next_bci()); 1614 1615 float cnt; 1616 float prob = branch_prediction(cnt, btest, target_bci, c); 1617 if (prob == PROB_UNKNOWN) { 1618 // (An earlier version of do_ifnull omitted this trap for OSR methods.) 1619 if (PrintOpto && Verbose) { 1620 tty->print_cr("Never-taken edge stops compilation at bci %d", bci()); 1621 } 1622 repush_if_args(); // to gather stats on loop 1623 // We need to mark this branch as taken so that if we recompile we will 1624 // see that it is possible. In the tiered system the interpreter doesn't 1625 // do profiling and by the time we get to the lower tier from the interpreter 1626 // the path may be cold again. Make sure it doesn't look untaken 1627 profile_taken_branch(target_bci, !ProfileInterpreter); 1628 uncommon_trap(Deoptimization::Reason_unreached, 1629 Deoptimization::Action_reinterpret, 1630 NULL, "cold"); 1631 if (C->eliminate_boxing()) { 1632 // Mark the successor blocks as parsed 1633 branch_block->next_path_num(); 1634 next_block->next_path_num(); 1635 } 1636 return; 1637 } 1638 1639 NOT_PRODUCT(explicit_null_checks_inserted++); 1640 1641 // Generate real control flow 1642 Node *tst = _gvn.transform( new BoolNode( c, btest ) ); 1643 1644 // Sanity check the probability value 1645 assert(prob > 0.0f,"Bad probability in Parser"); 1646 // Need xform to put node in hash table 1647 IfNode *iff = create_and_xform_if( control(), tst, prob, cnt ); 1648 assert(iff->_prob > 0.0f,"Optimizer made bad probability in parser"); 1649 // True branch 1650 { PreserveJVMState pjvms(this); 1651 Node* iftrue = _gvn.transform( new IfTrueNode (iff) ); 1652 set_control(iftrue); 1653 1654 if (stopped()) { // Path is dead? 1655 NOT_PRODUCT(explicit_null_checks_elided++); 1656 if (C->eliminate_boxing()) { 1657 // Mark the successor block as parsed 1658 branch_block->next_path_num(); 1659 } 1660 } else { // Path is live. 1661 // Update method data 1662 profile_taken_branch(target_bci); 1663 adjust_map_after_if(btest, c, prob, branch_block); 1664 if (!stopped()) { 1665 merge(target_bci); 1666 } 1667 } 1668 } 1669 1670 // False branch 1671 Node* iffalse = _gvn.transform( new IfFalseNode(iff) ); 1672 set_control(iffalse); 1673 1674 if (stopped()) { // Path is dead? 1675 NOT_PRODUCT(explicit_null_checks_elided++); 1676 if (C->eliminate_boxing()) { 1677 // Mark the successor block as parsed 1678 next_block->next_path_num(); 1679 } 1680 } else { // Path is live. 1681 // Update method data 1682 profile_not_taken_branch(); 1683 adjust_map_after_if(BoolTest(btest).negate(), c, 1.0-prob, next_block); 1684 } 1685 } 1686 1687 //------------------------------------do_if------------------------------------ 1688 void Parse::do_if(BoolTest::mask btest, Node* c, bool new_path, Node** ctrl_taken) { 1689 int target_bci = iter().get_dest(); 1690 1691 Block* branch_block = successor_for_bci(target_bci); 1692 Block* next_block = successor_for_bci(iter().next_bci()); 1693 1694 float cnt; 1695 float prob = branch_prediction(cnt, btest, target_bci, c); 1696 float untaken_prob = 1.0 - prob; 1697 1698 if (prob == PROB_UNKNOWN) { 1699 if (PrintOpto && Verbose) { 1700 tty->print_cr("Never-taken edge stops compilation at bci %d", bci()); 1701 } 1702 repush_if_args(); // to gather stats on loop 1703 // We need to mark this branch as taken so that if we recompile we will 1704 // see that it is possible. In the tiered system the interpreter doesn't 1705 // do profiling and by the time we get to the lower tier from the interpreter 1706 // the path may be cold again. Make sure it doesn't look untaken 1707 profile_taken_branch(target_bci, !ProfileInterpreter); 1708 uncommon_trap(Deoptimization::Reason_unreached, 1709 Deoptimization::Action_reinterpret, 1710 NULL, "cold"); 1711 if (C->eliminate_boxing()) { 1712 // Mark the successor blocks as parsed 1713 branch_block->next_path_num(); 1714 next_block->next_path_num(); 1715 } 1716 return; 1717 } 1718 1719 // Sanity check the probability value 1720 assert(0.0f < prob && prob < 1.0f,"Bad probability in Parser"); 1721 1722 bool taken_if_true = true; 1723 // Convert BoolTest to canonical form: 1724 if (!BoolTest(btest).is_canonical()) { 1725 btest = BoolTest(btest).negate(); 1726 taken_if_true = false; 1727 // prob is NOT updated here; it remains the probability of the taken 1728 // path (as opposed to the prob of the path guarded by an 'IfTrueNode'). 1729 } 1730 assert(btest != BoolTest::eq, "!= is the only canonical exact test"); 1731 1732 Node* tst0 = new BoolNode(c, btest); 1733 Node* tst = _gvn.transform(tst0); 1734 BoolTest::mask taken_btest = BoolTest::illegal; 1735 BoolTest::mask untaken_btest = BoolTest::illegal; 1736 1737 if (tst->is_Bool()) { 1738 // Refresh c from the transformed bool node, since it may be 1739 // simpler than the original c. Also re-canonicalize btest. 1740 // This wins when (Bool ne (Conv2B p) 0) => (Bool ne (CmpP p NULL)). 1741 // That can arise from statements like: if (x instanceof C) ... 1742 if (tst != tst0) { 1743 // Canonicalize one more time since transform can change it. 1744 btest = tst->as_Bool()->_test._test; 1745 if (!BoolTest(btest).is_canonical()) { 1746 // Reverse edges one more time... 1747 tst = _gvn.transform( tst->as_Bool()->negate(&_gvn) ); 1748 btest = tst->as_Bool()->_test._test; 1749 assert(BoolTest(btest).is_canonical(), "sanity"); 1750 taken_if_true = !taken_if_true; 1751 } 1752 c = tst->in(1); 1753 } 1754 BoolTest::mask neg_btest = BoolTest(btest).negate(); 1755 taken_btest = taken_if_true ? btest : neg_btest; 1756 untaken_btest = taken_if_true ? neg_btest : btest; 1757 } 1758 1759 // Generate real control flow 1760 float true_prob = (taken_if_true ? prob : untaken_prob); 1761 IfNode* iff = create_and_map_if(control(), tst, true_prob, cnt); 1762 assert(iff->_prob > 0.0f,"Optimizer made bad probability in parser"); 1763 Node* taken_branch = new IfTrueNode(iff); 1764 Node* untaken_branch = new IfFalseNode(iff); 1765 if (!taken_if_true) { // Finish conversion to canonical form 1766 Node* tmp = taken_branch; 1767 taken_branch = untaken_branch; 1768 untaken_branch = tmp; 1769 } 1770 1771 // Branch is taken: 1772 { PreserveJVMState pjvms(this); 1773 taken_branch = _gvn.transform(taken_branch); 1774 set_control(taken_branch); 1775 1776 if (stopped()) { 1777 if (C->eliminate_boxing() && !new_path) { 1778 // Mark the successor block as parsed (if we haven't created a new path) 1779 branch_block->next_path_num(); 1780 } 1781 } else { 1782 // Update method data 1783 profile_taken_branch(target_bci); 1784 adjust_map_after_if(taken_btest, c, prob, branch_block); 1785 if (!stopped()) { 1786 if (new_path) { 1787 // Merge by using a new path 1788 merge_new_path(target_bci); 1789 } else if (ctrl_taken != NULL) { 1790 // Don't merge but save taken branch to be wired by caller 1791 *ctrl_taken = control(); 1792 } else { 1793 merge(target_bci); 1794 } 1795 } 1796 } 1797 } 1798 1799 untaken_branch = _gvn.transform(untaken_branch); 1800 set_control(untaken_branch); 1801 1802 // Branch not taken. 1803 if (stopped() && ctrl_taken == NULL) { 1804 if (C->eliminate_boxing()) { 1805 // Mark the successor block as parsed (if caller does not re-wire control flow) 1806 next_block->next_path_num(); 1807 } 1808 } else { 1809 // Update method data 1810 profile_not_taken_branch(); 1811 adjust_map_after_if(untaken_btest, c, untaken_prob, next_block); 1812 } 1813 } 1814 1815 void Parse::do_acmp(BoolTest::mask btest, Node* a, Node* b) { 1816 // In the case were both operands might be value types, we need to 1817 // use the new acmp implementation. Otherwise, i.e. if one operand 1818 // is not a value type, we can use the old acmp implementation. 1819 Node* cmp = C->optimize_acmp(&_gvn, a, b); 1820 if (cmp != NULL) { 1821 // Use optimized/old acmp 1822 cmp = optimize_cmp_with_klass(_gvn.transform(cmp)); 1823 do_if(btest, cmp); 1824 return; 1825 } 1826 1827 Node* ctrl = NULL; 1828 bool safe_for_replace = true; 1829 if (!UsePointerPerturbation) { 1830 // Emit old acmp before new acmp for quick a != b check 1831 cmp = CmpP(a, b); 1832 cmp = optimize_cmp_with_klass(_gvn.transform(cmp)); 1833 if (btest == BoolTest::ne) { 1834 do_if(btest, cmp, true); 1835 if (stopped()) { 1836 return; // Never equal 1837 } 1838 } else if (btest == BoolTest::eq) { 1839 Node* is_equal = NULL; 1840 { 1841 PreserveJVMState pjvms(this); 1842 do_if(btest, cmp, false, &is_equal); 1843 if (!stopped()) { 1844 // Not equal, skip valuetype check 1845 ctrl = new RegionNode(3); 1846 ctrl->init_req(1, control()); 1847 _gvn.set_type(ctrl, Type::CONTROL); 1848 record_for_igvn(ctrl); 1849 safe_for_replace = false; 1850 } 1851 } 1852 if (is_equal == NULL) { 1853 assert(ctrl != NULL, "no control left"); 1854 set_control(_gvn.transform(ctrl)); 1855 return; // Never equal 1856 } 1857 set_control(is_equal); 1858 } 1859 } 1860 1861 // Null check operand before loading the is_value bit 1862 bool speculate = false; 1863 if (!TypePtr::NULL_PTR->higher_equal(_gvn.type(b))) { 1864 // Operand 'b' is never null, swap operands to avoid null check 1865 swap(a, b); 1866 } else if (!too_many_traps(Deoptimization::Reason_speculate_null_check)) { 1867 // Speculate on non-nullness of one operand 1868 if (!_gvn.type(a)->speculative_maybe_null()) { 1869 speculate = true; 1870 } else if (!_gvn.type(b)->speculative_maybe_null()) { 1871 speculate = true; 1872 swap(a, b); 1873 } 1874 } 1875 inc_sp(2); 1876 Node* null_ctl = top(); 1877 Node* not_null_a = null_check_oop(a, &null_ctl, speculate, safe_for_replace, speculate); 1878 assert(!stopped(), "operand is always null"); 1879 dec_sp(2); 1880 Node* region = new RegionNode(2); 1881 Node* is_value = new PhiNode(region, TypeX_X); 1882 if (null_ctl != top()) { 1883 assert(!speculate, "should never be null"); 1884 region->add_req(null_ctl); 1885 is_value->add_req(_gvn.MakeConX(0)); 1886 } 1887 1888 Node* value_mask = _gvn.MakeConX(markOopDesc::always_locked_pattern); 1889 if (UsePointerPerturbation) { 1890 Node* mark_addr = basic_plus_adr(not_null_a, oopDesc::mark_offset_in_bytes()); 1891 Node* mark = make_load(NULL, mark_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered); 1892 Node* not_mark = _gvn.transform(new XorXNode(mark, _gvn.MakeConX(-1))); 1893 Node* andn = _gvn.transform(new AndXNode(not_mark, value_mask)); 1894 Node* neg_if_value = _gvn.transform(new SubXNode(andn, _gvn.MakeConX(1))); 1895 is_value->init_req(1, _gvn.transform(new RShiftXNode(neg_if_value, _gvn.intcon(63)))); 1896 } else { 1897 is_value->init_req(1, is_always_locked(not_null_a)); 1898 } 1899 region->init_req(1, control()); 1900 1901 set_control(_gvn.transform(region)); 1902 is_value = _gvn.transform(is_value); 1903 1904 if (UsePointerPerturbation) { 1905 // Perturbe oop if operand is a value type to make comparison fail 1906 Node* pert = _gvn.transform(new AddPNode(a, a, is_value)); 1907 cmp = _gvn.transform(new CmpPNode(pert, b)); 1908 } else { 1909 // Check for a value type because we already know that operands are equal 1910 cmp = _gvn.transform(new CmpXNode(is_value, value_mask)); 1911 btest = (btest == BoolTest::eq) ? BoolTest::ne : BoolTest::eq; 1912 } 1913 cmp = optimize_cmp_with_klass(cmp); 1914 do_if(btest, cmp); 1915 1916 if (ctrl != NULL) { 1917 ctrl->init_req(2, control()); 1918 set_control(_gvn.transform(ctrl)); 1919 } 1920 } 1921 1922 bool Parse::path_is_suitable_for_uncommon_trap(float prob) const { 1923 // Don't want to speculate on uncommon traps when running with -Xcomp 1924 if (!UseInterpreter) { 1925 return false; 1926 } 1927 return (seems_never_taken(prob) && seems_stable_comparison()); 1928 } 1929 1930 void Parse::maybe_add_predicate_after_if(Block* path) { 1931 if (path->is_SEL_head() && path->preds_parsed() == 0) { 1932 // Add predicates at bci of if dominating the loop so traps can be 1933 // recorded on the if's profile data 1934 int bc_depth = repush_if_args(); 1935 add_predicate(); 1936 dec_sp(bc_depth); 1937 path->set_has_predicates(); 1938 } 1939 } 1940 1941 1942 //----------------------------adjust_map_after_if------------------------------ 1943 // Adjust the JVM state to reflect the result of taking this path. 1944 // Basically, it means inspecting the CmpNode controlling this 1945 // branch, seeing how it constrains a tested value, and then 1946 // deciding if it's worth our while to encode this constraint 1947 // as graph nodes in the current abstract interpretation map. 1948 void Parse::adjust_map_after_if(BoolTest::mask btest, Node* c, float prob, Block* path) { 1949 if (!c->is_Cmp()) { 1950 maybe_add_predicate_after_if(path); 1951 return; 1952 } 1953 1954 if (stopped() || btest == BoolTest::illegal) { 1955 return; // nothing to do 1956 } 1957 1958 bool is_fallthrough = (path == successor_for_bci(iter().next_bci())); 1959 1960 if (path_is_suitable_for_uncommon_trap(prob)) { 1961 repush_if_args(); 1962 uncommon_trap(Deoptimization::Reason_unstable_if, 1963 Deoptimization::Action_reinterpret, 1964 NULL, 1965 (is_fallthrough ? "taken always" : "taken never")); 1966 return; 1967 } 1968 1969 Node* val = c->in(1); 1970 Node* con = c->in(2); 1971 const Type* tcon = _gvn.type(con); 1972 const Type* tval = _gvn.type(val); 1973 bool have_con = tcon->singleton(); 1974 if (tval->singleton()) { 1975 if (!have_con) { 1976 // Swap, so constant is in con. 1977 con = val; 1978 tcon = tval; 1979 val = c->in(2); 1980 tval = _gvn.type(val); 1981 btest = BoolTest(btest).commute(); 1982 have_con = true; 1983 } else { 1984 // Do we have two constants? Then leave well enough alone. 1985 have_con = false; 1986 } 1987 } 1988 if (!have_con) { // remaining adjustments need a con 1989 maybe_add_predicate_after_if(path); 1990 return; 1991 } 1992 1993 sharpen_type_after_if(btest, con, tcon, val, tval); 1994 maybe_add_predicate_after_if(path); 1995 } 1996 1997 1998 static Node* extract_obj_from_klass_load(PhaseGVN* gvn, Node* n) { 1999 Node* ldk; 2000 if (n->is_DecodeNKlass()) { 2001 if (n->in(1)->Opcode() != Op_LoadNKlass) { 2002 return NULL; 2003 } else { 2004 ldk = n->in(1); 2005 } 2006 } else if (n->Opcode() != Op_LoadKlass) { 2007 return NULL; 2008 } else { 2009 ldk = n; 2010 } 2011 assert(ldk != NULL && ldk->is_Load(), "should have found a LoadKlass or LoadNKlass node"); 2012 2013 Node* adr = ldk->in(MemNode::Address); 2014 intptr_t off = 0; 2015 Node* obj = AddPNode::Ideal_base_and_offset(adr, gvn, off); 2016 if (obj == NULL || off != oopDesc::klass_offset_in_bytes()) // loading oopDesc::_klass? 2017 return NULL; 2018 const TypePtr* tp = gvn->type(obj)->is_ptr(); 2019 if (tp == NULL || !(tp->isa_instptr() || tp->isa_aryptr())) // is obj a Java object ptr? 2020 return NULL; 2021 2022 return obj; 2023 } 2024 2025 void Parse::sharpen_type_after_if(BoolTest::mask btest, 2026 Node* con, const Type* tcon, 2027 Node* val, const Type* tval) { 2028 // Look for opportunities to sharpen the type of a node 2029 // whose klass is compared with a constant klass. 2030 if (btest == BoolTest::eq && tcon->isa_klassptr()) { 2031 Node* obj = extract_obj_from_klass_load(&_gvn, val); 2032 const TypeOopPtr* con_type = tcon->isa_klassptr()->as_instance_type(); 2033 if (obj != NULL && (con_type->isa_instptr() || con_type->isa_aryptr())) { 2034 // Found: 2035 // Bool(CmpP(LoadKlass(obj._klass), ConP(Foo.klass)), [eq]) 2036 // or the narrowOop equivalent. 2037 const Type* obj_type = _gvn.type(obj); 2038 const TypeOopPtr* tboth = obj_type->join_speculative(con_type)->isa_oopptr(); 2039 if (tboth != NULL && tboth->klass_is_exact() && tboth != obj_type && 2040 tboth->higher_equal(obj_type)) { 2041 // obj has to be of the exact type Foo if the CmpP succeeds. 2042 int obj_in_map = map()->find_edge(obj); 2043 JVMState* jvms = this->jvms(); 2044 if (obj_in_map >= 0 && 2045 (jvms->is_loc(obj_in_map) || jvms->is_stk(obj_in_map))) { 2046 TypeNode* ccast = new CheckCastPPNode(control(), obj, tboth); 2047 const Type* tcc = ccast->as_Type()->type(); 2048 assert(tcc != obj_type && tcc->higher_equal(obj_type), "must improve"); 2049 // Delay transform() call to allow recovery of pre-cast value 2050 // at the control merge. 2051 _gvn.set_type_bottom(ccast); 2052 record_for_igvn(ccast); 2053 // Here's the payoff. 2054 replace_in_map(obj, ccast); 2055 } 2056 } 2057 } 2058 } 2059 2060 int val_in_map = map()->find_edge(val); 2061 if (val_in_map < 0) return; // replace_in_map would be useless 2062 { 2063 JVMState* jvms = this->jvms(); 2064 if (!(jvms->is_loc(val_in_map) || 2065 jvms->is_stk(val_in_map))) 2066 return; // again, it would be useless 2067 } 2068 2069 // Check for a comparison to a constant, and "know" that the compared 2070 // value is constrained on this path. 2071 assert(tcon->singleton(), ""); 2072 ConstraintCastNode* ccast = NULL; 2073 Node* cast = NULL; 2074 2075 switch (btest) { 2076 case BoolTest::eq: // Constant test? 2077 { 2078 const Type* tboth = tcon->join_speculative(tval); 2079 if (tboth == tval) break; // Nothing to gain. 2080 if (tcon->isa_int()) { 2081 ccast = new CastIINode(val, tboth); 2082 } else if (tcon == TypePtr::NULL_PTR) { 2083 // Cast to null, but keep the pointer identity temporarily live. 2084 ccast = new CastPPNode(val, tboth); 2085 } else { 2086 const TypeF* tf = tcon->isa_float_constant(); 2087 const TypeD* td = tcon->isa_double_constant(); 2088 // Exclude tests vs float/double 0 as these could be 2089 // either +0 or -0. Just because you are equal to +0 2090 // doesn't mean you ARE +0! 2091 // Note, following code also replaces Long and Oop values. 2092 if ((!tf || tf->_f != 0.0) && 2093 (!td || td->_d != 0.0)) 2094 cast = con; // Replace non-constant val by con. 2095 } 2096 } 2097 break; 2098 2099 case BoolTest::ne: 2100 if (tcon == TypePtr::NULL_PTR) { 2101 cast = cast_not_null(val, false); 2102 } 2103 break; 2104 2105 default: 2106 // (At this point we could record int range types with CastII.) 2107 break; 2108 } 2109 2110 if (ccast != NULL) { 2111 const Type* tcc = ccast->as_Type()->type(); 2112 assert(tcc != tval && tcc->higher_equal(tval), "must improve"); 2113 // Delay transform() call to allow recovery of pre-cast value 2114 // at the control merge. 2115 ccast->set_req(0, control()); 2116 _gvn.set_type_bottom(ccast); 2117 record_for_igvn(ccast); 2118 cast = ccast; 2119 } 2120 2121 if (cast != NULL) { // Here's the payoff. 2122 replace_in_map(val, cast); 2123 } 2124 } 2125 2126 /** 2127 * Use speculative type to optimize CmpP node: if comparison is 2128 * against the low level class, cast the object to the speculative 2129 * type if any. CmpP should then go away. 2130 * 2131 * @param c expected CmpP node 2132 * @return result of CmpP on object casted to speculative type 2133 * 2134 */ 2135 Node* Parse::optimize_cmp_with_klass(Node* c) { 2136 // If this is transformed by the _gvn to a comparison with the low 2137 // level klass then we may be able to use speculation 2138 if (c->Opcode() == Op_CmpP && 2139 (c->in(1)->Opcode() == Op_LoadKlass || c->in(1)->Opcode() == Op_DecodeNKlass) && 2140 c->in(2)->is_Con()) { 2141 Node* load_klass = NULL; 2142 Node* decode = NULL; 2143 if (c->in(1)->Opcode() == Op_DecodeNKlass) { 2144 decode = c->in(1); 2145 load_klass = c->in(1)->in(1); 2146 } else { 2147 load_klass = c->in(1); 2148 } 2149 if (load_klass->in(2)->is_AddP()) { 2150 Node* addp = load_klass->in(2); 2151 Node* obj = addp->in(AddPNode::Address); 2152 const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr(); 2153 if (obj_type->speculative_type_not_null() != NULL) { 2154 ciKlass* k = obj_type->speculative_type(); 2155 inc_sp(2); 2156 obj = maybe_cast_profiled_obj(obj, k); 2157 dec_sp(2); 2158 if (obj->is_ValueType()) { 2159 assert(obj->as_ValueType()->is_allocated(&_gvn), "must be allocated"); 2160 obj = obj->as_ValueType()->get_oop(); 2161 } 2162 // Make the CmpP use the casted obj 2163 addp = basic_plus_adr(obj, addp->in(AddPNode::Offset)); 2164 load_klass = load_klass->clone(); 2165 load_klass->set_req(2, addp); 2166 load_klass = _gvn.transform(load_klass); 2167 if (decode != NULL) { 2168 decode = decode->clone(); 2169 decode->set_req(1, load_klass); 2170 load_klass = _gvn.transform(decode); 2171 } 2172 c = c->clone(); 2173 c->set_req(1, load_klass); 2174 c = _gvn.transform(c); 2175 } 2176 } 2177 } 2178 return c; 2179 } 2180 2181 //------------------------------do_one_bytecode-------------------------------- 2182 // Parse this bytecode, and alter the Parsers JVM->Node mapping 2183 void Parse::do_one_bytecode() { 2184 Node *a, *b, *c, *d; // Handy temps 2185 BoolTest::mask btest; 2186 int i; 2187 2188 assert(!has_exceptions(), "bytecode entry state must be clear of throws"); 2189 2190 if (C->check_node_count(NodeLimitFudgeFactor * 5, 2191 "out of nodes parsing method")) { 2192 return; 2193 } 2194 2195 #ifdef ASSERT 2196 // for setting breakpoints 2197 if (TraceOptoParse) { 2198 tty->print(" @"); 2199 dump_bci(bci()); 2200 tty->cr(); 2201 } 2202 #endif 2203 2204 switch (bc()) { 2205 case Bytecodes::_nop: 2206 // do nothing 2207 break; 2208 case Bytecodes::_lconst_0: 2209 push_pair(longcon(0)); 2210 break; 2211 2212 case Bytecodes::_lconst_1: 2213 push_pair(longcon(1)); 2214 break; 2215 2216 case Bytecodes::_fconst_0: 2217 push(zerocon(T_FLOAT)); 2218 break; 2219 2220 case Bytecodes::_fconst_1: 2221 push(makecon(TypeF::ONE)); 2222 break; 2223 2224 case Bytecodes::_fconst_2: 2225 push(makecon(TypeF::make(2.0f))); 2226 break; 2227 2228 case Bytecodes::_dconst_0: 2229 push_pair(zerocon(T_DOUBLE)); 2230 break; 2231 2232 case Bytecodes::_dconst_1: 2233 push_pair(makecon(TypeD::ONE)); 2234 break; 2235 2236 case Bytecodes::_iconst_m1:push(intcon(-1)); break; 2237 case Bytecodes::_iconst_0: push(intcon( 0)); break; 2238 case Bytecodes::_iconst_1: push(intcon( 1)); break; 2239 case Bytecodes::_iconst_2: push(intcon( 2)); break; 2240 case Bytecodes::_iconst_3: push(intcon( 3)); break; 2241 case Bytecodes::_iconst_4: push(intcon( 4)); break; 2242 case Bytecodes::_iconst_5: push(intcon( 5)); break; 2243 case Bytecodes::_bipush: push(intcon(iter().get_constant_u1())); break; 2244 case Bytecodes::_sipush: push(intcon(iter().get_constant_u2())); break; 2245 case Bytecodes::_aconst_null: push(null()); break; 2246 case Bytecodes::_ldc: 2247 case Bytecodes::_ldc_w: 2248 case Bytecodes::_ldc2_w: 2249 // If the constant is unresolved, run this BC once in the interpreter. 2250 { 2251 ciConstant constant = iter().get_constant(); 2252 if (!constant.is_valid() || 2253 (constant.basic_type() == T_OBJECT && 2254 !constant.as_object()->is_loaded())) { 2255 int index = iter().get_constant_pool_index(); 2256 constantTag tag = iter().get_constant_pool_tag(index); 2257 uncommon_trap(Deoptimization::make_trap_request 2258 (Deoptimization::Reason_unloaded, 2259 Deoptimization::Action_reinterpret, 2260 index), 2261 NULL, tag.internal_name()); 2262 break; 2263 } 2264 assert(constant.basic_type() != T_OBJECT || constant.as_object()->is_instance(), 2265 "must be java_mirror of klass"); 2266 const Type* con_type = Type::make_from_constant(constant); 2267 if (con_type != NULL) { 2268 push_node(con_type->basic_type(), makecon(con_type)); 2269 } 2270 } 2271 2272 break; 2273 2274 case Bytecodes::_aload_0: 2275 push( local(0) ); 2276 break; 2277 case Bytecodes::_aload_1: 2278 push( local(1) ); 2279 break; 2280 case Bytecodes::_aload_2: 2281 push( local(2) ); 2282 break; 2283 case Bytecodes::_aload_3: 2284 push( local(3) ); 2285 break; 2286 case Bytecodes::_aload: 2287 push( local(iter().get_index()) ); 2288 break; 2289 2290 case Bytecodes::_fload_0: 2291 case Bytecodes::_iload_0: 2292 push( local(0) ); 2293 break; 2294 case Bytecodes::_fload_1: 2295 case Bytecodes::_iload_1: 2296 push( local(1) ); 2297 break; 2298 case Bytecodes::_fload_2: 2299 case Bytecodes::_iload_2: 2300 push( local(2) ); 2301 break; 2302 case Bytecodes::_fload_3: 2303 case Bytecodes::_iload_3: 2304 push( local(3) ); 2305 break; 2306 case Bytecodes::_fload: 2307 case Bytecodes::_iload: 2308 push( local(iter().get_index()) ); 2309 break; 2310 case Bytecodes::_lload_0: 2311 push_pair_local( 0 ); 2312 break; 2313 case Bytecodes::_lload_1: 2314 push_pair_local( 1 ); 2315 break; 2316 case Bytecodes::_lload_2: 2317 push_pair_local( 2 ); 2318 break; 2319 case Bytecodes::_lload_3: 2320 push_pair_local( 3 ); 2321 break; 2322 case Bytecodes::_lload: 2323 push_pair_local( iter().get_index() ); 2324 break; 2325 2326 case Bytecodes::_dload_0: 2327 push_pair_local(0); 2328 break; 2329 case Bytecodes::_dload_1: 2330 push_pair_local(1); 2331 break; 2332 case Bytecodes::_dload_2: 2333 push_pair_local(2); 2334 break; 2335 case Bytecodes::_dload_3: 2336 push_pair_local(3); 2337 break; 2338 case Bytecodes::_dload: 2339 push_pair_local(iter().get_index()); 2340 break; 2341 case Bytecodes::_fstore_0: 2342 case Bytecodes::_istore_0: 2343 case Bytecodes::_astore_0: 2344 set_local( 0, pop() ); 2345 break; 2346 case Bytecodes::_fstore_1: 2347 case Bytecodes::_istore_1: 2348 case Bytecodes::_astore_1: 2349 set_local( 1, pop() ); 2350 break; 2351 case Bytecodes::_fstore_2: 2352 case Bytecodes::_istore_2: 2353 case Bytecodes::_astore_2: 2354 set_local( 2, pop() ); 2355 break; 2356 case Bytecodes::_fstore_3: 2357 case Bytecodes::_istore_3: 2358 case Bytecodes::_astore_3: 2359 set_local( 3, pop() ); 2360 break; 2361 case Bytecodes::_fstore: 2362 case Bytecodes::_istore: 2363 case Bytecodes::_astore: 2364 set_local( iter().get_index(), pop() ); 2365 break; 2366 // long stores 2367 case Bytecodes::_lstore_0: 2368 set_pair_local( 0, pop_pair() ); 2369 break; 2370 case Bytecodes::_lstore_1: 2371 set_pair_local( 1, pop_pair() ); 2372 break; 2373 case Bytecodes::_lstore_2: 2374 set_pair_local( 2, pop_pair() ); 2375 break; 2376 case Bytecodes::_lstore_3: 2377 set_pair_local( 3, pop_pair() ); 2378 break; 2379 case Bytecodes::_lstore: 2380 set_pair_local( iter().get_index(), pop_pair() ); 2381 break; 2382 2383 // double stores 2384 case Bytecodes::_dstore_0: 2385 set_pair_local( 0, dstore_rounding(pop_pair()) ); 2386 break; 2387 case Bytecodes::_dstore_1: 2388 set_pair_local( 1, dstore_rounding(pop_pair()) ); 2389 break; 2390 case Bytecodes::_dstore_2: 2391 set_pair_local( 2, dstore_rounding(pop_pair()) ); 2392 break; 2393 case Bytecodes::_dstore_3: 2394 set_pair_local( 3, dstore_rounding(pop_pair()) ); 2395 break; 2396 case Bytecodes::_dstore: 2397 set_pair_local( iter().get_index(), dstore_rounding(pop_pair()) ); 2398 break; 2399 2400 case Bytecodes::_pop: dec_sp(1); break; 2401 case Bytecodes::_pop2: dec_sp(2); break; 2402 case Bytecodes::_swap: 2403 a = pop(); 2404 b = pop(); 2405 push(a); 2406 push(b); 2407 break; 2408 case Bytecodes::_dup: 2409 a = pop(); 2410 push(a); 2411 push(a); 2412 break; 2413 case Bytecodes::_dup_x1: 2414 a = pop(); 2415 b = pop(); 2416 push( a ); 2417 push( b ); 2418 push( a ); 2419 break; 2420 case Bytecodes::_dup_x2: 2421 a = pop(); 2422 b = pop(); 2423 c = pop(); 2424 push( a ); 2425 push( c ); 2426 push( b ); 2427 push( a ); 2428 break; 2429 case Bytecodes::_dup2: 2430 a = pop(); 2431 b = pop(); 2432 push( b ); 2433 push( a ); 2434 push( b ); 2435 push( a ); 2436 break; 2437 2438 case Bytecodes::_dup2_x1: 2439 // before: .. c, b, a 2440 // after: .. b, a, c, b, a 2441 // not tested 2442 a = pop(); 2443 b = pop(); 2444 c = pop(); 2445 push( b ); 2446 push( a ); 2447 push( c ); 2448 push( b ); 2449 push( a ); 2450 break; 2451 case Bytecodes::_dup2_x2: 2452 // before: .. d, c, b, a 2453 // after: .. b, a, d, c, b, a 2454 // not tested 2455 a = pop(); 2456 b = pop(); 2457 c = pop(); 2458 d = pop(); 2459 push( b ); 2460 push( a ); 2461 push( d ); 2462 push( c ); 2463 push( b ); 2464 push( a ); 2465 break; 2466 2467 case Bytecodes::_arraylength: { 2468 // Must do null-check with value on expression stack 2469 Node *ary = null_check(peek(), T_ARRAY); 2470 // Compile-time detect of null-exception? 2471 if (stopped()) return; 2472 a = pop(); 2473 push(load_array_length(a)); 2474 break; 2475 } 2476 2477 case Bytecodes::_baload: array_load(T_BYTE); break; 2478 case Bytecodes::_caload: array_load(T_CHAR); break; 2479 case Bytecodes::_iaload: array_load(T_INT); break; 2480 case Bytecodes::_saload: array_load(T_SHORT); break; 2481 case Bytecodes::_faload: array_load(T_FLOAT); break; 2482 case Bytecodes::_aaload: array_load(T_OBJECT); break; 2483 case Bytecodes::_laload: array_load(T_LONG); break; 2484 case Bytecodes::_daload: array_load(T_DOUBLE); break; 2485 case Bytecodes::_bastore: array_store(T_BYTE); break; 2486 case Bytecodes::_castore: array_store(T_CHAR); break; 2487 case Bytecodes::_iastore: array_store(T_INT); break; 2488 case Bytecodes::_sastore: array_store(T_SHORT); break; 2489 case Bytecodes::_fastore: array_store(T_FLOAT); break; 2490 case Bytecodes::_aastore: array_store(T_OBJECT); break; 2491 case Bytecodes::_lastore: array_store(T_LONG); break; 2492 case Bytecodes::_dastore: array_store(T_DOUBLE); break; 2493 2494 case Bytecodes::_getfield: 2495 do_getfield(); 2496 break; 2497 2498 case Bytecodes::_getstatic: 2499 do_getstatic(); 2500 break; 2501 2502 case Bytecodes::_putfield: 2503 do_putfield(); 2504 break; 2505 2506 case Bytecodes::_putstatic: 2507 do_putstatic(); 2508 break; 2509 2510 case Bytecodes::_irem: 2511 do_irem(); 2512 break; 2513 case Bytecodes::_idiv: 2514 // Must keep both values on the expression-stack during null-check 2515 zero_check_int(peek()); 2516 // Compile-time detect of null-exception? 2517 if (stopped()) return; 2518 b = pop(); 2519 a = pop(); 2520 push( _gvn.transform( new DivINode(control(),a,b) ) ); 2521 break; 2522 case Bytecodes::_imul: 2523 b = pop(); a = pop(); 2524 push( _gvn.transform( new MulINode(a,b) ) ); 2525 break; 2526 case Bytecodes::_iadd: 2527 b = pop(); a = pop(); 2528 push( _gvn.transform( new AddINode(a,b) ) ); 2529 break; 2530 case Bytecodes::_ineg: 2531 a = pop(); 2532 push( _gvn.transform( new SubINode(_gvn.intcon(0),a)) ); 2533 break; 2534 case Bytecodes::_isub: 2535 b = pop(); a = pop(); 2536 push( _gvn.transform( new SubINode(a,b) ) ); 2537 break; 2538 case Bytecodes::_iand: 2539 b = pop(); a = pop(); 2540 push( _gvn.transform( new AndINode(a,b) ) ); 2541 break; 2542 case Bytecodes::_ior: 2543 b = pop(); a = pop(); 2544 push( _gvn.transform( new OrINode(a,b) ) ); 2545 break; 2546 case Bytecodes::_ixor: 2547 b = pop(); a = pop(); 2548 push( _gvn.transform( new XorINode(a,b) ) ); 2549 break; 2550 case Bytecodes::_ishl: 2551 b = pop(); a = pop(); 2552 push( _gvn.transform( new LShiftINode(a,b) ) ); 2553 break; 2554 case Bytecodes::_ishr: 2555 b = pop(); a = pop(); 2556 push( _gvn.transform( new RShiftINode(a,b) ) ); 2557 break; 2558 case Bytecodes::_iushr: 2559 b = pop(); a = pop(); 2560 push( _gvn.transform( new URShiftINode(a,b) ) ); 2561 break; 2562 2563 case Bytecodes::_fneg: 2564 a = pop(); 2565 b = _gvn.transform(new NegFNode (a)); 2566 push(b); 2567 break; 2568 2569 case Bytecodes::_fsub: 2570 b = pop(); 2571 a = pop(); 2572 c = _gvn.transform( new SubFNode(a,b) ); 2573 d = precision_rounding(c); 2574 push( d ); 2575 break; 2576 2577 case Bytecodes::_fadd: 2578 b = pop(); 2579 a = pop(); 2580 c = _gvn.transform( new AddFNode(a,b) ); 2581 d = precision_rounding(c); 2582 push( d ); 2583 break; 2584 2585 case Bytecodes::_fmul: 2586 b = pop(); 2587 a = pop(); 2588 c = _gvn.transform( new MulFNode(a,b) ); 2589 d = precision_rounding(c); 2590 push( d ); 2591 break; 2592 2593 case Bytecodes::_fdiv: 2594 b = pop(); 2595 a = pop(); 2596 c = _gvn.transform( new DivFNode(0,a,b) ); 2597 d = precision_rounding(c); 2598 push( d ); 2599 break; 2600 2601 case Bytecodes::_frem: 2602 if (Matcher::has_match_rule(Op_ModF)) { 2603 // Generate a ModF node. 2604 b = pop(); 2605 a = pop(); 2606 c = _gvn.transform( new ModFNode(0,a,b) ); 2607 d = precision_rounding(c); 2608 push( d ); 2609 } 2610 else { 2611 // Generate a call. 2612 modf(); 2613 } 2614 break; 2615 2616 case Bytecodes::_fcmpl: 2617 b = pop(); 2618 a = pop(); 2619 c = _gvn.transform( new CmpF3Node( a, b)); 2620 push(c); 2621 break; 2622 case Bytecodes::_fcmpg: 2623 b = pop(); 2624 a = pop(); 2625 2626 // Same as fcmpl but need to flip the unordered case. Swap the inputs, 2627 // which negates the result sign except for unordered. Flip the unordered 2628 // as well by using CmpF3 which implements unordered-lesser instead of 2629 // unordered-greater semantics. Finally, commute the result bits. Result 2630 // is same as using a CmpF3Greater except we did it with CmpF3 alone. 2631 c = _gvn.transform( new CmpF3Node( b, a)); 2632 c = _gvn.transform( new SubINode(_gvn.intcon(0),c) ); 2633 push(c); 2634 break; 2635 2636 case Bytecodes::_f2i: 2637 a = pop(); 2638 push(_gvn.transform(new ConvF2INode(a))); 2639 break; 2640 2641 case Bytecodes::_d2i: 2642 a = pop_pair(); 2643 b = _gvn.transform(new ConvD2INode(a)); 2644 push( b ); 2645 break; 2646 2647 case Bytecodes::_f2d: 2648 a = pop(); 2649 b = _gvn.transform( new ConvF2DNode(a)); 2650 push_pair( b ); 2651 break; 2652 2653 case Bytecodes::_d2f: 2654 a = pop_pair(); 2655 b = _gvn.transform( new ConvD2FNode(a)); 2656 // This breaks _227_mtrt (speed & correctness) and _222_mpegaudio (speed) 2657 //b = _gvn.transform(new RoundFloatNode(0, b) ); 2658 push( b ); 2659 break; 2660 2661 case Bytecodes::_l2f: 2662 if (Matcher::convL2FSupported()) { 2663 a = pop_pair(); 2664 b = _gvn.transform( new ConvL2FNode(a)); 2665 // For i486.ad, FILD doesn't restrict precision to 24 or 53 bits. 2666 // Rather than storing the result into an FP register then pushing 2667 // out to memory to round, the machine instruction that implements 2668 // ConvL2D is responsible for rounding. 2669 // c = precision_rounding(b); 2670 c = _gvn.transform(b); 2671 push(c); 2672 } else { 2673 l2f(); 2674 } 2675 break; 2676 2677 case Bytecodes::_l2d: 2678 a = pop_pair(); 2679 b = _gvn.transform( new ConvL2DNode(a)); 2680 // For i486.ad, rounding is always necessary (see _l2f above). 2681 // c = dprecision_rounding(b); 2682 c = _gvn.transform(b); 2683 push_pair(c); 2684 break; 2685 2686 case Bytecodes::_f2l: 2687 a = pop(); 2688 b = _gvn.transform( new ConvF2LNode(a)); 2689 push_pair(b); 2690 break; 2691 2692 case Bytecodes::_d2l: 2693 a = pop_pair(); 2694 b = _gvn.transform( new ConvD2LNode(a)); 2695 push_pair(b); 2696 break; 2697 2698 case Bytecodes::_dsub: 2699 b = pop_pair(); 2700 a = pop_pair(); 2701 c = _gvn.transform( new SubDNode(a,b) ); 2702 d = dprecision_rounding(c); 2703 push_pair( d ); 2704 break; 2705 2706 case Bytecodes::_dadd: 2707 b = pop_pair(); 2708 a = pop_pair(); 2709 c = _gvn.transform( new AddDNode(a,b) ); 2710 d = dprecision_rounding(c); 2711 push_pair( d ); 2712 break; 2713 2714 case Bytecodes::_dmul: 2715 b = pop_pair(); 2716 a = pop_pair(); 2717 c = _gvn.transform( new MulDNode(a,b) ); 2718 d = dprecision_rounding(c); 2719 push_pair( d ); 2720 break; 2721 2722 case Bytecodes::_ddiv: 2723 b = pop_pair(); 2724 a = pop_pair(); 2725 c = _gvn.transform( new DivDNode(0,a,b) ); 2726 d = dprecision_rounding(c); 2727 push_pair( d ); 2728 break; 2729 2730 case Bytecodes::_dneg: 2731 a = pop_pair(); 2732 b = _gvn.transform(new NegDNode (a)); 2733 push_pair(b); 2734 break; 2735 2736 case Bytecodes::_drem: 2737 if (Matcher::has_match_rule(Op_ModD)) { 2738 // Generate a ModD node. 2739 b = pop_pair(); 2740 a = pop_pair(); 2741 // a % b 2742 2743 c = _gvn.transform( new ModDNode(0,a,b) ); 2744 d = dprecision_rounding(c); 2745 push_pair( d ); 2746 } 2747 else { 2748 // Generate a call. 2749 modd(); 2750 } 2751 break; 2752 2753 case Bytecodes::_dcmpl: 2754 b = pop_pair(); 2755 a = pop_pair(); 2756 c = _gvn.transform( new CmpD3Node( a, b)); 2757 push(c); 2758 break; 2759 2760 case Bytecodes::_dcmpg: 2761 b = pop_pair(); 2762 a = pop_pair(); 2763 // Same as dcmpl but need to flip the unordered case. 2764 // Commute the inputs, which negates the result sign except for unordered. 2765 // Flip the unordered as well by using CmpD3 which implements 2766 // unordered-lesser instead of unordered-greater semantics. 2767 // Finally, negate the result bits. Result is same as using a 2768 // CmpD3Greater except we did it with CmpD3 alone. 2769 c = _gvn.transform( new CmpD3Node( b, a)); 2770 c = _gvn.transform( new SubINode(_gvn.intcon(0),c) ); 2771 push(c); 2772 break; 2773 2774 2775 // Note for longs -> lo word is on TOS, hi word is on TOS - 1 2776 case Bytecodes::_land: 2777 b = pop_pair(); 2778 a = pop_pair(); 2779 c = _gvn.transform( new AndLNode(a,b) ); 2780 push_pair(c); 2781 break; 2782 case Bytecodes::_lor: 2783 b = pop_pair(); 2784 a = pop_pair(); 2785 c = _gvn.transform( new OrLNode(a,b) ); 2786 push_pair(c); 2787 break; 2788 case Bytecodes::_lxor: 2789 b = pop_pair(); 2790 a = pop_pair(); 2791 c = _gvn.transform( new XorLNode(a,b) ); 2792 push_pair(c); 2793 break; 2794 2795 case Bytecodes::_lshl: 2796 b = pop(); // the shift count 2797 a = pop_pair(); // value to be shifted 2798 c = _gvn.transform( new LShiftLNode(a,b) ); 2799 push_pair(c); 2800 break; 2801 case Bytecodes::_lshr: 2802 b = pop(); // the shift count 2803 a = pop_pair(); // value to be shifted 2804 c = _gvn.transform( new RShiftLNode(a,b) ); 2805 push_pair(c); 2806 break; 2807 case Bytecodes::_lushr: 2808 b = pop(); // the shift count 2809 a = pop_pair(); // value to be shifted 2810 c = _gvn.transform( new URShiftLNode(a,b) ); 2811 push_pair(c); 2812 break; 2813 case Bytecodes::_lmul: 2814 b = pop_pair(); 2815 a = pop_pair(); 2816 c = _gvn.transform( new MulLNode(a,b) ); 2817 push_pair(c); 2818 break; 2819 2820 case Bytecodes::_lrem: 2821 // Must keep both values on the expression-stack during null-check 2822 assert(peek(0) == top(), "long word order"); 2823 zero_check_long(peek(1)); 2824 // Compile-time detect of null-exception? 2825 if (stopped()) return; 2826 b = pop_pair(); 2827 a = pop_pair(); 2828 c = _gvn.transform( new ModLNode(control(),a,b) ); 2829 push_pair(c); 2830 break; 2831 2832 case Bytecodes::_ldiv: 2833 // Must keep both values on the expression-stack during null-check 2834 assert(peek(0) == top(), "long word order"); 2835 zero_check_long(peek(1)); 2836 // Compile-time detect of null-exception? 2837 if (stopped()) return; 2838 b = pop_pair(); 2839 a = pop_pair(); 2840 c = _gvn.transform( new DivLNode(control(),a,b) ); 2841 push_pair(c); 2842 break; 2843 2844 case Bytecodes::_ladd: 2845 b = pop_pair(); 2846 a = pop_pair(); 2847 c = _gvn.transform( new AddLNode(a,b) ); 2848 push_pair(c); 2849 break; 2850 case Bytecodes::_lsub: 2851 b = pop_pair(); 2852 a = pop_pair(); 2853 c = _gvn.transform( new SubLNode(a,b) ); 2854 push_pair(c); 2855 break; 2856 case Bytecodes::_lcmp: 2857 // Safepoints are now inserted _before_ branches. The long-compare 2858 // bytecode painfully produces a 3-way value (-1,0,+1) which requires a 2859 // slew of control flow. These are usually followed by a CmpI vs zero and 2860 // a branch; this pattern then optimizes to the obvious long-compare and 2861 // branch. However, if the branch is backwards there's a Safepoint 2862 // inserted. The inserted Safepoint captures the JVM state at the 2863 // pre-branch point, i.e. it captures the 3-way value. Thus if a 2864 // long-compare is used to control a loop the debug info will force 2865 // computation of the 3-way value, even though the generated code uses a 2866 // long-compare and branch. We try to rectify the situation by inserting 2867 // a SafePoint here and have it dominate and kill the safepoint added at a 2868 // following backwards branch. At this point the JVM state merely holds 2 2869 // longs but not the 3-way value. 2870 if( UseLoopSafepoints ) { 2871 switch( iter().next_bc() ) { 2872 case Bytecodes::_ifgt: 2873 case Bytecodes::_iflt: 2874 case Bytecodes::_ifge: 2875 case Bytecodes::_ifle: 2876 case Bytecodes::_ifne: 2877 case Bytecodes::_ifeq: 2878 // If this is a backwards branch in the bytecodes, add Safepoint 2879 maybe_add_safepoint(iter().next_get_dest()); 2880 default: 2881 break; 2882 } 2883 } 2884 b = pop_pair(); 2885 a = pop_pair(); 2886 c = _gvn.transform( new CmpL3Node( a, b )); 2887 push(c); 2888 break; 2889 2890 case Bytecodes::_lneg: 2891 a = pop_pair(); 2892 b = _gvn.transform( new SubLNode(longcon(0),a)); 2893 push_pair(b); 2894 break; 2895 case Bytecodes::_l2i: 2896 a = pop_pair(); 2897 push( _gvn.transform( new ConvL2INode(a))); 2898 break; 2899 case Bytecodes::_i2l: 2900 a = pop(); 2901 b = _gvn.transform( new ConvI2LNode(a)); 2902 push_pair(b); 2903 break; 2904 case Bytecodes::_i2b: 2905 // Sign extend 2906 a = pop(); 2907 a = _gvn.transform( new LShiftINode(a,_gvn.intcon(24)) ); 2908 a = _gvn.transform( new RShiftINode(a,_gvn.intcon(24)) ); 2909 push( a ); 2910 break; 2911 case Bytecodes::_i2s: 2912 a = pop(); 2913 a = _gvn.transform( new LShiftINode(a,_gvn.intcon(16)) ); 2914 a = _gvn.transform( new RShiftINode(a,_gvn.intcon(16)) ); 2915 push( a ); 2916 break; 2917 case Bytecodes::_i2c: 2918 a = pop(); 2919 push( _gvn.transform( new AndINode(a,_gvn.intcon(0xFFFF)) ) ); 2920 break; 2921 2922 case Bytecodes::_i2f: 2923 a = pop(); 2924 b = _gvn.transform( new ConvI2FNode(a) ) ; 2925 c = precision_rounding(b); 2926 push (b); 2927 break; 2928 2929 case Bytecodes::_i2d: 2930 a = pop(); 2931 b = _gvn.transform( new ConvI2DNode(a)); 2932 push_pair(b); 2933 break; 2934 2935 case Bytecodes::_iinc: // Increment local 2936 i = iter().get_index(); // Get local index 2937 set_local( i, _gvn.transform( new AddINode( _gvn.intcon(iter().get_iinc_con()), local(i) ) ) ); 2938 break; 2939 2940 // Exit points of synchronized methods must have an unlock node 2941 case Bytecodes::_return: 2942 return_current(NULL); 2943 break; 2944 2945 case Bytecodes::_ireturn: 2946 case Bytecodes::_areturn: 2947 case Bytecodes::_freturn: 2948 return_current(pop()); 2949 break; 2950 case Bytecodes::_lreturn: 2951 return_current(pop_pair()); 2952 break; 2953 case Bytecodes::_dreturn: 2954 return_current(pop_pair()); 2955 break; 2956 2957 case Bytecodes::_athrow: 2958 // null exception oop throws NULL pointer exception 2959 null_check(peek()); 2960 if (stopped()) return; 2961 // Hook the thrown exception directly to subsequent handlers. 2962 if (BailoutToInterpreterForThrows) { 2963 // Keep method interpreted from now on. 2964 uncommon_trap(Deoptimization::Reason_unhandled, 2965 Deoptimization::Action_make_not_compilable); 2966 return; 2967 } 2968 if (env()->jvmti_can_post_on_exceptions()) { 2969 // check if we must post exception events, take uncommon trap if so (with must_throw = false) 2970 uncommon_trap_if_should_post_on_exceptions(Deoptimization::Reason_unhandled, false); 2971 } 2972 // Here if either can_post_on_exceptions or should_post_on_exceptions is false 2973 add_exception_state(make_exception_state(peek())); 2974 break; 2975 2976 case Bytecodes::_goto: // fall through 2977 case Bytecodes::_goto_w: { 2978 int target_bci = (bc() == Bytecodes::_goto) ? iter().get_dest() : iter().get_far_dest(); 2979 2980 // If this is a backwards branch in the bytecodes, add Safepoint 2981 maybe_add_safepoint(target_bci); 2982 2983 // Update method data 2984 profile_taken_branch(target_bci); 2985 2986 // Merge the current control into the target basic block 2987 merge(target_bci); 2988 2989 // See if we can get some profile data and hand it off to the next block 2990 Block *target_block = block()->successor_for_bci(target_bci); 2991 if (target_block->pred_count() != 1) break; 2992 ciMethodData* methodData = method()->method_data(); 2993 if (!methodData->is_mature()) break; 2994 ciProfileData* data = methodData->bci_to_data(bci()); 2995 assert(data != NULL && data->is_JumpData(), "need JumpData for taken branch"); 2996 int taken = ((ciJumpData*)data)->taken(); 2997 taken = method()->scale_count(taken); 2998 target_block->set_count(taken); 2999 break; 3000 } 3001 3002 case Bytecodes::_ifnull: btest = BoolTest::eq; goto handle_if_null; 3003 case Bytecodes::_ifnonnull: btest = BoolTest::ne; goto handle_if_null; 3004 handle_if_null: 3005 // If this is a backwards branch in the bytecodes, add Safepoint 3006 maybe_add_safepoint(iter().get_dest()); 3007 a = null(); 3008 b = pop(); 3009 if (b->is_ValueType()) { 3010 // Return constant false because 'b' is always non-null 3011 c = _gvn.makecon(TypeInt::CC_GT); 3012 } else { 3013 if (!_gvn.type(b)->speculative_maybe_null() && 3014 !too_many_traps(Deoptimization::Reason_speculate_null_check)) { 3015 inc_sp(1); 3016 Node* null_ctl = top(); 3017 b = null_check_oop(b, &null_ctl, true, true, true); 3018 assert(null_ctl->is_top(), "no null control here"); 3019 dec_sp(1); 3020 } else if (_gvn.type(b)->speculative_always_null() && 3021 !too_many_traps(Deoptimization::Reason_speculate_null_assert)) { 3022 inc_sp(1); 3023 b = null_assert(b); 3024 dec_sp(1); 3025 } 3026 c = _gvn.transform( new CmpPNode(b, a) ); 3027 } 3028 do_ifnull(btest, c); 3029 break; 3030 3031 case Bytecodes::_if_acmpeq: btest = BoolTest::eq; goto handle_if_acmp; 3032 case Bytecodes::_if_acmpne: btest = BoolTest::ne; goto handle_if_acmp; 3033 handle_if_acmp: 3034 // If this is a backwards branch in the bytecodes, add Safepoint 3035 maybe_add_safepoint(iter().get_dest()); 3036 a = pop(); 3037 b = pop(); 3038 do_acmp(btest, a, b); 3039 break; 3040 3041 case Bytecodes::_ifeq: btest = BoolTest::eq; goto handle_ifxx; 3042 case Bytecodes::_ifne: btest = BoolTest::ne; goto handle_ifxx; 3043 case Bytecodes::_iflt: btest = BoolTest::lt; goto handle_ifxx; 3044 case Bytecodes::_ifle: btest = BoolTest::le; goto handle_ifxx; 3045 case Bytecodes::_ifgt: btest = BoolTest::gt; goto handle_ifxx; 3046 case Bytecodes::_ifge: btest = BoolTest::ge; goto handle_ifxx; 3047 handle_ifxx: 3048 // If this is a backwards branch in the bytecodes, add Safepoint 3049 maybe_add_safepoint(iter().get_dest()); 3050 a = _gvn.intcon(0); 3051 b = pop(); 3052 c = _gvn.transform( new CmpINode(b, a) ); 3053 do_if(btest, c); 3054 break; 3055 3056 case Bytecodes::_if_icmpeq: btest = BoolTest::eq; goto handle_if_icmp; 3057 case Bytecodes::_if_icmpne: btest = BoolTest::ne; goto handle_if_icmp; 3058 case Bytecodes::_if_icmplt: btest = BoolTest::lt; goto handle_if_icmp; 3059 case Bytecodes::_if_icmple: btest = BoolTest::le; goto handle_if_icmp; 3060 case Bytecodes::_if_icmpgt: btest = BoolTest::gt; goto handle_if_icmp; 3061 case Bytecodes::_if_icmpge: btest = BoolTest::ge; goto handle_if_icmp; 3062 handle_if_icmp: 3063 // If this is a backwards branch in the bytecodes, add Safepoint 3064 maybe_add_safepoint(iter().get_dest()); 3065 a = pop(); 3066 b = pop(); 3067 c = _gvn.transform( new CmpINode( b, a ) ); 3068 do_if(btest, c); 3069 break; 3070 3071 case Bytecodes::_tableswitch: 3072 do_tableswitch(); 3073 break; 3074 3075 case Bytecodes::_lookupswitch: 3076 do_lookupswitch(); 3077 break; 3078 3079 case Bytecodes::_invokestatic: 3080 case Bytecodes::_invokedynamic: 3081 case Bytecodes::_invokespecial: 3082 case Bytecodes::_invokevirtual: 3083 case Bytecodes::_invokeinterface: 3084 do_call(); 3085 break; 3086 case Bytecodes::_checkcast: 3087 do_checkcast(); 3088 break; 3089 case Bytecodes::_instanceof: 3090 do_instanceof(); 3091 break; 3092 case Bytecodes::_anewarray: 3093 do_newarray(); 3094 break; 3095 case Bytecodes::_newarray: 3096 do_newarray((BasicType)iter().get_index()); 3097 break; 3098 case Bytecodes::_multianewarray: 3099 do_multianewarray(); 3100 break; 3101 case Bytecodes::_new: 3102 do_new(); 3103 break; 3104 case Bytecodes::_defaultvalue: 3105 do_defaultvalue(); 3106 break; 3107 case Bytecodes::_withfield: 3108 do_withfield(); 3109 break; 3110 3111 case Bytecodes::_jsr: 3112 case Bytecodes::_jsr_w: 3113 do_jsr(); 3114 break; 3115 3116 case Bytecodes::_ret: 3117 do_ret(); 3118 break; 3119 3120 3121 case Bytecodes::_monitorenter: 3122 do_monitor_enter(); 3123 break; 3124 3125 case Bytecodes::_monitorexit: 3126 do_monitor_exit(); 3127 break; 3128 3129 case Bytecodes::_breakpoint: 3130 // Breakpoint set concurrently to compile 3131 // %%% use an uncommon trap? 3132 C->record_failure("breakpoint in method"); 3133 return; 3134 3135 default: 3136 #ifndef PRODUCT 3137 map()->dump(99); 3138 #endif 3139 tty->print("\nUnhandled bytecode %s\n", Bytecodes::name(bc()) ); 3140 ShouldNotReachHere(); 3141 } 3142 3143 #ifndef PRODUCT 3144 IdealGraphPrinter *printer = C->printer(); 3145 if (printer && printer->should_print(1)) { 3146 char buffer[256]; 3147 sprintf(buffer, "Bytecode %d: %s", bci(), Bytecodes::name(bc())); 3148 bool old = printer->traverse_outs(); 3149 printer->set_traverse_outs(true); 3150 printer->print_method(buffer, 4); 3151 printer->set_traverse_outs(old); 3152 } 3153 #endif 3154 }