1 /* 2 * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "ci/ciMethodData.hpp" 27 #include "classfile/systemDictionary.hpp" 28 #include "classfile/vmSymbols.hpp" 29 #include "compiler/compileLog.hpp" 30 #include "interpreter/linkResolver.hpp" 31 #include "memory/resourceArea.hpp" 32 #include "memory/universe.inline.hpp" 33 #include "oops/oop.inline.hpp" 34 #include "opto/addnode.hpp" 35 #include "opto/castnode.hpp" 36 #include "opto/convertnode.hpp" 37 #include "opto/divnode.hpp" 38 #include "opto/idealGraphPrinter.hpp" 39 #include "opto/matcher.hpp" 40 #include "opto/memnode.hpp" 41 #include "opto/mulnode.hpp" 42 #include "opto/opaquenode.hpp" 43 #include "opto/parse.hpp" 44 #include "opto/runtime.hpp" 45 #include "opto/shenandoahSupport.hpp" 46 #include "runtime/deoptimization.hpp" 47 #include "runtime/sharedRuntime.hpp" 48 49 #ifndef PRODUCT 50 extern int explicit_null_checks_inserted, 51 explicit_null_checks_elided; 52 #endif 53 54 //---------------------------------array_load---------------------------------- 55 void Parse::array_load(BasicType elem_type) { 56 const Type* elem = Type::TOP; 57 Node* adr = array_addressing(elem_type, 0, false, &elem); 58 if (stopped()) return; // guaranteed null or range check 59 dec_sp(2); // Pop array and index 60 const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(elem_type); 61 Node* ld = make_load(control(), adr, elem, elem_type, adr_type, MemNode::unordered); 62 push(ld); 63 } 64 65 66 //--------------------------------array_store---------------------------------- 67 void Parse::array_store(BasicType elem_type) { 68 const Type* elem = Type::TOP; 69 Node* adr = array_addressing(elem_type, 1, true, &elem); 70 if (stopped()) return; // guaranteed null or range check 71 Node* val = pop(); 72 dec_sp(2); // Pop array and index 73 const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(elem_type); 74 if (elem == TypeInt::BOOL) { 75 elem_type = T_BOOLEAN; 76 } 77 store_to_memory(control(), adr, val, elem_type, adr_type, StoreNode::release_if_reference(elem_type)); 78 } 79 80 81 //------------------------------array_addressing------------------------------- 82 // Pull array and index from the stack. Compute pointer-to-element. 
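// Here "vals" is the number of stack slots sitting above the array/index pair
// (0 for a load, 1 for a one-slot store value, 2 for a two-slot long/double
// store value, as the call sites below show), and "*result2", when non-NULL,
// receives the (possibly sharpened) element type.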
83 Node* Parse::array_addressing(BasicType type, int vals, bool is_store, const Type* *result2) { 84 Node *idx = peek(0+vals); // Get from stack without popping 85 Node *ary = peek(1+vals); // in case of exception 86 87 // Null check the array base, with correct stack contents 88 ary = null_check(ary, T_ARRAY); 89 // Compile-time detect of null-exception? 90 if (stopped()) return top(); 91 92 const TypeAryPtr* arytype = _gvn.type(ary)->is_aryptr(); 93 const TypeInt* sizetype = arytype->size(); 94 const Type* elemtype = arytype->elem(); 95 96 if (UseUniqueSubclasses && result2 != NULL) { 97 const Type* el = elemtype->make_ptr(); 98 if (el && el->isa_instptr()) { 99 const TypeInstPtr* toop = el->is_instptr(); 100 if (toop->klass()->as_instance_klass()->unique_concrete_subklass()) { 101 // If we load from "AbstractClass[]" we must see "ConcreteSubClass". 102 const Type* subklass = Type::get_const_type(toop->klass()); 103 elemtype = subklass->join_speculative(el); 104 } 105 } 106 } 107 108 // Check for big class initializers with all constant offsets 109 // feeding into a known-size array. 110 const TypeInt* idxtype = _gvn.type(idx)->is_int(); 111 // See if the highest idx value is less than the lowest array bound, 112 // and if the idx value cannot be negative: 113 bool need_range_check = true; 114 if (idxtype->_hi < sizetype->_lo && idxtype->_lo >= 0) { 115 need_range_check = false; 116 if (C->log() != NULL) C->log()->elem("observe that='!need_range_check'"); 117 } 118 119 ciKlass * arytype_klass = arytype->klass(); 120 if ((arytype_klass != NULL) && (!arytype_klass->is_loaded())) { 121 // Only fails for some -Xcomp runs 122 // The class is unloaded. We have to run this bytecode in the interpreter. 123 uncommon_trap(Deoptimization::Reason_unloaded, 124 Deoptimization::Action_reinterpret, 125 arytype->klass(), "!loaded array"); 126 return top(); 127 } 128 129 // Do the range check 130 if (GenerateRangeChecks && need_range_check) { 131 Node* tst; 132 if (sizetype->_hi <= 0) { 133 // The greatest array bound is negative, so we can conclude that we're 134 // compiling unreachable code, but the unsigned compare trick used below 135 // only works with non-negative lengths. Instead, hack "tst" to be zero so 136 // the uncommon_trap path will always be taken. 137 tst = _gvn.intcon(0); 138 } else { 139 // Range is constant in array-oop, so we can use the original state of mem 140 Node* len = load_array_length(ary); 141 142 // Test length vs index (standard trick using unsigned compare) 143 Node* chk = _gvn.transform( new CmpUNode(idx, len) ); 144 BoolTest::mask btest = BoolTest::lt; 145 tst = _gvn.transform( new BoolNode(chk, btest) ); 146 } 147 RangeCheckNode* rc = new RangeCheckNode(control(), tst, PROB_MAX, COUNT_UNKNOWN); 148 _gvn.set_type(rc, rc->Value(&_gvn)); 149 if (!tst->is_Con()) { 150 record_for_igvn(rc); 151 } 152 set_control(_gvn.transform(new IfTrueNode(rc))); 153 // Branch to failure if out of bounds 154 { 155 PreserveJVMState pjvms(this); 156 set_control(_gvn.transform(new IfFalseNode(rc))); 157 if (C->allow_range_check_smearing()) { 158 // Do not use builtin_throw, since range checks are sometimes 159 // made more stringent by an optimistic transformation. 160 // This creates "tentative" range checks at this point, 161 // which are not guaranteed to throw exceptions. 162 // See IfNode::Ideal, is_range_check, adjust_check. 
        uncommon_trap(Deoptimization::Reason_range_check,
                      Deoptimization::Action_make_not_entrant,
                      NULL, "range_check");
      } else {
        // If we have already recompiled with the range-check-widening
        // heroic optimization turned off, then we must really be throwing
        // range check exceptions.
        builtin_throw(Deoptimization::Reason_range_check, idx);
      }
    }
  }
  // Check whether we statically know that a range-check exception is always thrown
  if (stopped())  return top();

  if (is_store) {
    ary = shenandoah_write_barrier(ary);
  } else {
    ary = shenandoah_read_barrier(ary);
  }

  // Make array address computation control dependent to prevent it
  // from floating above the range check during loop optimizations.
  Node* ptr = array_element_address(ary, idx, type, sizetype, control());

  if (result2 != NULL)  *result2 = elemtype;

  assert(ptr != top(), "top should go hand-in-hand with stopped");

  return ptr;
}


// returns IfNode
IfNode* Parse::jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask) {
  Node   *cmp = _gvn.transform( new CmpINode( a, b)); // two cases: shiftcount > 32 and shiftcount <= 32
  Node   *tst = _gvn.transform( new BoolNode( cmp, mask));
  IfNode *iff = create_and_map_if( control(), tst, ((mask == BoolTest::eq) ? PROB_STATIC_INFREQUENT : PROB_FAIR), COUNT_UNKNOWN );
  return iff;
}

// return Region node
Node* Parse::jump_if_join(Node* iffalse, Node* iftrue) {
  Node *region = new RegionNode(3); // 2 results
  record_for_igvn(region);
  region->init_req(1, iffalse);
  region->init_req(2, iftrue );
  _gvn.set_type(region, Type::CONTROL);
  region = _gvn.transform(region);
  set_control (region);
  return region;
}


//------------------------------helper for tableswitch-------------------------
void Parse::jump_if_true_fork(IfNode *iff, int dest_bci_if_true, int prof_table_index) {
  // True branch, use existing map info
  { PreserveJVMState pjvms(this);
    Node *iftrue  = _gvn.transform( new IfTrueNode (iff) );
    set_control( iftrue );
    profile_switch_case(prof_table_index);
    merge_new_path(dest_bci_if_true);
  }

  // False branch
  Node *iffalse = _gvn.transform( new IfFalseNode(iff) );
  set_control( iffalse );
}

void Parse::jump_if_false_fork(IfNode *iff, int dest_bci_if_true, int prof_table_index) {
  // The branch taken when the test is false jumps to the destination; use existing map info
  { PreserveJVMState pjvms(this);
    Node *iffalse = _gvn.transform( new IfFalseNode (iff) );
    set_control( iffalse );
    profile_switch_case(prof_table_index);
    merge_new_path(dest_bci_if_true);
  }

  // The true branch falls through
  Node *iftrue = _gvn.transform( new IfTrueNode(iff) );
  set_control( iftrue );
}

void Parse::jump_if_always_fork(int dest_bci, int prof_table_index) {
  // False branch, use existing map and control()
  profile_switch_case(prof_table_index);
  merge_new_path(dest_bci);
}


extern "C" {
  static int jint_cmp(const void *i, const void *j) {
    int a = *(jint *)i;
    int b = *(jint *)j;
    return a > b ? 1 : a < b ? -1 : 0;
  }
}


// Default value for methodData switch indexing. Must be a negative value to avoid
// conflict with any legal switch index.
263 #define NullTableIndex -1 264 265 class SwitchRange : public StackObj { 266 // a range of integers coupled with a bci destination 267 jint _lo; // inclusive lower limit 268 jint _hi; // inclusive upper limit 269 int _dest; 270 int _table_index; // index into method data table 271 272 public: 273 jint lo() const { return _lo; } 274 jint hi() const { return _hi; } 275 int dest() const { return _dest; } 276 int table_index() const { return _table_index; } 277 bool is_singleton() const { return _lo == _hi; } 278 279 void setRange(jint lo, jint hi, int dest, int table_index) { 280 assert(lo <= hi, "must be a non-empty range"); 281 _lo = lo, _hi = hi; _dest = dest; _table_index = table_index; 282 } 283 bool adjoinRange(jint lo, jint hi, int dest, int table_index) { 284 assert(lo <= hi, "must be a non-empty range"); 285 if (lo == _hi+1 && dest == _dest && table_index == _table_index) { 286 _hi = hi; 287 return true; 288 } 289 return false; 290 } 291 292 void set (jint value, int dest, int table_index) { 293 setRange(value, value, dest, table_index); 294 } 295 bool adjoin(jint value, int dest, int table_index) { 296 return adjoinRange(value, value, dest, table_index); 297 } 298 299 void print() { 300 if (is_singleton()) 301 tty->print(" {%d}=>%d", lo(), dest()); 302 else if (lo() == min_jint) 303 tty->print(" {..%d}=>%d", hi(), dest()); 304 else if (hi() == max_jint) 305 tty->print(" {%d..}=>%d", lo(), dest()); 306 else 307 tty->print(" {%d..%d}=>%d", lo(), hi(), dest()); 308 } 309 }; 310 311 312 //-------------------------------do_tableswitch-------------------------------- 313 void Parse::do_tableswitch() { 314 Node* lookup = pop(); 315 316 // Get information about tableswitch 317 int default_dest = iter().get_dest_table(0); 318 int lo_index = iter().get_int_table(1); 319 int hi_index = iter().get_int_table(2); 320 int len = hi_index - lo_index + 1; 321 322 if (len < 1) { 323 // If this is a backward branch, add safepoint 324 maybe_add_safepoint(default_dest); 325 merge(default_dest); 326 return; 327 } 328 329 // generate decision tree, using trichotomy when possible 330 int rnum = len+2; 331 bool makes_backward_branch = false; 332 SwitchRange* ranges = NEW_RESOURCE_ARRAY(SwitchRange, rnum); 333 int rp = -1; 334 if (lo_index != min_jint) { 335 ranges[++rp].setRange(min_jint, lo_index-1, default_dest, NullTableIndex); 336 } 337 for (int j = 0; j < len; j++) { 338 jint match_int = lo_index+j; 339 int dest = iter().get_dest_table(j+3); 340 makes_backward_branch |= (dest <= bci()); 341 int table_index = method_data_update() ? 
j : NullTableIndex; 342 if (rp < 0 || !ranges[rp].adjoin(match_int, dest, table_index)) { 343 ranges[++rp].set(match_int, dest, table_index); 344 } 345 } 346 jint highest = lo_index+(len-1); 347 assert(ranges[rp].hi() == highest, ""); 348 if (highest != max_jint 349 && !ranges[rp].adjoinRange(highest+1, max_jint, default_dest, NullTableIndex)) { 350 ranges[++rp].setRange(highest+1, max_jint, default_dest, NullTableIndex); 351 } 352 assert(rp < len+2, "not too many ranges"); 353 354 // Safepoint in case if backward branch observed 355 if( makes_backward_branch && UseLoopSafepoints ) 356 add_safepoint(); 357 358 jump_switch_ranges(lookup, &ranges[0], &ranges[rp]); 359 } 360 361 362 //------------------------------do_lookupswitch-------------------------------- 363 void Parse::do_lookupswitch() { 364 Node *lookup = pop(); // lookup value 365 // Get information about lookupswitch 366 int default_dest = iter().get_dest_table(0); 367 int len = iter().get_int_table(1); 368 369 if (len < 1) { // If this is a backward branch, add safepoint 370 maybe_add_safepoint(default_dest); 371 merge(default_dest); 372 return; 373 } 374 375 // generate decision tree, using trichotomy when possible 376 jint* table = NEW_RESOURCE_ARRAY(jint, len*2); 377 { 378 for( int j = 0; j < len; j++ ) { 379 table[j+j+0] = iter().get_int_table(2+j+j); 380 table[j+j+1] = iter().get_dest_table(2+j+j+1); 381 } 382 qsort( table, len, 2*sizeof(table[0]), jint_cmp ); 383 } 384 385 int rnum = len*2+1; 386 bool makes_backward_branch = false; 387 SwitchRange* ranges = NEW_RESOURCE_ARRAY(SwitchRange, rnum); 388 int rp = -1; 389 for( int j = 0; j < len; j++ ) { 390 jint match_int = table[j+j+0]; 391 int dest = table[j+j+1]; 392 int next_lo = rp < 0 ? min_jint : ranges[rp].hi()+1; 393 int table_index = method_data_update() ? j : NullTableIndex; 394 makes_backward_branch |= (dest <= bci()); 395 if( match_int != next_lo ) { 396 ranges[++rp].setRange(next_lo, match_int-1, default_dest, NullTableIndex); 397 } 398 if( rp < 0 || !ranges[rp].adjoin(match_int, dest, table_index) ) { 399 ranges[++rp].set(match_int, dest, table_index); 400 } 401 } 402 jint highest = table[2*(len-1)]; 403 assert(ranges[rp].hi() == highest, ""); 404 if( highest != max_jint 405 && !ranges[rp].adjoinRange(highest+1, max_jint, default_dest, NullTableIndex) ) { 406 ranges[++rp].setRange(highest+1, max_jint, default_dest, NullTableIndex); 407 } 408 assert(rp < rnum, "not too many ranges"); 409 410 // Safepoint in case backward branch observed 411 if( makes_backward_branch && UseLoopSafepoints ) 412 add_safepoint(); 413 414 jump_switch_ranges(lookup, &ranges[0], &ranges[rp]); 415 } 416 417 //----------------------------create_jump_tables------------------------------- 418 bool Parse::create_jump_tables(Node* key_val, SwitchRange* lo, SwitchRange* hi) { 419 // Are jumptables enabled 420 if (!UseJumpTables) return false; 421 422 // Are jumptables supported 423 if (!Matcher::has_match_rule(Op_Jump)) return false; 424 425 // Don't make jump table if profiling 426 if (method_data_update()) return false; 427 428 // Decide if a guard is needed to lop off big ranges at either (or 429 // both) end(s) of the input set. We'll call this the default target 430 // even though we can't be sure that it is the true "default". 
431 432 bool needs_guard = false; 433 int default_dest; 434 int64_t total_outlier_size = 0; 435 int64_t hi_size = ((int64_t)hi->hi()) - ((int64_t)hi->lo()) + 1; 436 int64_t lo_size = ((int64_t)lo->hi()) - ((int64_t)lo->lo()) + 1; 437 438 if (lo->dest() == hi->dest()) { 439 total_outlier_size = hi_size + lo_size; 440 default_dest = lo->dest(); 441 } else if (lo_size > hi_size) { 442 total_outlier_size = lo_size; 443 default_dest = lo->dest(); 444 } else { 445 total_outlier_size = hi_size; 446 default_dest = hi->dest(); 447 } 448 449 // If a guard test will eliminate very sparse end ranges, then 450 // it is worth the cost of an extra jump. 451 if (total_outlier_size > (MaxJumpTableSparseness * 4)) { 452 needs_guard = true; 453 if (default_dest == lo->dest()) lo++; 454 if (default_dest == hi->dest()) hi--; 455 } 456 457 // Find the total number of cases and ranges 458 int64_t num_cases = ((int64_t)hi->hi()) - ((int64_t)lo->lo()) + 1; 459 int num_range = hi - lo + 1; 460 461 // Don't create table if: too large, too small, or too sparse. 462 if (num_cases < MinJumpTableSize || num_cases > MaxJumpTableSize) 463 return false; 464 if (num_cases > (MaxJumpTableSparseness * num_range)) 465 return false; 466 467 // Normalize table lookups to zero 468 int lowval = lo->lo(); 469 key_val = _gvn.transform( new SubINode(key_val, _gvn.intcon(lowval)) ); 470 471 // Generate a guard to protect against input keyvals that aren't 472 // in the switch domain. 473 if (needs_guard) { 474 Node* size = _gvn.intcon(num_cases); 475 Node* cmp = _gvn.transform( new CmpUNode(key_val, size) ); 476 Node* tst = _gvn.transform( new BoolNode(cmp, BoolTest::ge) ); 477 IfNode* iff = create_and_map_if( control(), tst, PROB_FAIR, COUNT_UNKNOWN); 478 jump_if_true_fork(iff, default_dest, NullTableIndex); 479 } 480 481 // Create an ideal node JumpTable that has projections 482 // of all possible ranges for a switch statement 483 // The key_val input must be converted to a pointer offset and scaled. 484 // Compare Parse::array_addressing above. 485 486 // Clean the 32-bit int into a real 64-bit offset. 487 // Otherwise, the jint value 0 might turn into an offset of 0x0800000000. 488 const TypeInt* ikeytype = TypeInt::make(0, num_cases, Type::WidenMin); 489 // Make I2L conversion control dependent to prevent it from 490 // floating above the range check during loop optimizations. 
491 key_val = C->conv_I2X_index(&_gvn, key_val, ikeytype, control()); 492 493 // Shift the value by wordsize so we have an index into the table, rather 494 // than a switch value 495 Node *shiftWord = _gvn.MakeConX(wordSize); 496 key_val = _gvn.transform( new MulXNode( key_val, shiftWord)); 497 498 // Create the JumpNode 499 Node* jtn = _gvn.transform( new JumpNode(control(), key_val, num_cases) ); 500 501 // These are the switch destinations hanging off the jumpnode 502 int i = 0; 503 for (SwitchRange* r = lo; r <= hi; r++) { 504 for (int64_t j = r->lo(); j <= r->hi(); j++, i++) { 505 Node* input = _gvn.transform(new JumpProjNode(jtn, i, r->dest(), (int)(j - lowval))); 506 { 507 PreserveJVMState pjvms(this); 508 set_control(input); 509 jump_if_always_fork(r->dest(), r->table_index()); 510 } 511 } 512 } 513 assert(i == num_cases, "miscount of cases"); 514 stop_and_kill_map(); // no more uses for this JVMS 515 return true; 516 } 517 518 //----------------------------jump_switch_ranges------------------------------- 519 void Parse::jump_switch_ranges(Node* key_val, SwitchRange *lo, SwitchRange *hi, int switch_depth) { 520 Block* switch_block = block(); 521 522 if (switch_depth == 0) { 523 // Do special processing for the top-level call. 524 assert(lo->lo() == min_jint, "initial range must exhaust Type::INT"); 525 assert(hi->hi() == max_jint, "initial range must exhaust Type::INT"); 526 527 // Decrement pred-numbers for the unique set of nodes. 528 #ifdef ASSERT 529 // Ensure that the block's successors are a (duplicate-free) set. 530 int successors_counted = 0; // block occurrences in [hi..lo] 531 int unique_successors = switch_block->num_successors(); 532 for (int i = 0; i < unique_successors; i++) { 533 Block* target = switch_block->successor_at(i); 534 535 // Check that the set of successors is the same in both places. 536 int successors_found = 0; 537 for (SwitchRange* p = lo; p <= hi; p++) { 538 if (p->dest() == target->start()) successors_found++; 539 } 540 assert(successors_found > 0, "successor must be known"); 541 successors_counted += successors_found; 542 } 543 assert(successors_counted == (hi-lo)+1, "no unexpected successors"); 544 #endif 545 546 // Maybe prune the inputs, based on the type of key_val. 
547 jint min_val = min_jint; 548 jint max_val = max_jint; 549 const TypeInt* ti = key_val->bottom_type()->isa_int(); 550 if (ti != NULL) { 551 min_val = ti->_lo; 552 max_val = ti->_hi; 553 assert(min_val <= max_val, "invalid int type"); 554 } 555 while (lo->hi() < min_val) lo++; 556 if (lo->lo() < min_val) lo->setRange(min_val, lo->hi(), lo->dest(), lo->table_index()); 557 while (hi->lo() > max_val) hi--; 558 if (hi->hi() > max_val) hi->setRange(hi->lo(), max_val, hi->dest(), hi->table_index()); 559 } 560 561 #ifndef PRODUCT 562 if (switch_depth == 0) { 563 _max_switch_depth = 0; 564 _est_switch_depth = log2_intptr((hi-lo+1)-1)+1; 565 } 566 #endif 567 568 assert(lo <= hi, "must be a non-empty set of ranges"); 569 if (lo == hi) { 570 jump_if_always_fork(lo->dest(), lo->table_index()); 571 } else { 572 assert(lo->hi() == (lo+1)->lo()-1, "contiguous ranges"); 573 assert(hi->lo() == (hi-1)->hi()+1, "contiguous ranges"); 574 575 if (create_jump_tables(key_val, lo, hi)) return; 576 577 int nr = hi - lo + 1; 578 579 SwitchRange* mid = lo + nr/2; 580 // if there is an easy choice, pivot at a singleton: 581 if (nr > 3 && !mid->is_singleton() && (mid-1)->is_singleton()) mid--; 582 583 assert(lo < mid && mid <= hi, "good pivot choice"); 584 assert(nr != 2 || mid == hi, "should pick higher of 2"); 585 assert(nr != 3 || mid == hi-1, "should pick middle of 3"); 586 587 Node *test_val = _gvn.intcon(mid->lo()); 588 589 if (mid->is_singleton()) { 590 IfNode *iff_ne = jump_if_fork_int(key_val, test_val, BoolTest::ne); 591 jump_if_false_fork(iff_ne, mid->dest(), mid->table_index()); 592 593 // Special Case: If there are exactly three ranges, and the high 594 // and low range each go to the same place, omit the "gt" test, 595 // since it will not discriminate anything. 596 bool eq_test_only = (hi == lo+2 && hi->dest() == lo->dest()); 597 if (eq_test_only) { 598 assert(mid == hi-1, ""); 599 } 600 601 // if there is a higher range, test for it and process it: 602 if (mid < hi && !eq_test_only) { 603 // two comparisons of same values--should enable 1 test for 2 branches 604 // Use BoolTest::le instead of BoolTest::gt 605 IfNode *iff_le = jump_if_fork_int(key_val, test_val, BoolTest::le); 606 Node *iftrue = _gvn.transform( new IfTrueNode(iff_le) ); 607 Node *iffalse = _gvn.transform( new IfFalseNode(iff_le) ); 608 { PreserveJVMState pjvms(this); 609 set_control(iffalse); 610 jump_switch_ranges(key_val, mid+1, hi, switch_depth+1); 611 } 612 set_control(iftrue); 613 } 614 615 } else { 616 // mid is a range, not a singleton, so treat mid..hi as a unit 617 IfNode *iff_ge = jump_if_fork_int(key_val, test_val, BoolTest::ge); 618 619 // if there is a higher range, test for it and process it: 620 if (mid == hi) { 621 jump_if_true_fork(iff_ge, mid->dest(), mid->table_index()); 622 } else { 623 Node *iftrue = _gvn.transform( new IfTrueNode(iff_ge) ); 624 Node *iffalse = _gvn.transform( new IfFalseNode(iff_ge) ); 625 { PreserveJVMState pjvms(this); 626 set_control(iftrue); 627 jump_switch_ranges(key_val, mid, hi, switch_depth+1); 628 } 629 set_control(iffalse); 630 } 631 } 632 633 // in any case, process the lower range 634 jump_switch_ranges(key_val, lo, mid-1, switch_depth+1); 635 } 636 637 // Decrease pred_count for each successor after all is done. 638 if (switch_depth == 0) { 639 int unique_successors = switch_block->num_successors(); 640 for (int i = 0; i < unique_successors; i++) { 641 Block* target = switch_block->successor_at(i); 642 // Throw away the pre-allocated path for each unique successor. 
643 target->next_path_num(); 644 } 645 } 646 647 #ifndef PRODUCT 648 _max_switch_depth = MAX2(switch_depth, _max_switch_depth); 649 if (TraceOptoParse && Verbose && WizardMode && switch_depth == 0) { 650 SwitchRange* r; 651 int nsing = 0; 652 for( r = lo; r <= hi; r++ ) { 653 if( r->is_singleton() ) nsing++; 654 } 655 tty->print(">>> "); 656 _method->print_short_name(); 657 tty->print_cr(" switch decision tree"); 658 tty->print_cr(" %d ranges (%d singletons), max_depth=%d, est_depth=%d", 659 (int) (hi-lo+1), nsing, _max_switch_depth, _est_switch_depth); 660 if (_max_switch_depth > _est_switch_depth) { 661 tty->print_cr("******** BAD SWITCH DEPTH ********"); 662 } 663 tty->print(" "); 664 for( r = lo; r <= hi; r++ ) { 665 r->print(); 666 } 667 tty->cr(); 668 } 669 #endif 670 } 671 672 void Parse::modf() { 673 Node *f2 = pop(); 674 Node *f1 = pop(); 675 Node* c = make_runtime_call(RC_LEAF, OptoRuntime::modf_Type(), 676 CAST_FROM_FN_PTR(address, SharedRuntime::frem), 677 "frem", NULL, //no memory effects 678 f1, f2); 679 Node* res = _gvn.transform(new ProjNode(c, TypeFunc::Parms + 0)); 680 681 push(res); 682 } 683 684 void Parse::modd() { 685 Node *d2 = pop_pair(); 686 Node *d1 = pop_pair(); 687 Node* c = make_runtime_call(RC_LEAF, OptoRuntime::Math_DD_D_Type(), 688 CAST_FROM_FN_PTR(address, SharedRuntime::drem), 689 "drem", NULL, //no memory effects 690 d1, top(), d2, top()); 691 Node* res_d = _gvn.transform(new ProjNode(c, TypeFunc::Parms + 0)); 692 693 #ifdef ASSERT 694 Node* res_top = _gvn.transform(new ProjNode(c, TypeFunc::Parms + 1)); 695 assert(res_top == top(), "second value must be top"); 696 #endif 697 698 push_pair(res_d); 699 } 700 701 void Parse::l2f() { 702 Node* f2 = pop(); 703 Node* f1 = pop(); 704 Node* c = make_runtime_call(RC_LEAF, OptoRuntime::l2f_Type(), 705 CAST_FROM_FN_PTR(address, SharedRuntime::l2f), 706 "l2f", NULL, //no memory effects 707 f1, f2); 708 Node* res = _gvn.transform(new ProjNode(c, TypeFunc::Parms + 0)); 709 710 push(res); 711 } 712 713 void Parse::do_irem() { 714 // Must keep both values on the expression-stack during null-check 715 zero_check_int(peek()); 716 // Compile-time detect of null-exception? 717 if (stopped()) return; 718 719 Node* b = pop(); 720 Node* a = pop(); 721 722 const Type *t = _gvn.type(b); 723 if (t != Type::TOP) { 724 const TypeInt *ti = t->is_int(); 725 if (ti->is_con()) { 726 int divisor = ti->get_con(); 727 // check for positive power of 2 728 if (divisor > 0 && 729 (divisor & ~(divisor-1)) == divisor) { 730 // yes ! 
731 Node *mask = _gvn.intcon((divisor - 1)); 732 // Sigh, must handle negative dividends 733 Node *zero = _gvn.intcon(0); 734 IfNode *ifff = jump_if_fork_int(a, zero, BoolTest::lt); 735 Node *iff = _gvn.transform( new IfFalseNode(ifff) ); 736 Node *ift = _gvn.transform( new IfTrueNode (ifff) ); 737 Node *reg = jump_if_join(ift, iff); 738 Node *phi = PhiNode::make(reg, NULL, TypeInt::INT); 739 // Negative path; negate/and/negate 740 Node *neg = _gvn.transform( new SubINode(zero, a) ); 741 Node *andn= _gvn.transform( new AndINode(neg, mask) ); 742 Node *negn= _gvn.transform( new SubINode(zero, andn) ); 743 phi->init_req(1, negn); 744 // Fast positive case 745 Node *andx = _gvn.transform( new AndINode(a, mask) ); 746 phi->init_req(2, andx); 747 // Push the merge 748 push( _gvn.transform(phi) ); 749 return; 750 } 751 } 752 } 753 // Default case 754 push( _gvn.transform( new ModINode(control(),a,b) ) ); 755 } 756 757 // Handle jsr and jsr_w bytecode 758 void Parse::do_jsr() { 759 assert(bc() == Bytecodes::_jsr || bc() == Bytecodes::_jsr_w, "wrong bytecode"); 760 761 // Store information about current state, tagged with new _jsr_bci 762 int return_bci = iter().next_bci(); 763 int jsr_bci = (bc() == Bytecodes::_jsr) ? iter().get_dest() : iter().get_far_dest(); 764 765 // Update method data 766 profile_taken_branch(jsr_bci); 767 768 // The way we do things now, there is only one successor block 769 // for the jsr, because the target code is cloned by ciTypeFlow. 770 Block* target = successor_for_bci(jsr_bci); 771 772 // What got pushed? 773 const Type* ret_addr = target->peek(); 774 assert(ret_addr->singleton(), "must be a constant (cloned jsr body)"); 775 776 // Effect on jsr on stack 777 push(_gvn.makecon(ret_addr)); 778 779 // Flow to the jsr. 780 merge(jsr_bci); 781 } 782 783 // Handle ret bytecode 784 void Parse::do_ret() { 785 // Find to whom we return. 786 assert(block()->num_successors() == 1, "a ret can only go one place now"); 787 Block* target = block()->successor_at(0); 788 assert(!target->is_ready(), "our arrival must be expected"); 789 profile_ret(target->flow()->start()); 790 int pnum = target->next_path_num(); 791 merge_common(target, pnum); 792 } 793 794 static bool has_injected_profile(BoolTest::mask btest, Node* test, int& taken, int& not_taken) { 795 if (btest != BoolTest::eq && btest != BoolTest::ne) { 796 // Only ::eq and ::ne are supported for profile injection. 797 return false; 798 } 799 if (test->is_Cmp() && 800 test->in(1)->Opcode() == Op_ProfileBoolean) { 801 ProfileBooleanNode* profile = (ProfileBooleanNode*)test->in(1); 802 int false_cnt = profile->false_count(); 803 int true_cnt = profile->true_count(); 804 805 // Counts matching depends on the actual test operation (::eq or ::ne). 806 // No need to scale the counts because profile injection was designed 807 // to feed exact counts into VM. 808 taken = (btest == BoolTest::eq) ? false_cnt : true_cnt; 809 not_taken = (btest == BoolTest::eq) ? true_cnt : false_cnt; 810 811 profile->consume(); 812 return true; 813 } 814 return false; 815 } 816 //--------------------------dynamic_branch_prediction-------------------------- 817 // Try to gather dynamic branch prediction behavior. Return a probability 818 // of the branch being taken and set the "cnt" field. Returns a -1.0 819 // if we need to use static prediction for some reason. 
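// Illustrative (hypothetical) numbers: with scaled profile counts taken=90 and
// not_taken=10, the result is prob = 90/(90+10) = 0.9, and cnt becomes
// (90+10)/FreqCountInvocations unless the block carries its own private count.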
float Parse::dynamic_branch_prediction(float &cnt, BoolTest::mask btest, Node* test) {
  ResourceMark rm;

  cnt = COUNT_UNKNOWN;

  int taken = 0;
  int not_taken = 0;

  bool use_mdo = !has_injected_profile(btest, test, taken, not_taken);

  if (use_mdo) {
    // Use MethodData information if it is available
    // FIXME: free the ProfileData structure
    ciMethodData* methodData = method()->method_data();
    if (!methodData->is_mature())  return PROB_UNKNOWN;
    ciProfileData* data = methodData->bci_to_data(bci());
    if (data == NULL) {
      return PROB_UNKNOWN;
    }
    if (!data->is_JumpData())  return PROB_UNKNOWN;

    // get taken and not taken values
    taken = data->as_JumpData()->taken();
    not_taken = 0;
    if (data->is_BranchData()) {
      not_taken = data->as_BranchData()->not_taken();
    }

    // scale the counts to be commensurate with invocation counts:
    taken = method()->scale_count(taken);
    not_taken = method()->scale_count(not_taken);
  }

  // Give up if too few (or too many, in which case the sum will overflow) counts to be meaningful.
  // We also check that the individual counters are non-negative first; an overflowed (negative)
  // counter could otherwise make the sum wrap back to a positive value and fool the check.
  if (taken < 0 || not_taken < 0 || taken + not_taken < 40) {
    if (C->log() != NULL) {
      C->log()->elem("branch target_bci='%d' taken='%d' not_taken='%d'", iter().get_dest(), taken, not_taken);
    }
    return PROB_UNKNOWN;
  }

  // Compute frequency that we arrive here
  float sum = taken + not_taken;
  // Adjust, if this block is a cloned private block but the
  // Jump counts are shared.  Take the private counts for
  // just this path instead of the shared counts.
  if( block()->count() > 0 )
    sum = block()->count();
  cnt = sum / FreqCountInvocations;

  // Pin probability to sane limits
  float prob;
  if( !taken )
    prob = (0+PROB_MIN) / 2;
  else if( !not_taken )
    prob = (1+PROB_MAX) / 2;
  else {                         // Compute probability of true path
    prob = (float)taken / (float)(taken + not_taken);
    if (prob > PROB_MAX)  prob = PROB_MAX;
    if (prob < PROB_MIN)  prob = PROB_MIN;
  }

  assert((cnt > 0.0f) && (prob > 0.0f),
         "Bad frequency assignment in if");

  if (C->log() != NULL) {
    const char* prob_str = NULL;
    if (prob >= PROB_MAX)  prob_str = (prob == PROB_MAX) ? "max" : "always";
    if (prob <= PROB_MIN)  prob_str = (prob == PROB_MIN) ? "min" : "never";
    char prob_str_buf[30];
    if (prob_str == NULL) {
      sprintf(prob_str_buf, "%g", prob);
      prob_str = prob_str_buf;
    }
    C->log()->elem("branch target_bci='%d' taken='%d' not_taken='%d' cnt='%f' prob='%s'",
                   iter().get_dest(), taken, not_taken, cnt, prob_str);
  }
  return prob;
}

//-----------------------------branch_prediction-------------------------------
float Parse::branch_prediction(float& cnt,
                               BoolTest::mask btest,
                               int target_bci,
                               Node* test) {
  float prob = dynamic_branch_prediction(cnt, btest, test);
  // If prob is unknown, switch to static prediction
  if (prob != PROB_UNKNOWN)  return prob;

  prob = PROB_FAIR;                   // Set default value
  if (btest == BoolTest::eq)          // Exactly equal test?
    prob = PROB_STATIC_INFREQUENT;    // Assume it's relatively infrequent
  else if (btest == BoolTest::ne)
    prob = PROB_STATIC_FREQUENT;      // Assume it's relatively frequent

  // If this is a conditional test guarding a backwards branch,
  // assume it's a loop-back edge.  Make it a likely taken branch.
  if (target_bci < bci()) {
    if (is_osr_parse()) {    // Could be a hot OSR'd loop; force deopt
      // Since it's an OSR, we probably have profile data, but since
      // branch_prediction returned PROB_UNKNOWN, the counts are too small.
      // Let's make a special check here for completely zero counts.
      ciMethodData* methodData = method()->method_data();
      if (!methodData->is_empty()) {
        ciProfileData* data = methodData->bci_to_data(bci());
        // Only stop for truly zero counts, which mean an unknown part
        // of the OSR-ed method, and we want to deopt to gather more stats.
        // If you have ANY counts, then this loop is simply 'cold' relative
        // to the OSR loop.
        if (data == NULL ||
            (data->as_BranchData()->taken() + data->as_BranchData()->not_taken() == 0)) {
          // This is the only way to return PROB_UNKNOWN:
          return PROB_UNKNOWN;
        }
      }
    }
    prob = PROB_STATIC_FREQUENT;     // Likely to take backwards branch
  }

  assert(prob != PROB_UNKNOWN, "must have some guess at this point");
  return prob;
}

// The magic constants are chosen so as to match the output of
// branch_prediction() when the profile reports a zero taken count.
// It is important to distinguish zero counts unambiguously, because
// some branches (e.g., _213_javac.Assembler.eliminate) validly produce
// very small but nonzero probabilities, which if confused with zero
// counts would keep the program recompiling indefinitely.
bool Parse::seems_never_taken(float prob) const {
  return prob < PROB_MIN;
}

// True if the comparison seems to be the kind that will not change its
// statistics from true to false.  See comments in adjust_map_after_if.
// This question is only asked along paths which are already
// classified as untaken (by seems_never_taken), so really,
// if a path is never taken, its controlling comparison is
// already acting in a stable fashion.  If the comparison
// seems stable, we will put an expensive uncommon trap
// on the untaken path.
bool Parse::seems_stable_comparison() const {
  if (C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if)) {
    return false;
  }
  return true;
}

//-------------------------------repush_if_args--------------------------------
// Push arguments of an "if" bytecode back onto the stack by adjusting _sp.
inline int Parse::repush_if_args() {
  if (PrintOpto && WizardMode) {
    tty->print("defending against excessive implicit null exceptions on %s @%d in ",
               Bytecodes::name(iter().cur_bc()), iter().cur_bci());
    method()->print_name(); tty->cr();
  }
  int bc_depth = - Bytecodes::depth(iter().cur_bc());
  assert(bc_depth == 1 || bc_depth == 2, "only two kinds of branches");
  DEBUG_ONLY(sync_jvms());   // argument(n) requires a synced jvms
  assert(argument(0) != NULL, "must exist");
  assert(bc_depth == 1 || argument(1) != NULL, "two must exist");
  inc_sp(bc_depth);
  return bc_depth;
}

//----------------------------------do_ifnull----------------------------------
void Parse::do_ifnull(BoolTest::mask btest, Node *c) {
  int target_bci = iter().get_dest();

  Block* branch_block = successor_for_bci(target_bci);
  Block* next_block   = successor_for_bci(iter().next_bci());

  float cnt;
  float prob = branch_prediction(cnt, btest, target_bci, c);
  if (prob == PROB_UNKNOWN) {
    // (An earlier version of do_ifnull omitted this trap for OSR methods.)
997 if (PrintOpto && Verbose) { 998 tty->print_cr("Never-taken edge stops compilation at bci %d", bci()); 999 } 1000 repush_if_args(); // to gather stats on loop 1001 // We need to mark this branch as taken so that if we recompile we will 1002 // see that it is possible. In the tiered system the interpreter doesn't 1003 // do profiling and by the time we get to the lower tier from the interpreter 1004 // the path may be cold again. Make sure it doesn't look untaken 1005 profile_taken_branch(target_bci, !ProfileInterpreter); 1006 uncommon_trap(Deoptimization::Reason_unreached, 1007 Deoptimization::Action_reinterpret, 1008 NULL, "cold"); 1009 if (C->eliminate_boxing()) { 1010 // Mark the successor blocks as parsed 1011 branch_block->next_path_num(); 1012 next_block->next_path_num(); 1013 } 1014 return; 1015 } 1016 1017 NOT_PRODUCT(explicit_null_checks_inserted++); 1018 1019 // Generate real control flow 1020 Node *tst = _gvn.transform( new BoolNode( c, btest ) ); 1021 1022 // Sanity check the probability value 1023 assert(prob > 0.0f,"Bad probability in Parser"); 1024 // Need xform to put node in hash table 1025 IfNode *iff = create_and_xform_if( control(), tst, prob, cnt ); 1026 assert(iff->_prob > 0.0f,"Optimizer made bad probability in parser"); 1027 // True branch 1028 { PreserveJVMState pjvms(this); 1029 Node* iftrue = _gvn.transform( new IfTrueNode (iff) ); 1030 set_control(iftrue); 1031 1032 if (stopped()) { // Path is dead? 1033 NOT_PRODUCT(explicit_null_checks_elided++); 1034 if (C->eliminate_boxing()) { 1035 // Mark the successor block as parsed 1036 branch_block->next_path_num(); 1037 } 1038 } else { // Path is live. 1039 // Update method data 1040 profile_taken_branch(target_bci); 1041 adjust_map_after_if(btest, c, prob, branch_block, next_block); 1042 if (!stopped()) { 1043 merge(target_bci); 1044 } 1045 } 1046 } 1047 1048 // False branch 1049 Node* iffalse = _gvn.transform( new IfFalseNode(iff) ); 1050 set_control(iffalse); 1051 1052 if (stopped()) { // Path is dead? 1053 NOT_PRODUCT(explicit_null_checks_elided++); 1054 if (C->eliminate_boxing()) { 1055 // Mark the successor block as parsed 1056 next_block->next_path_num(); 1057 } 1058 } else { // Path is live. 1059 // Update method data 1060 profile_not_taken_branch(); 1061 adjust_map_after_if(BoolTest(btest).negate(), c, 1.0-prob, 1062 next_block, branch_block); 1063 } 1064 } 1065 1066 //------------------------------------do_if------------------------------------ 1067 void Parse::do_if(BoolTest::mask btest, Node* c) { 1068 int target_bci = iter().get_dest(); 1069 1070 Block* branch_block = successor_for_bci(target_bci); 1071 Block* next_block = successor_for_bci(iter().next_bci()); 1072 1073 float cnt; 1074 float prob = branch_prediction(cnt, btest, target_bci, c); 1075 float untaken_prob = 1.0 - prob; 1076 1077 if (prob == PROB_UNKNOWN) { 1078 if (PrintOpto && Verbose) { 1079 tty->print_cr("Never-taken edge stops compilation at bci %d", bci()); 1080 } 1081 repush_if_args(); // to gather stats on loop 1082 // We need to mark this branch as taken so that if we recompile we will 1083 // see that it is possible. In the tiered system the interpreter doesn't 1084 // do profiling and by the time we get to the lower tier from the interpreter 1085 // the path may be cold again. 
Make sure it doesn't look untaken 1086 profile_taken_branch(target_bci, !ProfileInterpreter); 1087 uncommon_trap(Deoptimization::Reason_unreached, 1088 Deoptimization::Action_reinterpret, 1089 NULL, "cold"); 1090 if (C->eliminate_boxing()) { 1091 // Mark the successor blocks as parsed 1092 branch_block->next_path_num(); 1093 next_block->next_path_num(); 1094 } 1095 return; 1096 } 1097 1098 // Sanity check the probability value 1099 assert(0.0f < prob && prob < 1.0f,"Bad probability in Parser"); 1100 1101 bool taken_if_true = true; 1102 // Convert BoolTest to canonical form: 1103 if (!BoolTest(btest).is_canonical()) { 1104 btest = BoolTest(btest).negate(); 1105 taken_if_true = false; 1106 // prob is NOT updated here; it remains the probability of the taken 1107 // path (as opposed to the prob of the path guarded by an 'IfTrueNode'). 1108 } 1109 assert(btest != BoolTest::eq, "!= is the only canonical exact test"); 1110 1111 Node* tst0 = new BoolNode(c, btest); 1112 Node* tst = _gvn.transform(tst0); 1113 BoolTest::mask taken_btest = BoolTest::illegal; 1114 BoolTest::mask untaken_btest = BoolTest::illegal; 1115 1116 if (tst->is_Bool()) { 1117 // Refresh c from the transformed bool node, since it may be 1118 // simpler than the original c. Also re-canonicalize btest. 1119 // This wins when (Bool ne (Conv2B p) 0) => (Bool ne (CmpP p NULL)). 1120 // That can arise from statements like: if (x instanceof C) ... 1121 if (tst != tst0) { 1122 // Canonicalize one more time since transform can change it. 1123 btest = tst->as_Bool()->_test._test; 1124 if (!BoolTest(btest).is_canonical()) { 1125 // Reverse edges one more time... 1126 tst = _gvn.transform( tst->as_Bool()->negate(&_gvn) ); 1127 btest = tst->as_Bool()->_test._test; 1128 assert(BoolTest(btest).is_canonical(), "sanity"); 1129 taken_if_true = !taken_if_true; 1130 } 1131 c = tst->in(1); 1132 } 1133 BoolTest::mask neg_btest = BoolTest(btest).negate(); 1134 taken_btest = taken_if_true ? btest : neg_btest; 1135 untaken_btest = taken_if_true ? neg_btest : btest; 1136 } 1137 1138 // Generate real control flow 1139 float true_prob = (taken_if_true ? prob : untaken_prob); 1140 IfNode* iff = create_and_map_if(control(), tst, true_prob, cnt); 1141 assert(iff->_prob > 0.0f,"Optimizer made bad probability in parser"); 1142 Node* taken_branch = new IfTrueNode(iff); 1143 Node* untaken_branch = new IfFalseNode(iff); 1144 if (!taken_if_true) { // Finish conversion to canonical form 1145 Node* tmp = taken_branch; 1146 taken_branch = untaken_branch; 1147 untaken_branch = tmp; 1148 } 1149 1150 taken_branch = _gvn.transform(taken_branch); 1151 untaken_branch = _gvn.transform(untaken_branch); 1152 Node* taken_memory = NULL; 1153 Node* untaken_memory = NULL; 1154 1155 ShenandoahBarrierNode::do_cmpp_if(*this, taken_branch, untaken_branch, taken_memory, untaken_memory); 1156 1157 // Branch is taken: 1158 { PreserveJVMState pjvms(this); 1159 set_control(taken_branch); 1160 if (taken_memory != NULL) { 1161 set_all_memory(taken_memory); 1162 } 1163 1164 if (stopped()) { 1165 if (C->eliminate_boxing()) { 1166 // Mark the successor block as parsed 1167 branch_block->next_path_num(); 1168 } 1169 } else { 1170 // Update method data 1171 profile_taken_branch(target_bci); 1172 adjust_map_after_if(taken_btest, c, prob, branch_block, next_block); 1173 if (!stopped()) { 1174 merge(target_bci); 1175 } 1176 } 1177 } 1178 1179 set_control(untaken_branch); 1180 if (untaken_memory != NULL) { 1181 set_all_memory(untaken_memory); 1182 } 1183 1184 // Branch not taken. 
1185 if (stopped()) { 1186 if (C->eliminate_boxing()) { 1187 // Mark the successor block as parsed 1188 next_block->next_path_num(); 1189 } 1190 } else { 1191 // Update method data 1192 profile_not_taken_branch(); 1193 adjust_map_after_if(untaken_btest, c, untaken_prob, 1194 next_block, branch_block); 1195 } 1196 } 1197 1198 bool Parse::path_is_suitable_for_uncommon_trap(float prob) const { 1199 // Don't want to speculate on uncommon traps when running with -Xcomp 1200 if (!UseInterpreter) { 1201 return false; 1202 } 1203 return (seems_never_taken(prob) && seems_stable_comparison()); 1204 } 1205 1206 //----------------------------adjust_map_after_if------------------------------ 1207 // Adjust the JVM state to reflect the result of taking this path. 1208 // Basically, it means inspecting the CmpNode controlling this 1209 // branch, seeing how it constrains a tested value, and then 1210 // deciding if it's worth our while to encode this constraint 1211 // as graph nodes in the current abstract interpretation map. 1212 void Parse::adjust_map_after_if(BoolTest::mask btest, Node* c, float prob, 1213 Block* path, Block* other_path) { 1214 if (stopped() || !c->is_Cmp() || btest == BoolTest::illegal) 1215 return; // nothing to do 1216 1217 bool is_fallthrough = (path == successor_for_bci(iter().next_bci())); 1218 1219 if (path_is_suitable_for_uncommon_trap(prob)) { 1220 repush_if_args(); 1221 uncommon_trap(Deoptimization::Reason_unstable_if, 1222 Deoptimization::Action_reinterpret, 1223 NULL, 1224 (is_fallthrough ? "taken always" : "taken never")); 1225 return; 1226 } 1227 1228 Node* val = c->in(1); 1229 Node* con = c->in(2); 1230 const Type* tcon = _gvn.type(con); 1231 const Type* tval = _gvn.type(val); 1232 bool have_con = tcon->singleton(); 1233 if (tval->singleton()) { 1234 if (!have_con) { 1235 // Swap, so constant is in con. 1236 con = val; 1237 tcon = tval; 1238 val = c->in(2); 1239 tval = _gvn.type(val); 1240 btest = BoolTest(btest).commute(); 1241 have_con = true; 1242 } else { 1243 // Do we have two constants? Then leave well enough alone. 1244 have_con = false; 1245 } 1246 } 1247 if (!have_con) // remaining adjustments need a con 1248 return; 1249 1250 sharpen_type_after_if(btest, con, tcon, val, tval); 1251 } 1252 1253 1254 static Node* extract_obj_from_klass_load(PhaseGVN* gvn, Node* n) { 1255 Node* ldk; 1256 if (n->is_DecodeNKlass()) { 1257 if (n->in(1)->Opcode() != Op_LoadNKlass) { 1258 return NULL; 1259 } else { 1260 ldk = n->in(1); 1261 } 1262 } else if (n->Opcode() != Op_LoadKlass) { 1263 return NULL; 1264 } else { 1265 ldk = n; 1266 } 1267 assert(ldk != NULL && ldk->is_Load(), "should have found a LoadKlass or LoadNKlass node"); 1268 1269 Node* adr = ldk->in(MemNode::Address); 1270 intptr_t off = 0; 1271 Node* obj = AddPNode::Ideal_base_and_offset(adr, gvn, off); 1272 if (obj == NULL || off != oopDesc::klass_offset_in_bytes()) // loading oopDesc::_klass? 1273 return NULL; 1274 const TypePtr* tp = gvn->type(obj)->is_ptr(); 1275 if (tp == NULL || !(tp->isa_instptr() || tp->isa_aryptr())) // is obj a Java object ptr? 1276 return NULL; 1277 1278 return obj; 1279 } 1280 1281 void Parse::sharpen_type_after_if(BoolTest::mask btest, 1282 Node* con, const Type* tcon, 1283 Node* val, const Type* tval) { 1284 // Look for opportunities to sharpen the type of a node 1285 // whose klass is compared with a constant klass. 
1286 if (btest == BoolTest::eq && tcon->isa_klassptr()) { 1287 Node* obj = extract_obj_from_klass_load(&_gvn, val); 1288 const TypeOopPtr* con_type = tcon->isa_klassptr()->as_instance_type(); 1289 if (obj != NULL && (con_type->isa_instptr() || con_type->isa_aryptr())) { 1290 // Found: 1291 // Bool(CmpP(LoadKlass(obj._klass), ConP(Foo.klass)), [eq]) 1292 // or the narrowOop equivalent. 1293 const Type* obj_type = _gvn.type(obj); 1294 const TypeOopPtr* tboth = obj_type->join_speculative(con_type)->isa_oopptr(); 1295 if (tboth != NULL && tboth->klass_is_exact() && tboth != obj_type && 1296 tboth->higher_equal(obj_type)) { 1297 // obj has to be of the exact type Foo if the CmpP succeeds. 1298 int obj_in_map = map()->find_edge(obj); 1299 JVMState* jvms = this->jvms(); 1300 if (obj_in_map >= 0 && 1301 (jvms->is_loc(obj_in_map) || jvms->is_stk(obj_in_map))) { 1302 TypeNode* ccast = new CheckCastPPNode(control(), obj, tboth); 1303 const Type* tcc = ccast->as_Type()->type(); 1304 assert(tcc != obj_type && tcc->higher_equal(obj_type), "must improve"); 1305 // Delay transform() call to allow recovery of pre-cast value 1306 // at the control merge. 1307 _gvn.set_type_bottom(ccast); 1308 record_for_igvn(ccast); 1309 // Here's the payoff. 1310 replace_in_map(obj, ccast); 1311 } 1312 } 1313 } 1314 } 1315 1316 int val_in_map = map()->find_edge(val); 1317 if (val_in_map < 0) return; // replace_in_map would be useless 1318 { 1319 JVMState* jvms = this->jvms(); 1320 if (!(jvms->is_loc(val_in_map) || 1321 jvms->is_stk(val_in_map))) 1322 return; // again, it would be useless 1323 } 1324 1325 // Check for a comparison to a constant, and "know" that the compared 1326 // value is constrained on this path. 1327 assert(tcon->singleton(), ""); 1328 ConstraintCastNode* ccast = NULL; 1329 Node* cast = NULL; 1330 1331 switch (btest) { 1332 case BoolTest::eq: // Constant test? 1333 { 1334 const Type* tboth = tcon->join_speculative(tval); 1335 if (tboth == tval) break; // Nothing to gain. 1336 if (tcon->isa_int()) { 1337 ccast = new CastIINode(val, tboth); 1338 } else if (tcon == TypePtr::NULL_PTR) { 1339 // Cast to null, but keep the pointer identity temporarily live. 1340 ccast = new CastPPNode(val, tboth); 1341 } else { 1342 const TypeF* tf = tcon->isa_float_constant(); 1343 const TypeD* td = tcon->isa_double_constant(); 1344 // Exclude tests vs float/double 0 as these could be 1345 // either +0 or -0. Just because you are equal to +0 1346 // doesn't mean you ARE +0! 1347 // Note, following code also replaces Long and Oop values. 1348 if ((!tf || tf->_f != 0.0) && 1349 (!td || td->_d != 0.0)) 1350 cast = con; // Replace non-constant val by con. 1351 } 1352 } 1353 break; 1354 1355 case BoolTest::ne: 1356 if (tcon == TypePtr::NULL_PTR) { 1357 cast = cast_not_null(val, false); 1358 } 1359 break; 1360 1361 default: 1362 // (At this point we could record int range types with CastII.) 1363 break; 1364 } 1365 1366 if (ccast != NULL) { 1367 const Type* tcc = ccast->as_Type()->type(); 1368 assert(tcc != tval && tcc->higher_equal(tval), "must improve"); 1369 // Delay transform() call to allow recovery of pre-cast value 1370 // at the control merge. 1371 ccast->set_req(0, control()); 1372 _gvn.set_type_bottom(ccast); 1373 record_for_igvn(ccast); 1374 cast = ccast; 1375 } 1376 1377 if (cast != NULL) { // Here's the payoff. 
1378 replace_in_map(val, cast); 1379 } 1380 } 1381 1382 /** 1383 * Use speculative type to optimize CmpP node: if comparison is 1384 * against the low level class, cast the object to the speculative 1385 * type if any. CmpP should then go away. 1386 * 1387 * @param c expected CmpP node 1388 * @return result of CmpP on object casted to speculative type 1389 * 1390 */ 1391 Node* Parse::optimize_cmp_with_klass(Node* c) { 1392 // If this is transformed by the _gvn to a comparison with the low 1393 // level klass then we may be able to use speculation 1394 if (c->Opcode() == Op_CmpP && 1395 (c->in(1)->Opcode() == Op_LoadKlass || c->in(1)->Opcode() == Op_DecodeNKlass) && 1396 c->in(2)->is_Con()) { 1397 Node* load_klass = NULL; 1398 Node* decode = NULL; 1399 if (c->in(1)->Opcode() == Op_DecodeNKlass) { 1400 decode = c->in(1); 1401 load_klass = c->in(1)->in(1); 1402 } else { 1403 load_klass = c->in(1); 1404 } 1405 if (load_klass->in(2)->is_AddP()) { 1406 Node* addp = load_klass->in(2); 1407 Node* obj = addp->in(AddPNode::Address); 1408 const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr(); 1409 if (obj_type->speculative_type_not_null() != NULL) { 1410 ciKlass* k = obj_type->speculative_type(); 1411 inc_sp(2); 1412 obj = maybe_cast_profiled_obj(obj, k); 1413 dec_sp(2); 1414 // Make the CmpP use the casted obj 1415 addp = basic_plus_adr(obj, addp->in(AddPNode::Offset)); 1416 load_klass = load_klass->clone(); 1417 load_klass->set_req(2, addp); 1418 load_klass = _gvn.transform(load_klass); 1419 if (decode != NULL) { 1420 decode = decode->clone(); 1421 decode->set_req(1, load_klass); 1422 load_klass = _gvn.transform(decode); 1423 } 1424 c = c->clone(); 1425 c->set_req(1, load_klass); 1426 c = _gvn.transform(c); 1427 } 1428 } 1429 } 1430 return c; 1431 } 1432 1433 //------------------------------do_one_bytecode-------------------------------- 1434 // Parse this bytecode, and alter the Parsers JVM->Node mapping 1435 void Parse::do_one_bytecode() { 1436 Node *a, *b, *c, *d; // Handy temps 1437 BoolTest::mask btest; 1438 int i; 1439 1440 assert(!has_exceptions(), "bytecode entry state must be clear of throws"); 1441 1442 if (C->check_node_count(NodeLimitFudgeFactor * 5, 1443 "out of nodes parsing method")) { 1444 return; 1445 } 1446 1447 #ifdef ASSERT 1448 // for setting breakpoints 1449 if (TraceOptoParse) { 1450 tty->print(" @"); 1451 dump_bci(bci()); 1452 tty->cr(); 1453 } 1454 #endif 1455 1456 switch (bc()) { 1457 case Bytecodes::_nop: 1458 // do nothing 1459 break; 1460 case Bytecodes::_lconst_0: 1461 push_pair(longcon(0)); 1462 break; 1463 1464 case Bytecodes::_lconst_1: 1465 push_pair(longcon(1)); 1466 break; 1467 1468 case Bytecodes::_fconst_0: 1469 push(zerocon(T_FLOAT)); 1470 break; 1471 1472 case Bytecodes::_fconst_1: 1473 push(makecon(TypeF::ONE)); 1474 break; 1475 1476 case Bytecodes::_fconst_2: 1477 push(makecon(TypeF::make(2.0f))); 1478 break; 1479 1480 case Bytecodes::_dconst_0: 1481 push_pair(zerocon(T_DOUBLE)); 1482 break; 1483 1484 case Bytecodes::_dconst_1: 1485 push_pair(makecon(TypeD::ONE)); 1486 break; 1487 1488 case Bytecodes::_iconst_m1:push(intcon(-1)); break; 1489 case Bytecodes::_iconst_0: push(intcon( 0)); break; 1490 case Bytecodes::_iconst_1: push(intcon( 1)); break; 1491 case Bytecodes::_iconst_2: push(intcon( 2)); break; 1492 case Bytecodes::_iconst_3: push(intcon( 3)); break; 1493 case Bytecodes::_iconst_4: push(intcon( 4)); break; 1494 case Bytecodes::_iconst_5: push(intcon( 5)); break; 1495 case Bytecodes::_bipush: push(intcon(iter().get_constant_u1())); break; 1496 
case Bytecodes::_sipush: push(intcon(iter().get_constant_u2())); break; 1497 case Bytecodes::_aconst_null: push(null()); break; 1498 case Bytecodes::_ldc: 1499 case Bytecodes::_ldc_w: 1500 case Bytecodes::_ldc2_w: 1501 // If the constant is unresolved, run this BC once in the interpreter. 1502 { 1503 ciConstant constant = iter().get_constant(); 1504 if (constant.basic_type() == T_OBJECT && 1505 !constant.as_object()->is_loaded()) { 1506 int index = iter().get_constant_pool_index(); 1507 constantTag tag = iter().get_constant_pool_tag(index); 1508 uncommon_trap(Deoptimization::make_trap_request 1509 (Deoptimization::Reason_unloaded, 1510 Deoptimization::Action_reinterpret, 1511 index), 1512 NULL, tag.internal_name()); 1513 break; 1514 } 1515 assert(constant.basic_type() != T_OBJECT || constant.as_object()->is_instance(), 1516 "must be java_mirror of klass"); 1517 const Type* con_type = Type::make_from_constant(constant); 1518 if (con_type != NULL) { 1519 push_node(con_type->basic_type(), makecon(con_type)); 1520 } 1521 } 1522 1523 break; 1524 1525 case Bytecodes::_aload_0: 1526 push( local(0) ); 1527 break; 1528 case Bytecodes::_aload_1: 1529 push( local(1) ); 1530 break; 1531 case Bytecodes::_aload_2: 1532 push( local(2) ); 1533 break; 1534 case Bytecodes::_aload_3: 1535 push( local(3) ); 1536 break; 1537 case Bytecodes::_aload: 1538 push( local(iter().get_index()) ); 1539 break; 1540 1541 case Bytecodes::_fload_0: 1542 case Bytecodes::_iload_0: 1543 push( local(0) ); 1544 break; 1545 case Bytecodes::_fload_1: 1546 case Bytecodes::_iload_1: 1547 push( local(1) ); 1548 break; 1549 case Bytecodes::_fload_2: 1550 case Bytecodes::_iload_2: 1551 push( local(2) ); 1552 break; 1553 case Bytecodes::_fload_3: 1554 case Bytecodes::_iload_3: 1555 push( local(3) ); 1556 break; 1557 case Bytecodes::_fload: 1558 case Bytecodes::_iload: 1559 push( local(iter().get_index()) ); 1560 break; 1561 case Bytecodes::_lload_0: 1562 push_pair_local( 0 ); 1563 break; 1564 case Bytecodes::_lload_1: 1565 push_pair_local( 1 ); 1566 break; 1567 case Bytecodes::_lload_2: 1568 push_pair_local( 2 ); 1569 break; 1570 case Bytecodes::_lload_3: 1571 push_pair_local( 3 ); 1572 break; 1573 case Bytecodes::_lload: 1574 push_pair_local( iter().get_index() ); 1575 break; 1576 1577 case Bytecodes::_dload_0: 1578 push_pair_local(0); 1579 break; 1580 case Bytecodes::_dload_1: 1581 push_pair_local(1); 1582 break; 1583 case Bytecodes::_dload_2: 1584 push_pair_local(2); 1585 break; 1586 case Bytecodes::_dload_3: 1587 push_pair_local(3); 1588 break; 1589 case Bytecodes::_dload: 1590 push_pair_local(iter().get_index()); 1591 break; 1592 case Bytecodes::_fstore_0: 1593 case Bytecodes::_istore_0: 1594 case Bytecodes::_astore_0: 1595 set_local( 0, pop() ); 1596 break; 1597 case Bytecodes::_fstore_1: 1598 case Bytecodes::_istore_1: 1599 case Bytecodes::_astore_1: 1600 set_local( 1, pop() ); 1601 break; 1602 case Bytecodes::_fstore_2: 1603 case Bytecodes::_istore_2: 1604 case Bytecodes::_astore_2: 1605 set_local( 2, pop() ); 1606 break; 1607 case Bytecodes::_fstore_3: 1608 case Bytecodes::_istore_3: 1609 case Bytecodes::_astore_3: 1610 set_local( 3, pop() ); 1611 break; 1612 case Bytecodes::_fstore: 1613 case Bytecodes::_istore: 1614 case Bytecodes::_astore: 1615 set_local( iter().get_index(), pop() ); 1616 break; 1617 // long stores 1618 case Bytecodes::_lstore_0: 1619 set_pair_local( 0, pop_pair() ); 1620 break; 1621 case Bytecodes::_lstore_1: 1622 set_pair_local( 1, pop_pair() ); 1623 break; 1624 case Bytecodes::_lstore_2: 1625 
    set_pair_local( 2, pop_pair() );
    break;
  case Bytecodes::_lstore_3:
    set_pair_local( 3, pop_pair() );
    break;
  case Bytecodes::_lstore:
    set_pair_local( iter().get_index(), pop_pair() );
    break;

  // double stores
  case Bytecodes::_dstore_0:
    set_pair_local( 0, dstore_rounding(pop_pair()) );
    break;
  case Bytecodes::_dstore_1:
    set_pair_local( 1, dstore_rounding(pop_pair()) );
    break;
  case Bytecodes::_dstore_2:
    set_pair_local( 2, dstore_rounding(pop_pair()) );
    break;
  case Bytecodes::_dstore_3:
    set_pair_local( 3, dstore_rounding(pop_pair()) );
    break;
  case Bytecodes::_dstore:
    set_pair_local( iter().get_index(), dstore_rounding(pop_pair()) );
    break;

  case Bytecodes::_pop:  dec_sp(1);  break;
  case Bytecodes::_pop2: dec_sp(2);  break;
  case Bytecodes::_swap:
    a = pop();
    b = pop();
    push(a);
    push(b);
    break;
  case Bytecodes::_dup:
    a = pop();
    push(a);
    push(a);
    break;
  case Bytecodes::_dup_x1:
    a = pop();
    b = pop();
    push( a );
    push( b );
    push( a );
    break;
  case Bytecodes::_dup_x2:
    a = pop();
    b = pop();
    c = pop();
    push( a );
    push( c );
    push( b );
    push( a );
    break;
  case Bytecodes::_dup2:
    a = pop();
    b = pop();
    push( b );
    push( a );
    push( b );
    push( a );
    break;

  case Bytecodes::_dup2_x1:
    // before: .. c, b, a
    // after:  .. b, a, c, b, a
    // not tested
    a = pop();
    b = pop();
    c = pop();
    push( b );
    push( a );
    push( c );
    push( b );
    push( a );
    break;
  case Bytecodes::_dup2_x2:
    // before: .. d, c, b, a
    // after:  .. b, a, d, c, b, a
    // not tested
    a = pop();
    b = pop();
    c = pop();
    d = pop();
    push( b );
    push( a );
    push( d );
    push( c );
    push( b );
    push( a );
    break;

  case Bytecodes::_arraylength: {
    // Must do null-check with value on expression stack
    Node *ary = null_check(peek(), T_ARRAY);
    // Compile-time detect of null-exception?
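    // (Editor's note, not in the original source) If the array is provably
    // null, null_check has already wired up the NullPointerException path and
    // replaced control with top, so stopped() is true and no length load is
    // emitted for this bytecode.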
    if (stopped())  return;
    a = pop();
    push(load_array_length(a));
    break;
  }

  case Bytecodes::_baload: array_load(T_BYTE);   break;
  case Bytecodes::_caload: array_load(T_CHAR);   break;
  case Bytecodes::_iaload: array_load(T_INT);    break;
  case Bytecodes::_saload: array_load(T_SHORT);  break;
  case Bytecodes::_faload: array_load(T_FLOAT);  break;
  case Bytecodes::_aaload: array_load(T_OBJECT); break;
  case Bytecodes::_laload: {
    a = array_addressing(T_LONG, 0, false);
    if (stopped())  return;     // guaranteed null or range check
    dec_sp(2);                  // Pop array and index
    push_pair(make_load(control(), a, TypeLong::LONG, T_LONG, TypeAryPtr::LONGS, MemNode::unordered));
    break;
  }
  case Bytecodes::_daload: {
    a = array_addressing(T_DOUBLE, 0, false);
    if (stopped())  return;     // guaranteed null or range check
    dec_sp(2);                  // Pop array and index
    push_pair(make_load(control(), a, Type::DOUBLE, T_DOUBLE, TypeAryPtr::DOUBLES, MemNode::unordered));
    break;
  }
  case Bytecodes::_bastore: array_store(T_BYTE);  break;
  case Bytecodes::_castore: array_store(T_CHAR);  break;
  case Bytecodes::_iastore: array_store(T_INT);   break;
  case Bytecodes::_sastore: array_store(T_SHORT); break;
  case Bytecodes::_fastore: array_store(T_FLOAT); break;
  case Bytecodes::_aastore: {
    d = array_addressing(T_OBJECT, 1, true);
    if (stopped())  return;     // guaranteed null or range check
    array_store_check();
    c = pop();                  // Oop to store
    b = pop();                  // index (already used)
    a = pop();                  // the array itself
    const TypeOopPtr* elemtype = _gvn.type(a)->is_aryptr()->elem()->make_oopptr();
    const TypeAryPtr* adr_type = TypeAryPtr::OOPS;
    // Note: We don't need a write barrier for Shenandoah on a here, because
    // a is not used except for an assert. The address d already has the
    // write barrier. Adding a barrier on a only results in additional code
    // being generated.
    c = shenandoah_storeval_barrier(c);
    Node* store = store_oop_to_array(control(), a, d, adr_type, c, elemtype, T_OBJECT,
                                     StoreNode::release_if_reference(T_OBJECT));
    break;
  }
  case Bytecodes::_lastore: {
    a = array_addressing(T_LONG, 2, true);
    if (stopped())  return;     // guaranteed null or range check
    c = pop_pair();
    dec_sp(2);                  // Pop array and index
    store_to_memory(control(), a, c, T_LONG, TypeAryPtr::LONGS, MemNode::unordered);
    break;
  }
  case Bytecodes::_dastore: {
    a = array_addressing(T_DOUBLE, 2, true);
    if (stopped())  return;     // guaranteed null or range check
    c = pop_pair();
    dec_sp(2);                  // Pop array and index
    c = dstore_rounding(c);
    store_to_memory(control(), a, c, T_DOUBLE, TypeAryPtr::DOUBLES, MemNode::unordered);
    break;
  }
  case Bytecodes::_getfield:
    do_getfield();
    break;

  case Bytecodes::_getstatic:
    do_getstatic();
    break;

  case Bytecodes::_putfield:
    do_putfield();
    break;

  case Bytecodes::_putstatic:
    do_putstatic();
    break;

  case Bytecodes::_irem:
    do_irem();
    break;
  case Bytecodes::_idiv:
    // Must keep both values on the expression-stack during the zero check
    zero_check_int(peek());
    // Compile-time detect of divide-by-zero?
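    // (Editor's note, illustrative) zero_check_int only peeks at the divisor:
    // both operands stay on the expression stack so the JVM state captured on
    // the exception / uncommon-trap path is correct. A divisor known to be a
    // constant zero folds the check and stops parsing of this path here.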
    if (stopped())  return;
    b = pop();
    a = pop();
    push( _gvn.transform( new DivINode(control(),a,b) ) );
    break;
  case Bytecodes::_imul:
    b = pop(); a = pop();
    push( _gvn.transform( new MulINode(a,b) ) );
    break;
  case Bytecodes::_iadd:
    b = pop(); a = pop();
    push( _gvn.transform( new AddINode(a,b) ) );
    break;
  case Bytecodes::_ineg:
    a = pop();
    push( _gvn.transform( new SubINode(_gvn.intcon(0),a)) );
    break;
  case Bytecodes::_isub:
    b = pop(); a = pop();
    push( _gvn.transform( new SubINode(a,b) ) );
    break;
  case Bytecodes::_iand:
    b = pop(); a = pop();
    push( _gvn.transform( new AndINode(a,b) ) );
    break;
  case Bytecodes::_ior:
    b = pop(); a = pop();
    push( _gvn.transform( new OrINode(a,b) ) );
    break;
  case Bytecodes::_ixor:
    b = pop(); a = pop();
    push( _gvn.transform( new XorINode(a,b) ) );
    break;
  case Bytecodes::_ishl:
    b = pop(); a = pop();
    push( _gvn.transform( new LShiftINode(a,b) ) );
    break;
  case Bytecodes::_ishr:
    b = pop(); a = pop();
    push( _gvn.transform( new RShiftINode(a,b) ) );
    break;
  case Bytecodes::_iushr:
    b = pop(); a = pop();
    push( _gvn.transform( new URShiftINode(a,b) ) );
    break;

  case Bytecodes::_fneg:
    a = pop();
    b = _gvn.transform(new NegFNode (a));
    push(b);
    break;

  case Bytecodes::_fsub:
    b = pop();
    a = pop();
    c = _gvn.transform( new SubFNode(a,b) );
    d = precision_rounding(c);
    push( d );
    break;

  case Bytecodes::_fadd:
    b = pop();
    a = pop();
    c = _gvn.transform( new AddFNode(a,b) );
    d = precision_rounding(c);
    push( d );
    break;

  case Bytecodes::_fmul:
    b = pop();
    a = pop();
    c = _gvn.transform( new MulFNode(a,b) );
    d = precision_rounding(c);
    push( d );
    break;

  case Bytecodes::_fdiv:
    b = pop();
    a = pop();
    c = _gvn.transform( new DivFNode(0,a,b) );
    d = precision_rounding(c);
    push( d );
    break;

  case Bytecodes::_frem:
    if (Matcher::has_match_rule(Op_ModF)) {
      // Generate a ModF node.
      b = pop();
      a = pop();
      c = _gvn.transform( new ModFNode(0,a,b) );
      d = precision_rounding(c);
      push( d );
    }
    else {
      // Generate a call.
      modf();
    }
    break;

  case Bytecodes::_fcmpl:
    b = pop();
    a = pop();
    c = _gvn.transform( new CmpF3Node( a, b));
    push(c);
    break;
  case Bytecodes::_fcmpg:
    b = pop();
    a = pop();

    // Same as fcmpl but need to flip the unordered case.  Swap the inputs,
    // which negates the result sign except for unordered.  Flip the unordered
    // as well by using CmpF3 which implements unordered-lesser instead of
    // unordered-greater semantics.  Finally, negate the result bits.  Result
    // is same as using a CmpF3Greater except we did it with CmpF3 alone.
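    // (Editor's illustration, not in the original source) With either input
    // NaN, CmpF3(b, a) yields -1 (unordered-lesser); 0 - (-1) = +1, which is
    // exactly what fcmpg must push for an unordered comparison. For ordered
    // inputs the operand swap and the negation cancel, giving the same
    // -1/0/+1 as fcmpl.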
    c = _gvn.transform( new CmpF3Node( b, a));
    c = _gvn.transform( new SubINode(_gvn.intcon(0),c) );
    push(c);
    break;

  case Bytecodes::_f2i:
    a = pop();
    push(_gvn.transform(new ConvF2INode(a)));
    break;

  case Bytecodes::_d2i:
    a = pop_pair();
    b = _gvn.transform(new ConvD2INode(a));
    push( b );
    break;

  case Bytecodes::_f2d:
    a = pop();
    b = _gvn.transform( new ConvF2DNode(a));
    push_pair( b );
    break;

  case Bytecodes::_d2f:
    a = pop_pair();
    b = _gvn.transform( new ConvD2FNode(a));
    // This breaks _227_mtrt (speed & correctness) and _222_mpegaudio (speed)
    //b = _gvn.transform(new RoundFloatNode(0, b) );
    push( b );
    break;

  case Bytecodes::_l2f:
    if (Matcher::convL2FSupported()) {
      a = pop_pair();
      b = _gvn.transform( new ConvL2FNode(a));
      // For i486.ad, FILD doesn't restrict precision to 24 or 53 bits.
      // Rather than storing the result into an FP register then pushing
      // out to memory to round, the machine instruction that implements
      // ConvL2D is responsible for rounding.
      // c = precision_rounding(b);
      c = _gvn.transform(b);
      push(c);
    } else {
      l2f();
    }
    break;

  case Bytecodes::_l2d:
    a = pop_pair();
    b = _gvn.transform( new ConvL2DNode(a));
    // For i486.ad, rounding is always necessary (see _l2f above).
    // c = dprecision_rounding(b);
    c = _gvn.transform(b);
    push_pair(c);
    break;

  case Bytecodes::_f2l:
    a = pop();
    b = _gvn.transform( new ConvF2LNode(a));
    push_pair(b);
    break;

  case Bytecodes::_d2l:
    a = pop_pair();
    b = _gvn.transform( new ConvD2LNode(a));
    push_pair(b);
    break;

  case Bytecodes::_dsub:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new SubDNode(a,b) );
    d = dprecision_rounding(c);
    push_pair( d );
    break;

  case Bytecodes::_dadd:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new AddDNode(a,b) );
    d = dprecision_rounding(c);
    push_pair( d );
    break;

  case Bytecodes::_dmul:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new MulDNode(a,b) );
    d = dprecision_rounding(c);
    push_pair( d );
    break;

  case Bytecodes::_ddiv:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new DivDNode(0,a,b) );
    d = dprecision_rounding(c);
    push_pair( d );
    break;

  case Bytecodes::_dneg:
    a = pop_pair();
    b = _gvn.transform(new NegDNode (a));
    push_pair(b);
    break;

  case Bytecodes::_drem:
    if (Matcher::has_match_rule(Op_ModD)) {
      // Generate a ModD node.
      b = pop_pair();
      a = pop_pair();
      // a % b

      c = _gvn.transform( new ModDNode(0,a,b) );
      d = dprecision_rounding(c);
      push_pair( d );
    }
    else {
      // Generate a call.
      modd();
    }
    break;

  case Bytecodes::_dcmpl:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new CmpD3Node( a, b));
    push(c);
    break;

  case Bytecodes::_dcmpg:
    b = pop_pair();
    a = pop_pair();
    // Same as dcmpl but need to flip the unordered case.
    // Commute the inputs, which negates the result sign except for unordered.
    // Flip the unordered as well by using CmpD3 which implements
    // unordered-lesser instead of unordered-greater semantics.
    // Finally, negate the result bits.
    // Result is same as using a CmpD3Greater except we did it with CmpD3 alone.
    c = _gvn.transform( new CmpD3Node( b, a));
    c = _gvn.transform( new SubINode(_gvn.intcon(0),c) );
    push(c);
    break;


  // Note for longs -> lo word is on TOS, hi word is on TOS - 1
  case Bytecodes::_land:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new AndLNode(a,b) );
    push_pair(c);
    break;
  case Bytecodes::_lor:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new OrLNode(a,b) );
    push_pair(c);
    break;
  case Bytecodes::_lxor:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new XorLNode(a,b) );
    push_pair(c);
    break;

  case Bytecodes::_lshl:
    b = pop();                  // the shift count
    a = pop_pair();             // value to be shifted
    c = _gvn.transform( new LShiftLNode(a,b) );
    push_pair(c);
    break;
  case Bytecodes::_lshr:
    b = pop();                  // the shift count
    a = pop_pair();             // value to be shifted
    c = _gvn.transform( new RShiftLNode(a,b) );
    push_pair(c);
    break;
  case Bytecodes::_lushr:
    b = pop();                  // the shift count
    a = pop_pair();             // value to be shifted
    c = _gvn.transform( new URShiftLNode(a,b) );
    push_pair(c);
    break;
  case Bytecodes::_lmul:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new MulLNode(a,b) );
    push_pair(c);
    break;

  case Bytecodes::_lrem:
    // Must keep both values on the expression-stack during the zero check
    assert(peek(0) == top(), "long word order");
    zero_check_long(peek(1));
    // Compile-time detect of divide-by-zero?
    if (stopped())  return;
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new ModLNode(control(),a,b) );
    push_pair(c);
    break;

  case Bytecodes::_ldiv:
    // Must keep both values on the expression-stack during the zero check
    assert(peek(0) == top(), "long word order");
    zero_check_long(peek(1));
    // Compile-time detect of divide-by-zero?
    if (stopped())  return;
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new DivLNode(control(),a,b) );
    push_pair(c);
    break;

  case Bytecodes::_ladd:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new AddLNode(a,b) );
    push_pair(c);
    break;
  case Bytecodes::_lsub:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new SubLNode(a,b) );
    push_pair(c);
    break;
  case Bytecodes::_lcmp:
    // Safepoints are now inserted _before_ branches.  The long-compare
    // bytecode painfully produces a 3-way value (-1,0,+1) which requires a
    // slew of control flow.  These are usually followed by a CmpI vs zero and
    // a branch; this pattern then optimizes to the obvious long-compare and
    // branch.  However, if the branch is backwards there's a Safepoint
    // inserted.  The inserted Safepoint captures the JVM state at the
    // pre-branch point, i.e. it captures the 3-way value.  Thus if a
    // long-compare is used to control a loop the debug info will force
    // computation of the 3-way value, even though the generated code uses a
    // long-compare and branch.  We try to rectify the situation by inserting
    // a SafePoint here and have it dominate and kill the safepoint added at a
    // following backwards branch.  At this point the JVM state merely holds 2
    // longs but not the 3-way value.
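    // (Editor's illustration, not in the original source) Typical shape this
    // targets: a long-controlled loop compiles to
    //   loop_head: ... ; lcmp ; iflt loop_head
    // The maybe_add_safepoint call below keys off the *next* bytecode's
    // backward target, so the safepoint lands here, where the JVM state holds
    // only the two long operands; it dominates (and lets the compiler remove)
    // the safepoint that would otherwise sit at the backward branch, after
    // the 3-way value has been materialized.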
    if( UseLoopSafepoints ) {
      switch( iter().next_bc() ) {
      case Bytecodes::_ifgt:
      case Bytecodes::_iflt:
      case Bytecodes::_ifge:
      case Bytecodes::_ifle:
      case Bytecodes::_ifne:
      case Bytecodes::_ifeq:
        // If this is a backwards branch in the bytecodes, add Safepoint
        maybe_add_safepoint(iter().next_get_dest());
      default:
        break;
      }
    }
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new CmpL3Node( a, b ));
    push(c);
    break;

  case Bytecodes::_lneg:
    a = pop_pair();
    b = _gvn.transform( new SubLNode(longcon(0),a));
    push_pair(b);
    break;
  case Bytecodes::_l2i:
    a = pop_pair();
    push( _gvn.transform( new ConvL2INode(a)));
    break;
  case Bytecodes::_i2l:
    a = pop();
    b = _gvn.transform( new ConvI2LNode(a));
    push_pair(b);
    break;
  case Bytecodes::_i2b:
    // Sign extend
    a = pop();
    a = _gvn.transform( new LShiftINode(a,_gvn.intcon(24)) );
    a = _gvn.transform( new RShiftINode(a,_gvn.intcon(24)) );
    push( a );
    break;
  case Bytecodes::_i2s:
    a = pop();
    a = _gvn.transform( new LShiftINode(a,_gvn.intcon(16)) );
    a = _gvn.transform( new RShiftINode(a,_gvn.intcon(16)) );
    push( a );
    break;
  case Bytecodes::_i2c:
    a = pop();
    push( _gvn.transform( new AndINode(a,_gvn.intcon(0xFFFF)) ) );
    break;

  case Bytecodes::_i2f:
    a = pop();
    b = _gvn.transform( new ConvI2FNode(a) ) ;
    c = precision_rounding(b);
    push (b);
    break;

  case Bytecodes::_i2d:
    a = pop();
    b = _gvn.transform( new ConvI2DNode(a));
    push_pair(b);
    break;

  case Bytecodes::_iinc:        // Increment local
    i = iter().get_index();     // Get local index
    set_local( i, _gvn.transform( new AddINode( _gvn.intcon(iter().get_iinc_con()), local(i) ) ) );
    break;

  // Exit points of synchronized methods must have an unlock node
  case Bytecodes::_return:
    return_current(NULL);
    break;

  case Bytecodes::_ireturn:
  case Bytecodes::_areturn:
  case Bytecodes::_freturn:
    return_current(pop());
    break;
  case Bytecodes::_lreturn:
    return_current(pop_pair());
    break;
  case Bytecodes::_dreturn:
    return_current(pop_pair());
    break;

  case Bytecodes::_athrow:
    // A null exception oop throws a NullPointerException instead
    null_check(peek());
    if (stopped())  return;
    // Hook the thrown exception directly to subsequent handlers.
    if (BailoutToInterpreterForThrows) {
      // Keep method interpreted from now on.
      uncommon_trap(Deoptimization::Reason_unhandled,
                    Deoptimization::Action_make_not_compilable);
      return;
    }
    if (env()->jvmti_can_post_on_exceptions()) {
      // check if we must post exception events, take uncommon trap if so (with must_throw = false)
      uncommon_trap_if_should_post_on_exceptions(Deoptimization::Reason_unhandled, false);
    }
    // Here if either can_post_on_exceptions or should_post_on_exceptions is false
    add_exception_state(make_exception_state(peek()));
    break;

  case Bytecodes::_goto:   // fall through
  case Bytecodes::_goto_w: {
    int target_bci = (bc() == Bytecodes::_goto) ?
                         iter().get_dest() : iter().get_far_dest();

    // If this is a backwards branch in the bytecodes, add Safepoint
    maybe_add_safepoint(target_bci);

    // Update method data
    profile_taken_branch(target_bci);

    // Merge the current control into the target basic block
    merge(target_bci);

    // See if we can get some profile data and hand it off to the next block
    Block *target_block = block()->successor_for_bci(target_bci);
    if (target_block->pred_count() != 1)  break;
    ciMethodData* methodData = method()->method_data();
    if (!methodData->is_mature())  break;
    ciProfileData* data = methodData->bci_to_data(bci());
    assert( data->is_JumpData(), "" );
    int taken = ((ciJumpData*)data)->taken();
    taken = method()->scale_count(taken);
    target_block->set_count(taken);
    break;
  }

  case Bytecodes::_ifnull:    btest = BoolTest::eq; goto handle_if_null;
  case Bytecodes::_ifnonnull: btest = BoolTest::ne; goto handle_if_null;
  handle_if_null:
    // If this is a backwards branch in the bytecodes, add Safepoint
    maybe_add_safepoint(iter().get_dest());
    a = null();
    b = pop();
    if (!_gvn.type(b)->speculative_maybe_null() &&
        !too_many_traps(Deoptimization::Reason_speculate_null_check)) {
      inc_sp(1);
      Node* null_ctl = top();
      b = null_check_oop(b, &null_ctl, true, true, true);
      assert(null_ctl->is_top(), "no null control here");
      dec_sp(1);
    } else if (_gvn.type(b)->speculative_always_null() &&
               !too_many_traps(Deoptimization::Reason_speculate_null_assert)) {
      inc_sp(1);
      b = null_assert(b);
      dec_sp(1);
    }
    c = _gvn.transform( new CmpPNode(b, a) );
    do_ifnull(btest, c);
    break;

  case Bytecodes::_if_acmpeq: btest = BoolTest::eq; goto handle_if_acmp;
  case Bytecodes::_if_acmpne: btest = BoolTest::ne; goto handle_if_acmp;
  handle_if_acmp:
    // If this is a backwards branch in the bytecodes, add Safepoint
    maybe_add_safepoint(iter().get_dest());
    a = pop();
    b = pop();
    if (UseShenandoahGC && ShenandoahAcmpBarrier && ShenandoahVerifyOptoBarriers) {
      a = shenandoah_write_barrier(a);
      b = shenandoah_write_barrier(b);
    }
    c = _gvn.transform( new CmpPNode(b, a) );
    c = optimize_cmp_with_klass(c);
    do_if(btest, c);
    break;

  case Bytecodes::_ifeq: btest = BoolTest::eq; goto handle_ifxx;
  case Bytecodes::_ifne: btest = BoolTest::ne; goto handle_ifxx;
  case Bytecodes::_iflt: btest = BoolTest::lt; goto handle_ifxx;
  case Bytecodes::_ifle: btest = BoolTest::le; goto handle_ifxx;
  case Bytecodes::_ifgt: btest = BoolTest::gt; goto handle_ifxx;
  case Bytecodes::_ifge: btest = BoolTest::ge; goto handle_ifxx;
  handle_ifxx:
    // If this is a backwards branch in the bytecodes, add Safepoint
    maybe_add_safepoint(iter().get_dest());
    a = _gvn.intcon(0);
    b = pop();
    c = _gvn.transform( new CmpINode(b, a) );
    do_if(btest, c);
    break;

  case Bytecodes::_if_icmpeq: btest = BoolTest::eq; goto handle_if_icmp;
  case Bytecodes::_if_icmpne: btest = BoolTest::ne; goto handle_if_icmp;
  case Bytecodes::_if_icmplt: btest = BoolTest::lt; goto handle_if_icmp;
  case Bytecodes::_if_icmple: btest = BoolTest::le; goto handle_if_icmp;
  case Bytecodes::_if_icmpgt: btest = BoolTest::gt; goto handle_if_icmp;
  case Bytecodes::_if_icmpge: btest = BoolTest::ge; goto handle_if_icmp;
  handle_if_icmp:
    // If this is a backwards branch in the bytecodes, add Safepoint
    maybe_add_safepoint(iter().get_dest());
    a = pop();
    b = pop();
    c = _gvn.transform( new CmpINode( b, a ) );
    do_if(btest, c);
    break;

  case Bytecodes::_tableswitch:
    do_tableswitch();
    break;

  case Bytecodes::_lookupswitch:
    do_lookupswitch();
    break;

  case Bytecodes::_invokestatic:
  case Bytecodes::_invokedynamic:
  case Bytecodes::_invokespecial:
  case Bytecodes::_invokevirtual:
  case Bytecodes::_invokeinterface:
    do_call();
    break;
  case Bytecodes::_checkcast:
    do_checkcast();
    break;
  case Bytecodes::_instanceof:
    do_instanceof();
    break;
  case Bytecodes::_anewarray:
    do_anewarray();
    break;
  case Bytecodes::_newarray:
    do_newarray((BasicType)iter().get_index());
    break;
  case Bytecodes::_multianewarray:
    do_multianewarray();
    break;
  case Bytecodes::_new:
    do_new();
    break;

  case Bytecodes::_jsr:
  case Bytecodes::_jsr_w:
    do_jsr();
    break;

  case Bytecodes::_ret:
    do_ret();
    break;


  case Bytecodes::_monitorenter:
    do_monitor_enter();
    break;

  case Bytecodes::_monitorexit:
    do_monitor_exit();
    break;

  case Bytecodes::_breakpoint:
    // Breakpoint set concurrently to compile
    // %%% use an uncommon trap?
    C->record_failure("breakpoint in method");
    return;

  default:
#ifndef PRODUCT
    map()->dump(99);
#endif
    tty->print("\nUnhandled bytecode %s\n", Bytecodes::name(bc()) );
    ShouldNotReachHere();
  }

#ifndef PRODUCT
  IdealGraphPrinter *printer = C->printer();
  if (printer && printer->should_print(1)) {
    char buffer[256];
    sprintf(buffer, "Bytecode %d: %s", bci(), Bytecodes::name(bc()));
    bool old = printer->traverse_outs();
    printer->set_traverse_outs(true);
    printer->print_method(buffer, 4);
    printer->set_traverse_outs(old);
  }
#endif
}