/*
 * Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/ciMethodData.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "compiler/compileLog.hpp"
#include "interpreter/linkResolver.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.inline.hpp"
#include "oops/oop.inline.hpp"
#include "opto/addnode.hpp"
#include "opto/castnode.hpp"
#include "opto/convertnode.hpp"
#include "opto/divnode.hpp"
#include "opto/idealGraphPrinter.hpp"
#include "opto/matcher.hpp"
#include "opto/memnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/opaquenode.hpp"
#include "opto/parse.hpp"
#include "opto/runtime.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/sharedRuntime.hpp"

#ifndef PRODUCT
extern int explicit_null_checks_inserted,
           explicit_null_checks_elided;
#endif

//---------------------------------array_load----------------------------------
void Parse::array_load(BasicType bt) {
  const Type* elemtype = Type::TOP;
  bool big_val = bt == T_DOUBLE || bt == T_LONG;
  Node* adr = array_addressing(bt, 0, &elemtype);
  if (stopped())  return;     // guaranteed null or range check

  pop();                      // index (already used)
  Node* array = pop();        // the array itself

  if (elemtype == TypeInt::BOOL) {
    bt = T_BOOLEAN;
  } else if (bt == T_OBJECT) {
    elemtype = _gvn.type(array)->is_aryptr()->elem()->make_oopptr();
  }

  const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);

  Node* ld = access_load_at(array, adr, adr_type, elemtype, bt, C2_MO_RELAXED | C2_ACCESS_ON_HEAP | C2_ACCESS_ON_ARRAY);
  if (big_val) {
    push_pair(ld);
  } else {
    push(ld);
  }
}


//--------------------------------array_store----------------------------------
void Parse::array_store(BasicType bt) {
  const Type* elemtype = Type::TOP;
  bool big_val = bt == T_DOUBLE || bt == T_LONG;
  Node* adr = array_addressing(bt, big_val ? 2 : 1, &elemtype);
  if (stopped())  return;     // guaranteed null or range check
  if (bt == T_OBJECT) {
    array_store_check();
  }
  Node* val;                  // Oop to store
  if (big_val) {
    val = pop_pair();
  } else {
    val = pop();
  }
  pop();                      // index (already used)
  Node* array = pop();        // the array itself

  if (elemtype == TypeInt::BOOL) {
    bt = T_BOOLEAN;
  } else if (bt == T_OBJECT) {
    elemtype = _gvn.type(array)->is_aryptr()->elem()->make_oopptr();
  }

  const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);

  access_store_at(control(), array, adr, adr_type, val, elemtype, bt, C2_MO_RELAXED | C2_ACCESS_ON_HEAP | C2_ACCESS_ON_ARRAY);
}


//------------------------------array_addressing-------------------------------
// Pull array and index from the stack.  Compute pointer-to-element.
Node* Parse::array_addressing(BasicType type, int vals, const Type* *result2) {
  Node *idx = peek(0+vals);   // Get from stack without popping
  Node *ary = peek(1+vals);   // in case of exception

  // Null check the array base, with correct stack contents
  ary = null_check(ary, T_ARRAY);
  // Compile-time detect of null-exception?
  if (stopped())  return top();

  const TypeAryPtr* arytype  = _gvn.type(ary)->is_aryptr();
  const TypeInt*    sizetype = arytype->size();
  const Type*       elemtype = arytype->elem();

  if (UseUniqueSubclasses && result2 != NULL) {
    const Type* el = elemtype->make_ptr();
    if (el && el->isa_instptr()) {
      const TypeInstPtr* toop = el->is_instptr();
      if (toop->klass()->as_instance_klass()->unique_concrete_subklass()) {
        // If we load from "AbstractClass[]" we must see "ConcreteSubClass".
        const Type* subklass = Type::get_const_type(toop->klass());
        elemtype = subklass->join_speculative(el);
      }
    }
  }

  // Check for big class initializers with all constant offsets
  // feeding into a known-size array.
  const TypeInt* idxtype = _gvn.type(idx)->is_int();
  // See if the highest idx value is less than the lowest array bound,
  // and if the idx value cannot be negative:
  bool need_range_check = true;
  if (idxtype->_hi < sizetype->_lo && idxtype->_lo >= 0) {
    need_range_check = false;
    if (C->log() != NULL)  C->log()->elem("observe that='!need_range_check'");
  }

  ciKlass * arytype_klass = arytype->klass();
  if ((arytype_klass != NULL) && (!arytype_klass->is_loaded())) {
    // Only fails for some -Xcomp runs
    // The class is unloaded.  We have to run this bytecode in the interpreter.
    uncommon_trap(Deoptimization::Reason_unloaded,
                  Deoptimization::Action_reinterpret,
                  arytype->klass(), "!loaded array");
    return top();
  }

  // Do the range check
  if (GenerateRangeChecks && need_range_check) {
    Node* tst;
    if (sizetype->_hi <= 0) {
      // The greatest array bound is negative, so we can conclude that we're
      // compiling unreachable code, but the unsigned compare trick used below
      // only works with non-negative lengths.  Instead, hack "tst" to be zero so
      // the uncommon_trap path will always be taken.
      tst = _gvn.intcon(0);
    } else {
      // Range is constant in array-oop, so we can use the original state of mem
      Node* len = load_array_length(ary);

      // Test length vs index (standard trick using unsigned compare)
      Node* chk = _gvn.transform( new CmpUNode(idx, len) );
      BoolTest::mask btest = BoolTest::lt;
      tst = _gvn.transform( new BoolNode(chk, btest) );
    }
    RangeCheckNode* rc = new RangeCheckNode(control(), tst, PROB_MAX, COUNT_UNKNOWN);
    _gvn.set_type(rc, rc->Value(&_gvn));
    if (!tst->is_Con()) {
      record_for_igvn(rc);
    }
    set_control(_gvn.transform(new IfTrueNode(rc)));
    // Branch to failure if out of bounds
    {
      PreserveJVMState pjvms(this);
      set_control(_gvn.transform(new IfFalseNode(rc)));
      if (C->allow_range_check_smearing()) {
        // Do not use builtin_throw, since range checks are sometimes
        // made more stringent by an optimistic transformation.
        // This creates "tentative" range checks at this point,
        // which are not guaranteed to throw exceptions.
        // See IfNode::Ideal, is_range_check, adjust_check.
        uncommon_trap(Deoptimization::Reason_range_check,
                      Deoptimization::Action_make_not_entrant,
                      NULL, "range_check");
      } else {
        // If we have already recompiled with the range-check-widening
        // heroic optimization turned off, then we must really be throwing
        // range check exceptions.
        builtin_throw(Deoptimization::Reason_range_check, idx);
      }
    }
  }
  // Check for always knowing you are throwing a range-check exception
  if (stopped())  return top();

  // Make array address computation control dependent to prevent it
  // from floating above the range check during loop optimizations.
  Node* ptr = array_element_address(ary, idx, type, sizetype, control());

  if (result2 != NULL)  *result2 = elemtype;

  assert(ptr != top(), "top should go hand-in-hand with stopped");

  return ptr;
}


// returns IfNode
IfNode* Parse::jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask) {
  Node   *cmp = _gvn.transform( new CmpINode( a, b)); // two cases: shiftcount > 32 and shiftcount <= 32
  Node   *tst = _gvn.transform( new BoolNode( cmp, mask));
  IfNode *iff = create_and_map_if( control(), tst, ((mask == BoolTest::eq) ? PROB_STATIC_INFREQUENT : PROB_FAIR), COUNT_UNKNOWN );
  return iff;
}

// return Region node
Node* Parse::jump_if_join(Node* iffalse, Node* iftrue) {
  Node *region = new RegionNode(3); // 2 results
  record_for_igvn(region);
  region->init_req(1, iffalse);
  region->init_req(2, iftrue );
  _gvn.set_type(region, Type::CONTROL);
  region = _gvn.transform(region);
  set_control (region);
  return region;
}


//------------------------------helper for tableswitch-------------------------
void Parse::jump_if_true_fork(IfNode *iff, int dest_bci_if_true, int prof_table_index) {
  // True branch, use existing map info
  { PreserveJVMState pjvms(this);
    Node *iftrue  = _gvn.transform( new IfTrueNode (iff) );
    set_control( iftrue );
    profile_switch_case(prof_table_index);
    merge_new_path(dest_bci_if_true);
  }

  // False branch
  Node *iffalse = _gvn.transform( new IfFalseNode(iff) );
  set_control( iffalse );
}

void Parse::jump_if_false_fork(IfNode *iff, int dest_bci_if_true, int prof_table_index) {
  // True branch, use existing map info
  { PreserveJVMState pjvms(this);
    Node *iffalse = _gvn.transform( new IfFalseNode (iff) );
    set_control( iffalse );
    profile_switch_case(prof_table_index);
    merge_new_path(dest_bci_if_true);
  }

  // False branch
  Node *iftrue = _gvn.transform( new IfTrueNode(iff) );
  set_control( iftrue );
}

void Parse::jump_if_always_fork(int dest_bci, int prof_table_index) {
  // False branch, use existing map and control()
  profile_switch_case(prof_table_index);
  merge_new_path(dest_bci);
}


extern "C" {
  static int jint_cmp(const void *i, const void *j) {
    int a = *(jint *)i;
    int b = *(jint *)j;
    return a > b ? 1 : a < b ? -1 : 0;
  }
}


// Default value for methodData switch indexing. Must be a negative value to avoid
// conflict with any legal switch index.
#define NullTableIndex -1

class SwitchRange : public StackObj {
  // a range of integers coupled with a bci destination
  jint _lo;                     // inclusive lower limit
  jint _hi;                     // inclusive upper limit
  int  _dest;
  int  _table_index;            // index into method data table

public:
  jint lo() const              { return _lo;   }
  jint hi() const              { return _hi;   }
  int  dest() const            { return _dest; }
  int  table_index() const     { return _table_index; }
  bool is_singleton() const    { return _lo == _hi; }

  void setRange(jint lo, jint hi, int dest, int table_index) {
    assert(lo <= hi, "must be a non-empty range");
    _lo = lo, _hi = hi; _dest = dest; _table_index = table_index;
  }
  bool adjoinRange(jint lo, jint hi, int dest, int table_index) {
    assert(lo <= hi, "must be a non-empty range");
    if (lo == _hi+1 && dest == _dest && table_index == _table_index) {
      _hi = hi;
      return true;
    }
    return false;
  }

  void set (jint value, int dest, int table_index) {
    setRange(value, value, dest, table_index);
  }
  bool adjoin(jint value, int dest, int table_index) {
    return adjoinRange(value, value, dest, table_index);
  }

  void print() {
    if (is_singleton())
      tty->print(" {%d}=>%d", lo(), dest());
    else if (lo() == min_jint)
      tty->print(" {..%d}=>%d", hi(), dest());
    else if (hi() == max_jint)
      tty->print(" {%d..}=>%d", lo(), dest());
    else
      tty->print(" {%d..%d}=>%d", lo(), hi(), dest());
  }
};


//-------------------------------do_tableswitch--------------------------------
void Parse::do_tableswitch() {
  Node* lookup = pop();

  // Get information about tableswitch
  int default_dest = iter().get_dest_table(0);
  int lo_index     = iter().get_int_table(1);
  int hi_index     = iter().get_int_table(2);
  int len          = hi_index - lo_index + 1;

  if (len < 1) {
    // If this is a backward branch, add safepoint
    maybe_add_safepoint(default_dest);
    merge(default_dest);
    return;
  }

  // generate decision tree, using trichotomy when possible
  int rnum = len+2;
  bool makes_backward_branch = false;
  SwitchRange* ranges = NEW_RESOURCE_ARRAY(SwitchRange, rnum);
  int rp = -1;
  if (lo_index != min_jint) {
    ranges[++rp].setRange(min_jint, lo_index-1, default_dest, NullTableIndex);
  }
  for (int j = 0; j < len; j++) {
    jint match_int = lo_index+j;
    int  dest      = iter().get_dest_table(j+3);
    makes_backward_branch |= (dest <= bci());
    int  table_index = method_data_update() ? j : NullTableIndex;
    if (rp < 0 || !ranges[rp].adjoin(match_int, dest, table_index)) {
      ranges[++rp].set(match_int, dest, table_index);
    }
  }
  jint highest = lo_index+(len-1);
  assert(ranges[rp].hi() == highest, "");
  if (highest != max_jint
      && !ranges[rp].adjoinRange(highest+1, max_jint, default_dest, NullTableIndex)) {
    ranges[++rp].setRange(highest+1, max_jint, default_dest, NullTableIndex);
  }
  assert(rp < len+2, "not too many ranges");

  // Safepoint in case backward branch observed
  if( makes_backward_branch && UseLoopSafepoints )
    add_safepoint();

  jump_switch_ranges(lookup, &ranges[0], &ranges[rp]);
}


//------------------------------do_lookupswitch--------------------------------
void Parse::do_lookupswitch() {
  Node *lookup = pop();         // lookup value
  // Get information about lookupswitch
  int default_dest = iter().get_dest_table(0);
  int len          = iter().get_int_table(1);

  if (len < 1) {    // If this is a backward branch, add safepoint
    maybe_add_safepoint(default_dest);
    merge(default_dest);
    return;
  }

  // generate decision tree, using trichotomy when possible
  jint* table = NEW_RESOURCE_ARRAY(jint, len*2);
  {
    for( int j = 0; j < len; j++ ) {
      table[j+j+0] = iter().get_int_table(2+j+j);
      table[j+j+1] = iter().get_dest_table(2+j+j+1);
    }
    qsort( table, len, 2*sizeof(table[0]), jint_cmp );
  }

  int rnum = len*2+1;
  bool makes_backward_branch = false;
  SwitchRange* ranges = NEW_RESOURCE_ARRAY(SwitchRange, rnum);
  int rp = -1;
  for( int j = 0; j < len; j++ ) {
    jint match_int   = table[j+j+0];
    int  dest        = table[j+j+1];
    int  next_lo     = rp < 0 ? min_jint : ranges[rp].hi()+1;
    int  table_index = method_data_update() ? j : NullTableIndex;
    makes_backward_branch |= (dest <= bci());
    if( match_int != next_lo ) {
      ranges[++rp].setRange(next_lo, match_int-1, default_dest, NullTableIndex);
    }
    if( rp < 0 || !ranges[rp].adjoin(match_int, dest, table_index) ) {
      ranges[++rp].set(match_int, dest, table_index);
    }
  }
  jint highest = table[2*(len-1)];
  assert(ranges[rp].hi() == highest, "");
  if( highest != max_jint
      && !ranges[rp].adjoinRange(highest+1, max_jint, default_dest, NullTableIndex) ) {
    ranges[++rp].setRange(highest+1, max_jint, default_dest, NullTableIndex);
  }
  assert(rp < rnum, "not too many ranges");

  // Safepoint in case backward branch observed
  if( makes_backward_branch && UseLoopSafepoints )
    add_safepoint();

  jump_switch_ranges(lookup, &ranges[0], &ranges[rp]);
}

//----------------------------create_jump_tables-------------------------------
bool Parse::create_jump_tables(Node* key_val, SwitchRange* lo, SwitchRange* hi) {
  // Are jumptables enabled
  if (!UseJumpTables)  return false;

  // Are jumptables supported
  if (!Matcher::has_match_rule(Op_Jump))  return false;

  // Don't make jump table if profiling
  if (method_data_update())  return false;

  // Decide if a guard is needed to lop off big ranges at either (or
  // both) end(s) of the input set.  We'll call this the default target
  // even though we can't be sure that it is the true "default".

  bool needs_guard = false;
  int default_dest;
  int64_t total_outlier_size = 0;
  int64_t hi_size = ((int64_t)hi->hi()) - ((int64_t)hi->lo()) + 1;
  int64_t lo_size = ((int64_t)lo->hi()) - ((int64_t)lo->lo()) + 1;

  if (lo->dest() == hi->dest()) {
    total_outlier_size = hi_size + lo_size;
    default_dest = lo->dest();
  } else if (lo_size > hi_size) {
    total_outlier_size = lo_size;
    default_dest = lo->dest();
  } else {
    total_outlier_size = hi_size;
    default_dest = hi->dest();
  }

  // If a guard test will eliminate very sparse end ranges, then
  // it is worth the cost of an extra jump.
  if (total_outlier_size > (MaxJumpTableSparseness * 4)) {
    needs_guard = true;
    if (default_dest == lo->dest()) lo++;
    if (default_dest == hi->dest()) hi--;
  }

  // Find the total number of cases and ranges
  int64_t num_cases = ((int64_t)hi->hi()) - ((int64_t)lo->lo()) + 1;
  int num_range = hi - lo + 1;

  // Don't create table if: too large, too small, or too sparse.
  if (num_cases < MinJumpTableSize || num_cases > MaxJumpTableSize)
    return false;
  if (num_cases > (MaxJumpTableSparseness * num_range))
    return false;

  // Normalize table lookups to zero
  int lowval = lo->lo();
  key_val = _gvn.transform( new SubINode(key_val, _gvn.intcon(lowval)) );

  // Generate a guard to protect against input keyvals that aren't
  // in the switch domain.
  if (needs_guard) {
    Node*   size = _gvn.intcon(num_cases);
    Node*   cmp  = _gvn.transform( new CmpUNode(key_val, size) );
    Node*   tst  = _gvn.transform( new BoolNode(cmp, BoolTest::ge) );
    IfNode* iff  = create_and_map_if( control(), tst, PROB_FAIR, COUNT_UNKNOWN);
    jump_if_true_fork(iff, default_dest, NullTableIndex);
  }

  // Create an ideal node JumpTable that has projections
  // of all possible ranges for a switch statement
  // The key_val input must be converted to a pointer offset and scaled.
  // Compare Parse::array_addressing above.

  // Clean the 32-bit int into a real 64-bit offset.
  // Otherwise, the jint value 0 might turn into an offset of 0x0800000000.
  const TypeInt* ikeytype = TypeInt::make(0, num_cases, Type::WidenMin);
  // Make I2L conversion control dependent to prevent it from
  // floating above the range check during loop optimizations.
  key_val = C->conv_I2X_index(&_gvn, key_val, ikeytype, control());

  // Shift the value by wordsize so we have an index into the table, rather
  // than a switch value
  Node *shiftWord = _gvn.MakeConX(wordSize);
  key_val = _gvn.transform( new MulXNode( key_val, shiftWord));

  // Create the JumpNode
  Node* jtn = _gvn.transform( new JumpNode(control(), key_val, num_cases) );

  // These are the switch destinations hanging off the jumpnode
  int i = 0;
  for (SwitchRange* r = lo; r <= hi; r++) {
    for (int64_t j = r->lo(); j <= r->hi(); j++, i++) {
      Node* input = _gvn.transform(new JumpProjNode(jtn, i, r->dest(), (int)(j - lowval)));
      {
        PreserveJVMState pjvms(this);
        set_control(input);
        jump_if_always_fork(r->dest(), r->table_index());
      }
    }
  }
  assert(i == num_cases, "miscount of cases");
  stop_and_kill_map();  // no more uses for this JVMS
  return true;
}

//----------------------------jump_switch_ranges-------------------------------
void Parse::jump_switch_ranges(Node* key_val, SwitchRange *lo, SwitchRange *hi, int switch_depth) {
  Block* switch_block = block();

  if (switch_depth == 0) {
    // Do special processing for the top-level call.
    assert(lo->lo() == min_jint, "initial range must exhaust Type::INT");
    assert(hi->hi() == max_jint, "initial range must exhaust Type::INT");

    // Decrement pred-numbers for the unique set of nodes.
#ifdef ASSERT
    // Ensure that the block's successors are a (duplicate-free) set.
    int successors_counted = 0;  // block occurrences in [hi..lo]
    int unique_successors = switch_block->num_successors();
    for (int i = 0; i < unique_successors; i++) {
      Block* target = switch_block->successor_at(i);

      // Check that the set of successors is the same in both places.
      int successors_found = 0;
      for (SwitchRange* p = lo; p <= hi; p++) {
        if (p->dest() == target->start())  successors_found++;
      }
      assert(successors_found > 0, "successor must be known");
      successors_counted += successors_found;
    }
    assert(successors_counted == (hi-lo)+1, "no unexpected successors");
#endif

    // Maybe prune the inputs, based on the type of key_val.
    jint min_val = min_jint;
    jint max_val = max_jint;
    const TypeInt* ti = key_val->bottom_type()->isa_int();
    if (ti != NULL) {
      min_val = ti->_lo;
      max_val = ti->_hi;
      assert(min_val <= max_val, "invalid int type");
    }
    while (lo->hi() < min_val)  lo++;
    if (lo->lo() < min_val)  lo->setRange(min_val, lo->hi(), lo->dest(), lo->table_index());
    while (hi->lo() > max_val)  hi--;
    if (hi->hi() > max_val)  hi->setRange(hi->lo(), max_val, hi->dest(), hi->table_index());
  }

#ifndef PRODUCT
  if (switch_depth == 0) {
    _max_switch_depth = 0;
    _est_switch_depth = log2_intptr((hi-lo+1)-1)+1;
  }
#endif

  assert(lo <= hi, "must be a non-empty set of ranges");
  if (lo == hi) {
    jump_if_always_fork(lo->dest(), lo->table_index());
  } else {
    assert(lo->hi() == (lo+1)->lo()-1, "contiguous ranges");
    assert(hi->lo() == (hi-1)->hi()+1, "contiguous ranges");

    if (create_jump_tables(key_val, lo, hi)) return;

    int nr = hi - lo + 1;

    SwitchRange* mid = lo + nr/2;
    // if there is an easy choice, pivot at a singleton:
    if (nr > 3 && !mid->is_singleton() && (mid-1)->is_singleton())  mid--;

    assert(lo < mid && mid <= hi, "good pivot choice");
    assert(nr != 2 || mid == hi,   "should pick higher of 2");
    assert(nr != 3 || mid == hi-1, "should pick middle of 3");

    Node *test_val = _gvn.intcon(mid->lo());

    if (mid->is_singleton()) {
      IfNode *iff_ne = jump_if_fork_int(key_val, test_val, BoolTest::ne);
      jump_if_false_fork(iff_ne, mid->dest(), mid->table_index());

      // Special Case:  If there are exactly three ranges, and the high
      // and low range each go to the same place, omit the "gt" test,
      // since it will not discriminate anything.
      bool eq_test_only = (hi == lo+2 && hi->dest() == lo->dest());
      if (eq_test_only) {
        assert(mid == hi-1, "");
      }

      // if there is a higher range, test for it and process it:
      if (mid < hi && !eq_test_only) {
        // two comparisons of same values--should enable 1 test for 2 branches
        // Use BoolTest::le instead of BoolTest::gt
        IfNode *iff_le  = jump_if_fork_int(key_val, test_val, BoolTest::le);
        Node   *iftrue  = _gvn.transform( new IfTrueNode(iff_le) );
        Node   *iffalse = _gvn.transform( new IfFalseNode(iff_le) );
        { PreserveJVMState pjvms(this);
          set_control(iffalse);
          jump_switch_ranges(key_val, mid+1, hi, switch_depth+1);
        }
        set_control(iftrue);
      }

    } else {
      // mid is a range, not a singleton, so treat mid..hi as a unit
      IfNode *iff_ge = jump_if_fork_int(key_val, test_val, BoolTest::ge);

      // if there is a higher range, test for it and process it:
      if (mid == hi) {
        jump_if_true_fork(iff_ge, mid->dest(), mid->table_index());
      } else {
        Node *iftrue  = _gvn.transform( new IfTrueNode(iff_ge) );
        Node *iffalse = _gvn.transform( new IfFalseNode(iff_ge) );
        { PreserveJVMState pjvms(this);
          set_control(iftrue);
          jump_switch_ranges(key_val, mid, hi, switch_depth+1);
        }
        set_control(iffalse);
      }
    }

    // in any case, process the lower range
    jump_switch_ranges(key_val, lo, mid-1, switch_depth+1);
  }

  // Decrease pred_count for each successor after all is done.
  if (switch_depth == 0) {
    int unique_successors = switch_block->num_successors();
    for (int i = 0; i < unique_successors; i++) {
      Block* target = switch_block->successor_at(i);
      // Throw away the pre-allocated path for each unique successor.
      target->next_path_num();
    }
  }

#ifndef PRODUCT
  _max_switch_depth = MAX2(switch_depth, _max_switch_depth);
  if (TraceOptoParse && Verbose && WizardMode && switch_depth == 0) {
    SwitchRange* r;
    int nsing = 0;
    for( r = lo; r <= hi; r++ ) {
      if( r->is_singleton() )  nsing++;
    }
    tty->print(">>> ");
    _method->print_short_name();
    tty->print_cr(" switch decision tree");
    tty->print_cr("    %d ranges (%d singletons), max_depth=%d, est_depth=%d",
                  (int) (hi-lo+1), nsing, _max_switch_depth, _est_switch_depth);
    if (_max_switch_depth > _est_switch_depth) {
      tty->print_cr("******** BAD SWITCH DEPTH ********");
    }
    tty->print("   ");
    for( r = lo; r <= hi; r++ ) {
      r->print();
    }
    tty->cr();
  }
#endif
}

void Parse::modf() {
  Node *f2 = pop();
  Node *f1 = pop();
  Node* c = make_runtime_call(RC_LEAF, OptoRuntime::modf_Type(),
                              CAST_FROM_FN_PTR(address, SharedRuntime::frem),
                              "frem", NULL, //no memory effects
                              f1, f2);
  Node* res = _gvn.transform(new ProjNode(c, TypeFunc::Parms + 0));

  push(res);
}

void Parse::modd() {
  Node *d2 = pop_pair();
  Node *d1 = pop_pair();
  Node* c = make_runtime_call(RC_LEAF, OptoRuntime::Math_DD_D_Type(),
                              CAST_FROM_FN_PTR(address, SharedRuntime::drem),
                              "drem", NULL, //no memory effects
                              d1, top(), d2, top());
  Node* res_d = _gvn.transform(new ProjNode(c, TypeFunc::Parms + 0));

#ifdef ASSERT
  Node* res_top = _gvn.transform(new ProjNode(c, TypeFunc::Parms + 1));
  assert(res_top == top(), "second value must be top");
#endif

  push_pair(res_d);
}

void Parse::l2f() {
  Node* f2 = pop();
  Node* f1 = pop();
  Node* c = make_runtime_call(RC_LEAF, OptoRuntime::l2f_Type(),
                              CAST_FROM_FN_PTR(address, SharedRuntime::l2f),
                              "l2f", NULL, //no memory effects
                              f1, f2);
  Node* res = _gvn.transform(new ProjNode(c, TypeFunc::Parms + 0));

  push(res);
}

void Parse::do_irem() {
  // Must keep both values on the expression-stack during null-check
  zero_check_int(peek());
  // Compile-time detect of null-exception?
  if (stopped())  return;

  Node* b = pop();
  Node* a = pop();

  const Type *t = _gvn.type(b);
  if (t != Type::TOP) {
    const TypeInt *ti = t->is_int();
    if (ti->is_con()) {
      int divisor = ti->get_con();
      // check for positive power of 2
      if (divisor > 0 &&
          (divisor & ~(divisor-1)) == divisor) {
        // yes !
        Node *mask = _gvn.intcon((divisor - 1));
        // Sigh, must handle negative dividends
        Node *zero = _gvn.intcon(0);
        IfNode *ifff = jump_if_fork_int(a, zero, BoolTest::lt);
        Node *iff = _gvn.transform( new IfFalseNode(ifff) );
        Node *ift = _gvn.transform( new IfTrueNode (ifff) );
        Node *reg = jump_if_join(ift, iff);
        Node *phi = PhiNode::make(reg, NULL, TypeInt::INT);
        // Negative path; negate/and/negate
        Node *neg = _gvn.transform( new SubINode(zero, a) );
        Node *andn= _gvn.transform( new AndINode(neg, mask) );
        Node *negn= _gvn.transform( new SubINode(zero, andn) );
        phi->init_req(1, negn);
        // Fast positive case
        Node *andx = _gvn.transform( new AndINode(a, mask) );
        phi->init_req(2, andx);
        // Push the merge
        push( _gvn.transform(phi) );
        return;
      }
    }
  }
  // Default case
  push( _gvn.transform( new ModINode(control(),a,b) ) );
}

// Handle jsr and jsr_w bytecode
void Parse::do_jsr() {
  assert(bc() == Bytecodes::_jsr || bc() == Bytecodes::_jsr_w, "wrong bytecode");

  // Store information about current state, tagged with new _jsr_bci
  int return_bci = iter().next_bci();
  int jsr_bci    = (bc() == Bytecodes::_jsr) ? iter().get_dest() : iter().get_far_dest();

  // Update method data
  profile_taken_branch(jsr_bci);

  // The way we do things now, there is only one successor block
  // for the jsr, because the target code is cloned by ciTypeFlow.
  Block* target = successor_for_bci(jsr_bci);

  // What got pushed?
  const Type* ret_addr = target->peek();
  assert(ret_addr->singleton(), "must be a constant (cloned jsr body)");

  // Effect of jsr on stack
  push(_gvn.makecon(ret_addr));

  // Flow to the jsr.
  merge(jsr_bci);
}

// Handle ret bytecode
void Parse::do_ret() {
  // Find to whom we return.
  assert(block()->num_successors() == 1, "a ret can only go one place now");
  Block* target = block()->successor_at(0);
  assert(!target->is_ready(), "our arrival must be expected");
  profile_ret(target->flow()->start());
  int pnum = target->next_path_num();
  merge_common(target, pnum);
}

static bool has_injected_profile(BoolTest::mask btest, Node* test, int& taken, int& not_taken) {
  if (btest != BoolTest::eq && btest != BoolTest::ne) {
    // Only ::eq and ::ne are supported for profile injection.
    return false;
  }
  if (test->is_Cmp() &&
      test->in(1)->Opcode() == Op_ProfileBoolean) {
    ProfileBooleanNode* profile = (ProfileBooleanNode*)test->in(1);
    int false_cnt = profile->false_count();
    int  true_cnt = profile->true_count();

    // Counts matching depends on the actual test operation (::eq or ::ne).
    // No need to scale the counts because profile injection was designed
    // to feed exact counts into VM.
    taken     = (btest == BoolTest::eq) ? false_cnt :  true_cnt;
    not_taken = (btest == BoolTest::eq) ?  true_cnt : false_cnt;

    profile->consume();
    return true;
  }
  return false;
}
//--------------------------dynamic_branch_prediction--------------------------
// Try to gather dynamic branch prediction behavior.  Return a probability
// of the branch being taken and set the "cnt" field.  Returns a -1.0
// if we need to use static prediction for some reason.
float Parse::dynamic_branch_prediction(float &cnt, BoolTest::mask btest, Node* test) {
  ResourceMark rm;

  cnt = COUNT_UNKNOWN;

  int     taken = 0;
  int not_taken = 0;

  bool use_mdo = !has_injected_profile(btest, test, taken, not_taken);

  if (use_mdo) {
    // Use MethodData information if it is available
    // FIXME: free the ProfileData structure
    ciMethodData* methodData = method()->method_data();
    if (!methodData->is_mature())  return PROB_UNKNOWN;
    ciProfileData* data = methodData->bci_to_data(bci());
    if (data == NULL) {
      return PROB_UNKNOWN;
    }
    if (!data->is_JumpData())  return PROB_UNKNOWN;

    // get taken and not taken values
    taken = data->as_JumpData()->taken();
    not_taken = 0;
    if (data->is_BranchData()) {
      not_taken = data->as_BranchData()->not_taken();
    }

    // scale the counts to be commensurate with invocation counts:
    taken = method()->scale_count(taken);
    not_taken = method()->scale_count(not_taken);
  }

  // Give up if too few (or too many, in which case the sum will overflow) counts to be meaningful.
  // We also check that individual counters are positive first, otherwise the sum can become positive.
  if (taken < 0 || not_taken < 0 || taken + not_taken < 40) {
    if (C->log() != NULL) {
      C->log()->elem("branch target_bci='%d' taken='%d' not_taken='%d'", iter().get_dest(), taken, not_taken);
    }
    return PROB_UNKNOWN;
  }

  // Compute frequency that we arrive here
  float sum = taken + not_taken;
  // Adjust, if this block is a cloned private block but the
  // Jump counts are shared.  Take the private counts for
  // just this path instead of the shared counts.
  if( block()->count() > 0 )
    sum = block()->count();
  cnt = sum / FreqCountInvocations;

  // Pin probability to sane limits
  float prob;
  if( !taken )
    prob = (0+PROB_MIN) / 2;
  else if( !not_taken )
    prob = (1+PROB_MAX) / 2;
  else {                         // Compute probability of true path
    prob = (float)taken / (float)(taken + not_taken);
    if (prob > PROB_MAX)  prob = PROB_MAX;
    if (prob < PROB_MIN)  prob = PROB_MIN;
  }

  assert((cnt > 0.0f) && (prob > 0.0f),
         "Bad frequency assignment in if");

  if (C->log() != NULL) {
    const char* prob_str = NULL;
    if (prob >= PROB_MAX)  prob_str = (prob == PROB_MAX) ? "max" : "always";
    if (prob <= PROB_MIN)  prob_str = (prob == PROB_MIN) ? "min" : "never";
    char prob_str_buf[30];
    if (prob_str == NULL) {
      sprintf(prob_str_buf, "%g", prob);
      prob_str = prob_str_buf;
    }
    C->log()->elem("branch target_bci='%d' taken='%d' not_taken='%d' cnt='%f' prob='%s'",
                   iter().get_dest(), taken, not_taken, cnt, prob_str);
  }
  return prob;
}

//-----------------------------branch_prediction-------------------------------
float Parse::branch_prediction(float& cnt,
                               BoolTest::mask btest,
                               int target_bci,
                               Node* test) {
  float prob = dynamic_branch_prediction(cnt, btest, test);
  // If prob is unknown, switch to static prediction
  if (prob != PROB_UNKNOWN)  return prob;

  prob = PROB_FAIR;                   // Set default value
  if (btest == BoolTest::eq)          // Exactly equal test?
    prob = PROB_STATIC_INFREQUENT;    // Assume it's relatively infrequent
  else if (btest == BoolTest::ne)
    prob = PROB_STATIC_FREQUENT;      // Assume it's relatively frequent

  // If this is a conditional test guarding a backwards branch,
  // assume it's a loop-back edge.  Make it a likely taken branch.
  if (target_bci < bci()) {
    if (is_osr_parse()) {    // Could be a hot OSR'd loop; force deopt
      // Since it's an OSR, we probably have profile data, but since
      // branch_prediction returned PROB_UNKNOWN, the counts are too small.
      // Let's make a special check here for completely zero counts.
      ciMethodData* methodData = method()->method_data();
      if (!methodData->is_empty()) {
        ciProfileData* data = methodData->bci_to_data(bci());
        // Only stop for truly zero counts, which mean an unknown part
        // of the OSR-ed method, and we want to deopt to gather more stats.
        // If you have ANY counts, then this loop is simply 'cold' relative
        // to the OSR loop.
        if (data == NULL ||
            (data->as_BranchData()->taken() + data->as_BranchData()->not_taken() == 0)) {
          // This is the only way to return PROB_UNKNOWN:
          return PROB_UNKNOWN;
        }
      }
    }
    prob = PROB_STATIC_FREQUENT;     // Likely to take backwards branch
  }

  assert(prob != PROB_UNKNOWN, "must have some guess at this point");
  return prob;
}

// The magic constants are chosen so as to match the output of
// branch_prediction() when the profile reports a zero taken count.
// It is important to distinguish zero counts unambiguously, because
// some branches (e.g., _213_javac.Assembler.eliminate) validly produce
// very small but nonzero probabilities, which if confused with zero
// counts would keep the program recompiling indefinitely.
bool Parse::seems_never_taken(float prob) const {
  return prob < PROB_MIN;
}

// True if the comparison seems to be the kind that will not change its
// statistics from true to false.  See comments in adjust_map_after_if.
// This question is only asked along paths which are already
// classified as untaken (by seems_never_taken), so really,
// if a path is never taken, its controlling comparison is
// already acting in a stable fashion.  If the comparison
// seems stable, we will put an expensive uncommon trap
// on the untaken path.
bool Parse::seems_stable_comparison() const {
  if (C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if)) {
    return false;
  }
  return true;
}

//-------------------------------repush_if_args--------------------------------
// Push arguments of an "if" bytecode back onto the stack by adjusting _sp.
inline int Parse::repush_if_args() {
  if (PrintOpto && WizardMode) {
    tty->print("defending against excessive implicit null exceptions on %s @%d in ",
               Bytecodes::name(iter().cur_bc()), iter().cur_bci());
    method()->print_name(); tty->cr();
  }
  int bc_depth = - Bytecodes::depth(iter().cur_bc());
  assert(bc_depth == 1 || bc_depth == 2, "only two kinds of branches");
  DEBUG_ONLY(sync_jvms());   // argument(n) requires a synced jvms
  assert(argument(0) != NULL, "must exist");
  assert(bc_depth == 1 || argument(1) != NULL, "two must exist");
  inc_sp(bc_depth);
  return bc_depth;
}

//----------------------------------do_ifnull----------------------------------
void Parse::do_ifnull(BoolTest::mask btest, Node *c) {
  int target_bci = iter().get_dest();

  Block* branch_block = successor_for_bci(target_bci);
  Block* next_block   = successor_for_bci(iter().next_bci());

  float cnt;
  float prob = branch_prediction(cnt, btest, target_bci, c);
  if (prob == PROB_UNKNOWN) {
    // (An earlier version of do_ifnull omitted this trap for OSR methods.)
    if (PrintOpto && Verbose) {
      tty->print_cr("Never-taken edge stops compilation at bci %d", bci());
    }
    repush_if_args(); // to gather stats on loop
    // We need to mark this branch as taken so that if we recompile we will
    // see that it is possible. In the tiered system the interpreter doesn't
    // do profiling and by the time we get to the lower tier from the interpreter
    // the path may be cold again. Make sure it doesn't look untaken
    profile_taken_branch(target_bci, !ProfileInterpreter);
    uncommon_trap(Deoptimization::Reason_unreached,
                  Deoptimization::Action_reinterpret,
                  NULL, "cold");
    if (C->eliminate_boxing()) {
      // Mark the successor blocks as parsed
      branch_block->next_path_num();
      next_block->next_path_num();
    }
    return;
  }

  NOT_PRODUCT(explicit_null_checks_inserted++);

  // Generate real control flow
  Node *tst = _gvn.transform( new BoolNode( c, btest ) );

  // Sanity check the probability value
  assert(prob > 0.0f,"Bad probability in Parser");
  // Need xform to put node in hash table
  IfNode *iff = create_and_xform_if( control(), tst, prob, cnt );
  assert(iff->_prob > 0.0f,"Optimizer made bad probability in parser");
  // True branch
  { PreserveJVMState pjvms(this);
    Node* iftrue = _gvn.transform( new IfTrueNode (iff) );
    set_control(iftrue);

    if (stopped()) {            // Path is dead?
      NOT_PRODUCT(explicit_null_checks_elided++);
      if (C->eliminate_boxing()) {
        // Mark the successor block as parsed
        branch_block->next_path_num();
      }
    } else {                    // Path is live.
      // Update method data
      profile_taken_branch(target_bci);
      adjust_map_after_if(btest, c, prob, branch_block, next_block);
      if (!stopped()) {
        merge(target_bci);
      }
    }
  }

  // False branch
  Node* iffalse = _gvn.transform( new IfFalseNode(iff) );
  set_control(iffalse);

  if (stopped()) {              // Path is dead?
    NOT_PRODUCT(explicit_null_checks_elided++);
    if (C->eliminate_boxing()) {
      // Mark the successor block as parsed
      next_block->next_path_num();
    }
  } else {                      // Path is live.
    // Update method data
    profile_not_taken_branch();
    adjust_map_after_if(BoolTest(btest).negate(), c, 1.0-prob,
                        next_block, branch_block);
  }
}

//------------------------------------do_if------------------------------------
void Parse::do_if(BoolTest::mask btest, Node* c) {
  int target_bci = iter().get_dest();

  Block* branch_block = successor_for_bci(target_bci);
  Block* next_block   = successor_for_bci(iter().next_bci());

  float cnt;
  float prob = branch_prediction(cnt, btest, target_bci, c);
  float untaken_prob = 1.0 - prob;

  if (prob == PROB_UNKNOWN) {
    if (PrintOpto && Verbose) {
      tty->print_cr("Never-taken edge stops compilation at bci %d", bci());
    }
    repush_if_args(); // to gather stats on loop
    // We need to mark this branch as taken so that if we recompile we will
    // see that it is possible. In the tiered system the interpreter doesn't
    // do profiling and by the time we get to the lower tier from the interpreter
    // the path may be cold again. Make sure it doesn't look untaken
    profile_taken_branch(target_bci, !ProfileInterpreter);
    uncommon_trap(Deoptimization::Reason_unreached,
                  Deoptimization::Action_reinterpret,
                  NULL, "cold");
    if (C->eliminate_boxing()) {
      // Mark the successor blocks as parsed
      branch_block->next_path_num();
      next_block->next_path_num();
    }
    return;
  }

  // Sanity check the probability value
  assert(0.0f < prob && prob < 1.0f,"Bad probability in Parser");

  bool taken_if_true = true;
  // Convert BoolTest to canonical form:
  if (!BoolTest(btest).is_canonical()) {
    btest = BoolTest(btest).negate();
    taken_if_true = false;
    // prob is NOT updated here; it remains the probability of the taken
    // path (as opposed to the prob of the path guarded by an 'IfTrueNode').
  }
  assert(btest != BoolTest::eq, "!= is the only canonical exact test");

  Node* tst0 = new BoolNode(c, btest);
  Node* tst = _gvn.transform(tst0);
  BoolTest::mask taken_btest   = BoolTest::illegal;
  BoolTest::mask untaken_btest = BoolTest::illegal;

  if (tst->is_Bool()) {
    // Refresh c from the transformed bool node, since it may be
    // simpler than the original c.  Also re-canonicalize btest.
    // This wins when (Bool ne (Conv2B p) 0) => (Bool ne (CmpP p NULL)).
    // That can arise from statements like: if (x instanceof C) ...
    if (tst != tst0) {
      // Canonicalize one more time since transform can change it.
      btest = tst->as_Bool()->_test._test;
      if (!BoolTest(btest).is_canonical()) {
        // Reverse edges one more time...
        tst = _gvn.transform( tst->as_Bool()->negate(&_gvn) );
        btest = tst->as_Bool()->_test._test;
        assert(BoolTest(btest).is_canonical(), "sanity");
        taken_if_true = !taken_if_true;
      }
      c = tst->in(1);
    }
    BoolTest::mask neg_btest = BoolTest(btest).negate();
    taken_btest   = taken_if_true ?     btest : neg_btest;
    untaken_btest = taken_if_true ? neg_btest :     btest;
  }

  // Generate real control flow
  float true_prob = (taken_if_true ? prob : untaken_prob);
  IfNode* iff = create_and_map_if(control(), tst, true_prob, cnt);
  assert(iff->_prob > 0.0f,"Optimizer made bad probability in parser");
  Node* taken_branch   = new IfTrueNode(iff);
  Node* untaken_branch = new IfFalseNode(iff);
  if (!taken_if_true) {  // Finish conversion to canonical form
    Node* tmp      = taken_branch;
    taken_branch   = untaken_branch;
    untaken_branch = tmp;
  }

  // Branch is taken:
  { PreserveJVMState pjvms(this);
    taken_branch = _gvn.transform(taken_branch);
    set_control(taken_branch);

    if (stopped()) {
      if (C->eliminate_boxing()) {
        // Mark the successor block as parsed
        branch_block->next_path_num();
      }
    } else {
      // Update method data
      profile_taken_branch(target_bci);
      adjust_map_after_if(taken_btest, c, prob, branch_block, next_block);
      if (!stopped()) {
        merge(target_bci);
      }
    }
  }

  untaken_branch = _gvn.transform(untaken_branch);
  set_control(untaken_branch);

  // Branch not taken.
  if (stopped()) {
    if (C->eliminate_boxing()) {
      // Mark the successor block as parsed
      next_block->next_path_num();
    }
  } else {
    // Update method data
    profile_not_taken_branch();
    adjust_map_after_if(untaken_btest, c, untaken_prob,
                        next_block, branch_block);
  }
}

bool Parse::path_is_suitable_for_uncommon_trap(float prob) const {
  // Don't want to speculate on uncommon traps when running with -Xcomp
  if (!UseInterpreter) {
    return false;
  }
  return (seems_never_taken(prob) && seems_stable_comparison());
}

//----------------------------adjust_map_after_if------------------------------
// Adjust the JVM state to reflect the result of taking this path.
// Basically, it means inspecting the CmpNode controlling this
// branch, seeing how it constrains a tested value, and then
// deciding if it's worth our while to encode this constraint
// as graph nodes in the current abstract interpretation map.
void Parse::adjust_map_after_if(BoolTest::mask btest, Node* c, float prob,
                                Block* path, Block* other_path) {
  if (stopped() || !c->is_Cmp() || btest == BoolTest::illegal)
    return;                             // nothing to do

  bool is_fallthrough = (path == successor_for_bci(iter().next_bci()));

  if (path_is_suitable_for_uncommon_trap(prob)) {
    repush_if_args();
    uncommon_trap(Deoptimization::Reason_unstable_if,
                  Deoptimization::Action_reinterpret,
                  NULL,
                  (is_fallthrough ? "taken always" : "taken never"));
    return;
  }

  Node* val = c->in(1);
  Node* con = c->in(2);
  const Type* tcon = _gvn.type(con);
  const Type* tval = _gvn.type(val);
  bool have_con = tcon->singleton();
  if (tval->singleton()) {
    if (!have_con) {
      // Swap, so constant is in con.
      con  = val;
      tcon = tval;
      val  = c->in(2);
      tval = _gvn.type(val);
      btest = BoolTest(btest).commute();
      have_con = true;
    } else {
      // Do we have two constants?  Then leave well enough alone.
      have_con = false;
    }
  }
  if (!have_con)                        // remaining adjustments need a con
    return;

  sharpen_type_after_if(btest, con, tcon, val, tval);
}


static Node* extract_obj_from_klass_load(PhaseGVN* gvn, Node* n) {
  Node* ldk;
  if (n->is_DecodeNKlass()) {
    if (n->in(1)->Opcode() != Op_LoadNKlass) {
      return NULL;
    } else {
      ldk = n->in(1);
    }
  } else if (n->Opcode() != Op_LoadKlass) {
    return NULL;
  } else {
    ldk = n;
  }
  assert(ldk != NULL && ldk->is_Load(), "should have found a LoadKlass or LoadNKlass node");

  Node* adr = ldk->in(MemNode::Address);
  intptr_t off = 0;
  Node* obj = AddPNode::Ideal_base_and_offset(adr, gvn, off);
  if (obj == NULL || off != oopDesc::klass_offset_in_bytes()) // loading oopDesc::_klass?
    return NULL;
  const TypePtr* tp = gvn->type(obj)->is_ptr();
  if (tp == NULL || !(tp->isa_instptr() || tp->isa_aryptr())) // is obj a Java object ptr?
    return NULL;

  return obj;
}

void Parse::sharpen_type_after_if(BoolTest::mask btest,
                                  Node* con, const Type* tcon,
                                  Node* val, const Type* tval) {
  // Look for opportunities to sharpen the type of a node
  // whose klass is compared with a constant klass.
  if (btest == BoolTest::eq && tcon->isa_klassptr()) {
    Node* obj = extract_obj_from_klass_load(&_gvn, val);
    const TypeOopPtr* con_type = tcon->isa_klassptr()->as_instance_type();
    if (obj != NULL && (con_type->isa_instptr() || con_type->isa_aryptr())) {
      // Found:
      //   Bool(CmpP(LoadKlass(obj._klass), ConP(Foo.klass)), [eq])
      // or the narrowOop equivalent.
      const Type* obj_type = _gvn.type(obj);
      const TypeOopPtr* tboth = obj_type->join_speculative(con_type)->isa_oopptr();
      if (tboth != NULL && tboth->klass_is_exact() && tboth != obj_type &&
          tboth->higher_equal(obj_type)) {
        // obj has to be of the exact type Foo if the CmpP succeeds.
        int obj_in_map = map()->find_edge(obj);
        JVMState* jvms = this->jvms();
        if (obj_in_map >= 0 &&
            (jvms->is_loc(obj_in_map) || jvms->is_stk(obj_in_map))) {
          TypeNode* ccast = new CheckCastPPNode(control(), obj, tboth);
          const Type* tcc = ccast->as_Type()->type();
          assert(tcc != obj_type && tcc->higher_equal(obj_type), "must improve");
          // Delay transform() call to allow recovery of pre-cast value
          // at the control merge.
          _gvn.set_type_bottom(ccast);
          record_for_igvn(ccast);
          // Here's the payoff.
          replace_in_map(obj, ccast);
        }
      }
    }
  }

  int val_in_map = map()->find_edge(val);
  if (val_in_map < 0)  return;          // replace_in_map would be useless
  {
    JVMState* jvms = this->jvms();
    if (!(jvms->is_loc(val_in_map) ||
          jvms->is_stk(val_in_map)))
      return;                           // again, it would be useless
  }

  // Check for a comparison to a constant, and "know" that the compared
  // value is constrained on this path.
  assert(tcon->singleton(), "");
  ConstraintCastNode* ccast = NULL;
  Node* cast = NULL;

  switch (btest) {
  case BoolTest::eq:                    // Constant test?
    {
      const Type* tboth = tcon->join_speculative(tval);
      if (tboth == tval)  break;        // Nothing to gain.
      if (tcon->isa_int()) {
        ccast = new CastIINode(val, tboth);
      } else if (tcon == TypePtr::NULL_PTR) {
        // Cast to null, but keep the pointer identity temporarily live.
        ccast = new CastPPNode(val, tboth);
      } else {
        const TypeF* tf = tcon->isa_float_constant();
        const TypeD* td = tcon->isa_double_constant();
        // Exclude tests vs float/double 0 as these could be
        // either +0 or -0.  Just because you are equal to +0
        // doesn't mean you ARE +0!
        // Note, following code also replaces Long and Oop values.
        if ((!tf || tf->_f != 0.0) &&
            (!td || td->_d != 0.0))
          cast = con;                   // Replace non-constant val by con.
      }
    }
    break;

  case BoolTest::ne:
    if (tcon == TypePtr::NULL_PTR) {
      cast = cast_not_null(val, false);
    }
    break;

  default:
    // (At this point we could record int range types with CastII.)
    break;
  }

  if (ccast != NULL) {
    const Type* tcc = ccast->as_Type()->type();
    assert(tcc != tval && tcc->higher_equal(tval), "must improve");
    // Delay transform() call to allow recovery of pre-cast value
    // at the control merge.
    ccast->set_req(0, control());
    _gvn.set_type_bottom(ccast);
    record_for_igvn(ccast);
    cast = ccast;
  }

  if (cast != NULL) {                   // Here's the payoff.
    replace_in_map(val, cast);
  }
}

/**
 * Use speculative type to optimize CmpP node: if comparison is
 * against the low level class, cast the object to the speculative
 * type if any. CmpP should then go away.
 *
 * @param c  expected CmpP node
 * @return   result of CmpP on object casted to speculative type
 *
 */
Node* Parse::optimize_cmp_with_klass(Node* c) {
  // If this is transformed by the _gvn to a comparison with the low
  // level klass then we may be able to use speculation
  if (c->Opcode() == Op_CmpP &&
      (c->in(1)->Opcode() == Op_LoadKlass || c->in(1)->Opcode() == Op_DecodeNKlass) &&
      c->in(2)->is_Con()) {
    Node* load_klass = NULL;
    Node* decode = NULL;
    if (c->in(1)->Opcode() == Op_DecodeNKlass) {
      decode = c->in(1);
      load_klass = c->in(1)->in(1);
    } else {
      load_klass = c->in(1);
    }
    if (load_klass->in(2)->is_AddP()) {
      Node* addp = load_klass->in(2);
      Node* obj = addp->in(AddPNode::Address);
      const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
      if (obj_type->speculative_type_not_null() != NULL) {
        ciKlass* k = obj_type->speculative_type();
        inc_sp(2);
        obj = maybe_cast_profiled_obj(obj, k);
        dec_sp(2);
        // Make the CmpP use the casted obj
        addp = basic_plus_adr(obj, addp->in(AddPNode::Offset));
        load_klass = load_klass->clone();
        load_klass->set_req(2, addp);
        load_klass = _gvn.transform(load_klass);
        if (decode != NULL) {
          decode = decode->clone();
          decode->set_req(1, load_klass);
          load_klass = _gvn.transform(decode);
        }
        c = c->clone();
        c->set_req(1, load_klass);
        c = _gvn.transform(c);
      }
    }
  }
  return c;
}

//------------------------------do_one_bytecode--------------------------------
// Parse this bytecode, and alter the Parser's JVM->Node mapping
void Parse::do_one_bytecode() {
  Node *a, *b, *c, *d;          // Handy temps
  BoolTest::mask btest;
  int i;

  assert(!has_exceptions(), "bytecode entry state must be clear of throws");

  if (C->check_node_count(NodeLimitFudgeFactor * 5,
                          "out of nodes parsing method")) {
    return;
  }

#ifdef ASSERT
  // for setting breakpoints
  if (TraceOptoParse) {
    tty->print(" @");
    dump_bci(bci());
    tty->cr();
  }
#endif

  switch (bc()) {
  case Bytecodes::_nop:
    // do nothing
    break;
  case Bytecodes::_lconst_0:
    push_pair(longcon(0));
    break;

  case Bytecodes::_lconst_1:
    push_pair(longcon(1));
    break;

  case Bytecodes::_fconst_0:
    push(zerocon(T_FLOAT));
    break;

  case Bytecodes::_fconst_1:
    push(makecon(TypeF::ONE));
    break;

  case Bytecodes::_fconst_2:
    push(makecon(TypeF::make(2.0f)));
    break;

  case Bytecodes::_dconst_0:
    push_pair(zerocon(T_DOUBLE));
    break;

  case Bytecodes::_dconst_1:
    push_pair(makecon(TypeD::ONE));
    break;

  case Bytecodes::_iconst_m1:push(intcon(-1)); break;
  case Bytecodes::_iconst_0: push(intcon( 0)); break;
  case Bytecodes::_iconst_1: push(intcon( 1)); break;
  case Bytecodes::_iconst_2: push(intcon( 2)); break;
  case Bytecodes::_iconst_3: push(intcon( 3)); break;
  case Bytecodes::_iconst_4: push(intcon( 4)); break;
  case Bytecodes::_iconst_5: push(intcon( 5)); break;
  case Bytecodes::_bipush:   push(intcon(iter().get_constant_u1())); break;
  case Bytecodes::_sipush:   push(intcon(iter().get_constant_u2())); break;
  case Bytecodes::_aconst_null: push(null());  break;
  case Bytecodes::_ldc:
  case Bytecodes::_ldc_w:
  case Bytecodes::_ldc2_w:
    // If the constant is unresolved, run this BC once in the interpreter.
    {
      ciConstant constant = iter().get_constant();
      if (constant.basic_type() == T_OBJECT &&
          !constant.as_object()->is_loaded()) {
        int index = iter().get_constant_pool_index();
        constantTag tag = iter().get_constant_pool_tag(index);
        uncommon_trap(Deoptimization::make_trap_request
                      (Deoptimization::Reason_unloaded,
                       Deoptimization::Action_reinterpret,
                       index),
                      NULL, tag.internal_name());
        break;
      }
      assert(constant.basic_type() != T_OBJECT || constant.as_object()->is_instance(),
             "must be java_mirror of klass");
      const Type* con_type = Type::make_from_constant(constant);
      if (con_type != NULL) {
        push_node(con_type->basic_type(), makecon(con_type));
      }
    }

    break;

  case Bytecodes::_aload_0:
    push( local(0) );
    break;
  case Bytecodes::_aload_1:
    push( local(1) );
    break;
  case Bytecodes::_aload_2:
    push( local(2) );
    break;
  case Bytecodes::_aload_3:
    push( local(3) );
    break;
  case Bytecodes::_aload:
    push( local(iter().get_index()) );
    break;

  case Bytecodes::_fload_0:
  case Bytecodes::_iload_0:
    push( local(0) );
    break;
  case Bytecodes::_fload_1:
  case Bytecodes::_iload_1:
    push( local(1) );
    break;
  case Bytecodes::_fload_2:
  case Bytecodes::_iload_2:
    push( local(2) );
    break;
  case Bytecodes::_fload_3:
  case Bytecodes::_iload_3:
    push( local(3) );
    break;
  case Bytecodes::_fload:
  case Bytecodes::_iload:
    push( local(iter().get_index()) );
    break;
  case Bytecodes::_lload_0:
    push_pair_local( 0 );
    break;
  case Bytecodes::_lload_1:
    push_pair_local( 1 );
    break;
  case Bytecodes::_lload_2:
    push_pair_local( 2 );
    break;
  case Bytecodes::_lload_3:
    push_pair_local( 3 );
    break;
  case Bytecodes::_lload:
    push_pair_local( iter().get_index() );
    break;

  case Bytecodes::_dload_0:
    push_pair_local(0);
    break;
  case Bytecodes::_dload_1:
    push_pair_local(1);
    break;
  case Bytecodes::_dload_2:
    push_pair_local(2);
    break;
  case Bytecodes::_dload_3:
    push_pair_local(3);
    break;
  case Bytecodes::_dload:
    push_pair_local(iter().get_index());
    break;
  case Bytecodes::_fstore_0:
  case Bytecodes::_istore_0:
  case Bytecodes::_astore_0:
    set_local( 0, pop() );
    break;
  case Bytecodes::_fstore_1:
  case Bytecodes::_istore_1:
  case Bytecodes::_astore_1:
    set_local( 1, pop() );
    break;
  case Bytecodes::_fstore_2:
  case Bytecodes::_istore_2:
  case Bytecodes::_astore_2:
    set_local( 2, pop() );
    break;
  case Bytecodes::_fstore_3:
  case Bytecodes::_istore_3:
  case Bytecodes::_astore_3:
    set_local( 3, pop() );
    break;
  case Bytecodes::_fstore:
  case Bytecodes::_istore:
  case Bytecodes::_astore:
    set_local( iter().get_index(), pop() );
    break;
  // long stores
  case Bytecodes::_lstore_0:
    set_pair_local( 0, pop_pair() );
    break;
  case Bytecodes::_lstore_1:
    set_pair_local( 1, pop_pair() );
    break;
  case Bytecodes::_lstore_2:
    set_pair_local( 2, pop_pair() );
    break;
  case Bytecodes::_lstore_3:
    set_pair_local( 3, pop_pair() );
    break;
  case Bytecodes::_lstore:
    set_pair_local( iter().get_index(), pop_pair() );
    break;

  // double stores
  case Bytecodes::_dstore_0:
    set_pair_local( 0, dstore_rounding(pop_pair()) );
    break;
  case Bytecodes::_dstore_1:
    set_pair_local( 1, dstore_rounding(pop_pair()) );
    break;
  case Bytecodes::_dstore_2:
    set_pair_local( 2, dstore_rounding(pop_pair()) );
    break;
  case Bytecodes::_dstore_3:
    set_pair_local( 3, dstore_rounding(pop_pair()) );
    break;
  case Bytecodes::_dstore:
    set_pair_local( iter().get_index(), dstore_rounding(pop_pair()) );
    break;

  case Bytecodes::_pop:  dec_sp(1);   break;
  case Bytecodes::_pop2: dec_sp(2);   break;
  case Bytecodes::_swap:
    a = pop();
    b = pop();
    push(a);
    push(b);
    break;
  case Bytecodes::_dup:
    a = pop();
    push(a);
    push(a);
    break;
  case Bytecodes::_dup_x1:
    a = pop();
    b = pop();
    push( a );
    push( b );
    push( a );
    break;
  case Bytecodes::_dup_x2:
    a = pop();
    b = pop();
    c = pop();
    push( a );
    push( c );
    push( b );
    push( a );
    break;
  case Bytecodes::_dup2:
    a = pop();
    b = pop();
    push( b );
    push( a );
    push( b );
    push( a );
    break;

  case Bytecodes::_dup2_x1:
    // before: .. c, b, a
    // after:  .. b, a, c, b, a
    // not tested
    a = pop();
    b = pop();
    c = pop();
    push( b );
    push( a );
    push( c );
    push( b );
    push( a );
    break;
  case Bytecodes::_dup2_x2:
    // before: .. d, c, b, a
    // after:  .. b, a, d, c, b, a
    // not tested
    a = pop();
    b = pop();
    c = pop();
    d = pop();
    push( b );
    push( a );
    push( d );
    push( c );
    push( b );
    push( a );
    break;

  case Bytecodes::_arraylength: {
    // Must do null-check with value on expression stack
    Node *ary = null_check(peek(), T_ARRAY);
    // Compile-time detect of null-exception?
    if (stopped())  return;
    a = pop();
    push(load_array_length(a));
    break;
  }

  case Bytecodes::_baload:  array_load(T_BYTE);    break;
  case Bytecodes::_caload:  array_load(T_CHAR);    break;
  case Bytecodes::_iaload:  array_load(T_INT);     break;
  case Bytecodes::_saload:  array_load(T_SHORT);   break;
  case Bytecodes::_faload:  array_load(T_FLOAT);   break;
  case Bytecodes::_aaload:  array_load(T_OBJECT);  break;
  case Bytecodes::_laload:  array_load(T_LONG);    break;
  case Bytecodes::_daload:  array_load(T_DOUBLE);  break;
  case Bytecodes::_bastore: array_store(T_BYTE);   break;
  case Bytecodes::_castore: array_store(T_CHAR);   break;
  case Bytecodes::_iastore: array_store(T_INT);    break;
  case Bytecodes::_sastore: array_store(T_SHORT);  break;
  case Bytecodes::_fastore: array_store(T_FLOAT);  break;
  case Bytecodes::_aastore: array_store(T_OBJECT); break;
  case Bytecodes::_lastore: array_store(T_LONG);   break;
  case Bytecodes::_dastore: array_store(T_DOUBLE); break;

  case Bytecodes::_getfield:
    do_getfield();
    break;

  case Bytecodes::_getstatic:
    do_getstatic();
    break;

  case Bytecodes::_putfield:
    do_putfield();
    break;

  case Bytecodes::_putstatic:
    do_putstatic();
    break;

  case Bytecodes::_irem:
    do_irem();
    break;
  case Bytecodes::_idiv:
    // Must keep both values on the expression-stack during null-check
    zero_check_int(peek());
    // Compile-time detect of null-exception?
    if (stopped())  return;
    b = pop();
    a = pop();
    push( _gvn.transform( new DivINode(control(),a,b) ) );
    break;
  case Bytecodes::_imul:
    b = pop(); a = pop();
    push( _gvn.transform( new MulINode(a,b) ) );
    break;
  case Bytecodes::_iadd:
    b = pop(); a = pop();
    push( _gvn.transform( new AddINode(a,b) ) );
    break;
  case Bytecodes::_ineg:
    a = pop();
    push( _gvn.transform( new SubINode(_gvn.intcon(0),a)) );
    break;
  case Bytecodes::_isub:
    b = pop(); a = pop();
    push( _gvn.transform( new SubINode(a,b) ) );
    break;
  case Bytecodes::_iand:
    b = pop(); a = pop();
    push( _gvn.transform( new AndINode(a,b) ) );
    break;
  case Bytecodes::_ior:
    b = pop(); a = pop();
    push( _gvn.transform( new OrINode(a,b) ) );
    break;
  case Bytecodes::_ixor:
    b = pop(); a = pop();
    push( _gvn.transform( new XorINode(a,b) ) );
    break;
  case Bytecodes::_ishl:
    b = pop(); a = pop();
    push( _gvn.transform( new LShiftINode(a,b) ) );
    break;
  case Bytecodes::_ishr:
    b = pop(); a = pop();
    push( _gvn.transform( new RShiftINode(a,b) ) );
    break;
  case Bytecodes::_iushr:
    b = pop(); a = pop();
    push( _gvn.transform( new URShiftINode(a,b) ) );
    break;

  case Bytecodes::_fneg:
    a = pop();
    b = _gvn.transform(new NegFNode (a));
    push(b);
    break;

  case Bytecodes::_fsub:
    b = pop();
    a = pop();
    c = _gvn.transform( new SubFNode(a,b) );
    d = precision_rounding(c);
    push( d );
    break;

  case Bytecodes::_fadd:
    b = pop();
    a = pop();
    c = _gvn.transform( new AddFNode(a,b) );
    d = precision_rounding(c);
    push( d );
    break;

  case Bytecodes::_fmul:
    b = pop();
    a = pop();
    c = _gvn.transform( new MulFNode(a,b) );
    d = precision_rounding(c);
    push( d );
    break;

  case Bytecodes::_fdiv:
    b = pop();
    a = pop();
    c = _gvn.transform( new DivFNode(0,a,b) );
    d = precision_rounding(c);
    push( d );
    break;

  case Bytecodes::_frem:
    if (Matcher::has_match_rule(Op_ModF)) {
      // Generate a ModF node.
      b = pop();
      a = pop();
      c = _gvn.transform( new ModFNode(0,a,b) );
      d = precision_rounding(c);
      push( d );
    }
    else {
      // Generate a call.
      modf();
    }
    break;

  case Bytecodes::_fcmpl:
    b = pop();
    a = pop();
    c = _gvn.transform( new CmpF3Node( a, b));
    push(c);
    break;
  case Bytecodes::_fcmpg:
    b = pop();
    a = pop();

    // Same as fcmpl but need to flip the unordered case.  Swap the inputs,
    // which negates the result sign except for unordered.  Flip the unordered
    // as well by using CmpF3 which implements unordered-lesser instead of
    // unordered-greater semantics.  Finally, commute the result bits.  Result
    // is same as using a CmpF3Greater except we did it with CmpF3 alone.
    c = _gvn.transform( new CmpF3Node( b, a));
    c = _gvn.transform( new SubINode(_gvn.intcon(0),c) );
    push(c);
    break;

  case Bytecodes::_f2i:
    a = pop();
    push(_gvn.transform(new ConvF2INode(a)));
    break;

  case Bytecodes::_d2i:
    a = pop_pair();
    b = _gvn.transform(new ConvD2INode(a));
    push( b );
    break;

  case Bytecodes::_f2d:
    a = pop();
    b = _gvn.transform( new ConvF2DNode(a));
    push_pair( b );
    break;

  case Bytecodes::_d2f:
    a = pop_pair();
    b = _gvn.transform( new ConvD2FNode(a));
    // This breaks _227_mtrt (speed & correctness) and _222_mpegaudio (speed)
    //b = _gvn.transform(new RoundFloatNode(0, b) );
    push( b );
    break;

  case Bytecodes::_l2f:
    if (Matcher::convL2FSupported()) {
      a = pop_pair();
      b = _gvn.transform( new ConvL2FNode(a));
      // For i486.ad, FILD doesn't restrict precision to 24 or 53 bits.
      // Rather than storing the result into an FP register then pushing
      // out to memory to round, the machine instruction that implements
      // ConvL2D is responsible for rounding.
      // c = precision_rounding(b);
      c = _gvn.transform(b);
      push(c);
    } else {
      l2f();
    }
    break;

  case Bytecodes::_l2d:
    a = pop_pair();
    b = _gvn.transform( new ConvL2DNode(a));
    // For i486.ad, rounding is always necessary (see _l2f above).
    // c = dprecision_rounding(b);
    c = _gvn.transform(b);
    push_pair(c);
    break;

  case Bytecodes::_f2l:
    a = pop();
    b = _gvn.transform( new ConvF2LNode(a));
    push_pair(b);
    break;

  case Bytecodes::_d2l:
    a = pop_pair();
    b = _gvn.transform( new ConvD2LNode(a));
    push_pair(b);
    break;

  case Bytecodes::_dsub:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new SubDNode(a,b) );
    d = dprecision_rounding(c);
    push_pair( d );
    break;

  case Bytecodes::_dadd:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new AddDNode(a,b) );
    d = dprecision_rounding(c);
    push_pair( d );
    break;

  case Bytecodes::_dmul:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new MulDNode(a,b) );
    d = dprecision_rounding(c);
    push_pair( d );
    break;

  case Bytecodes::_ddiv:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new DivDNode(0,a,b) );
    d = dprecision_rounding(c);
    push_pair( d );
    break;

  case Bytecodes::_dneg:
    a = pop_pair();
    b = _gvn.transform(new NegDNode (a));
    push_pair(b);
    break;

  case Bytecodes::_drem:
    if (Matcher::has_match_rule(Op_ModD)) {
      // Generate a ModD node.
      b = pop_pair();
      a = pop_pair();
      // a % b

      c = _gvn.transform( new ModDNode(0,a,b) );
      d = dprecision_rounding(c);
      push_pair( d );
    }
    else {
      // Generate a call.
      modd();
    }
    break;

  case Bytecodes::_dcmpl:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new CmpD3Node( a, b));
    push(c);
    break;

  case Bytecodes::_dcmpg:
    b = pop_pair();
    a = pop_pair();
    // Same as dcmpl but need to flip the unordered case.
    // Commute the inputs, which negates the result sign except for unordered.
    // Flip the unordered as well by using CmpD3 which implements
    // unordered-lesser instead of unordered-greater semantics.
    // Finally, negate the result bits.  Result is same as using a
    // CmpD3Greater except we did it with CmpD3 alone.
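    // For example, with CmpD3's unordered-lesser convention:
    //   a <  b (ordered) : CmpD3(b,a) = +1, 0 - (+1) = -1   (dcmpg: "less")
    //   a == b (ordered) : CmpD3(b,a) =  0, 0 -   0  =  0   (dcmpg: "equal")
    //   a >  b (ordered) : CmpD3(b,a) = -1, 0 - (-1) = +1   (dcmpg: "greater")
    //   a or b is NaN    : CmpD3(b,a) = -1, 0 - (-1) = +1   (dcmpg: "unordered")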
    c = _gvn.transform( new CmpD3Node( b, a));
    c = _gvn.transform( new SubINode(_gvn.intcon(0),c) );
    push(c);
    break;


  // Note for longs -> lo word is on TOS, hi word is on TOS - 1
  case Bytecodes::_land:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new AndLNode(a,b) );
    push_pair(c);
    break;
  case Bytecodes::_lor:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new OrLNode(a,b) );
    push_pair(c);
    break;
  case Bytecodes::_lxor:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new XorLNode(a,b) );
    push_pair(c);
    break;

  case Bytecodes::_lshl:
    b = pop();                  // the shift count
    a = pop_pair();             // value to be shifted
    c = _gvn.transform( new LShiftLNode(a,b) );
    push_pair(c);
    break;
  case Bytecodes::_lshr:
    b = pop();                  // the shift count
    a = pop_pair();             // value to be shifted
    c = _gvn.transform( new RShiftLNode(a,b) );
    push_pair(c);
    break;
  case Bytecodes::_lushr:
    b = pop();                  // the shift count
    a = pop_pair();             // value to be shifted
    c = _gvn.transform( new URShiftLNode(a,b) );
    push_pair(c);
    break;
  case Bytecodes::_lmul:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new MulLNode(a,b) );
    push_pair(c);
    break;

  case Bytecodes::_lrem:
    // Must keep both values on the expression-stack during null-check
    assert(peek(0) == top(), "long word order");
    zero_check_long(peek(1));
    // Compile-time detect of null-exception?
    if (stopped())  return;
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new ModLNode(control(),a,b) );
    push_pair(c);
    break;

  case Bytecodes::_ldiv:
    // Must keep both values on the expression-stack during null-check
    assert(peek(0) == top(), "long word order");
    zero_check_long(peek(1));
    // Compile-time detect of null-exception?
    if (stopped())  return;
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new DivLNode(control(),a,b) );
    push_pair(c);
    break;

  case Bytecodes::_ladd:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new AddLNode(a,b) );
    push_pair(c);
    break;
  case Bytecodes::_lsub:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new SubLNode(a,b) );
    push_pair(c);
    break;
  case Bytecodes::_lcmp:
    // Safepoints are now inserted _before_ branches.  The long-compare
    // bytecode painfully produces a 3-way value (-1,0,+1) which requires a
    // slew of control flow.  These are usually followed by a CmpI vs zero and
    // a branch; this pattern then optimizes to the obvious long-compare and
    // branch.  However, if the branch is backwards there's a Safepoint
    // inserted.  The inserted Safepoint captures the JVM state at the
    // pre-branch point, i.e. it captures the 3-way value.  Thus if a
    // long-compare is used to control a loop the debug info will force
    // computation of the 3-way value, even though the generated code uses a
    // long-compare and branch.  We try to rectify the situation by inserting
    // a SafePoint here and have it dominate and kill the safepoint added at a
    // following backwards branch.  At this point the JVM state merely holds 2
    // longs but not the 3-way value.
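    // For example, a long-typed loop such as
    //   for (long i = start; i < end; i++) { ... }
    // typically compiles to an lcmp followed by a conditional branch (ifge to
    // the exit, or iflt back to the loop head when the test sits at the loop
    // bottom).  The Safepoint added below, before the CmpL3 is built, dominates
    // the one the backwards branch would add, so the captured JVM state holds
    // the two longs rather than the materialized 3-way value.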
    if( UseLoopSafepoints ) {
      switch( iter().next_bc() ) {
      case Bytecodes::_ifgt:
      case Bytecodes::_iflt:
      case Bytecodes::_ifge:
      case Bytecodes::_ifle:
      case Bytecodes::_ifne:
      case Bytecodes::_ifeq:
        // If this is a backwards branch in the bytecodes, add Safepoint
        maybe_add_safepoint(iter().next_get_dest());
      }
    }
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new CmpL3Node( a, b ));
    push(c);
    break;

  case Bytecodes::_lneg:
    a = pop_pair();
    b = _gvn.transform( new SubLNode(longcon(0),a));
    push_pair(b);
    break;
  case Bytecodes::_l2i:
    a = pop_pair();
    push( _gvn.transform( new ConvL2INode(a)));
    break;
  case Bytecodes::_i2l:
    a = pop();
    b = _gvn.transform( new ConvI2LNode(a));
    push_pair(b);
    break;
  case Bytecodes::_i2b:
    // Sign extend
    a = pop();
    a = _gvn.transform( new LShiftINode(a,_gvn.intcon(24)) );
    a = _gvn.transform( new RShiftINode(a,_gvn.intcon(24)) );
    push( a );
    break;
  case Bytecodes::_i2s:
    a = pop();
    a = _gvn.transform( new LShiftINode(a,_gvn.intcon(16)) );
    a = _gvn.transform( new RShiftINode(a,_gvn.intcon(16)) );
    push( a );
    break;
  case Bytecodes::_i2c:
    a = pop();
    push( _gvn.transform( new AndINode(a,_gvn.intcon(0xFFFF)) ) );
    break;

  case Bytecodes::_i2f:
    a = pop();
    b = _gvn.transform( new ConvI2FNode(a) ) ;
    c = precision_rounding(b);
    push (b);
    break;

  case Bytecodes::_i2d:
    a = pop();
    b = _gvn.transform( new ConvI2DNode(a));
    push_pair(b);
    break;

  case Bytecodes::_iinc:        // Increment local
    i = iter().get_index();     // Get local index
    set_local( i, _gvn.transform( new AddINode( _gvn.intcon(iter().get_iinc_con()), local(i) ) ) );
    break;

  // Exit points of synchronized methods must have an unlock node
  case Bytecodes::_return:
    return_current(NULL);
    break;

  case Bytecodes::_ireturn:
  case Bytecodes::_areturn:
  case Bytecodes::_freturn:
    return_current(pop());
    break;
  case Bytecodes::_lreturn:
    return_current(pop_pair());
    break;
  case Bytecodes::_dreturn:
    return_current(pop_pair());
    break;

  case Bytecodes::_athrow:
    // null exception oop throws NULL pointer exception
    null_check(peek());
    if (stopped())  return;
    // Hook the thrown exception directly to subsequent handlers.
    if (BailoutToInterpreterForThrows) {
      // Keep method interpreted from now on.
      uncommon_trap(Deoptimization::Reason_unhandled,
                    Deoptimization::Action_make_not_compilable);
      return;
    }
    if (env()->jvmti_can_post_on_exceptions()) {
      // check if we must post exception events, take uncommon trap if so (with must_throw = false)
      uncommon_trap_if_should_post_on_exceptions(Deoptimization::Reason_unhandled, false);
    }
    // Here if either can_post_on_exceptions or should_post_on_exceptions is false
    add_exception_state(make_exception_state(peek()));
    break;

  case Bytecodes::_goto:   // fall through
  case Bytecodes::_goto_w: {
    int target_bci = (bc() == Bytecodes::_goto) ? iter().get_dest() : iter().get_far_dest();

    // If this is a backwards branch in the bytecodes, add Safepoint
    maybe_add_safepoint(target_bci);

    // Update method data
    profile_taken_branch(target_bci);

    // Merge the current control into the target basic block
    merge(target_bci);

    // See if we can get some profile data and hand it off to the next block
    Block *target_block = block()->successor_for_bci(target_bci);
    if (target_block->pred_count() != 1)  break;
    ciMethodData* methodData = method()->method_data();
    if (!methodData->is_mature())  break;
    ciProfileData* data = methodData->bci_to_data(bci());
    assert( data->is_JumpData(), "" );
    int taken = ((ciJumpData*)data)->taken();
    taken = method()->scale_count(taken);
    target_block->set_count(taken);
    break;
  }

  case Bytecodes::_ifnull:    btest = BoolTest::eq; goto handle_if_null;
  case Bytecodes::_ifnonnull: btest = BoolTest::ne; goto handle_if_null;
  handle_if_null:
    // If this is a backwards branch in the bytecodes, add Safepoint
    maybe_add_safepoint(iter().get_dest());
    a = null();
    b = pop();
    if (!_gvn.type(b)->speculative_maybe_null() &&
        !too_many_traps(Deoptimization::Reason_speculate_null_check)) {
      inc_sp(1);
      Node* null_ctl = top();
      b = null_check_oop(b, &null_ctl, true, true, true);
      assert(null_ctl->is_top(), "no null control here");
      dec_sp(1);
    }
    c = _gvn.transform( new CmpPNode(b, a) );
    do_ifnull(btest, c);
    break;

  case Bytecodes::_if_acmpeq: btest = BoolTest::eq; goto handle_if_acmp;
  case Bytecodes::_if_acmpne: btest = BoolTest::ne; goto handle_if_acmp;
  handle_if_acmp:
    // If this is a backwards branch in the bytecodes, add Safepoint
    maybe_add_safepoint(iter().get_dest());
    a = pop();
    b = pop();
    c = _gvn.transform( new CmpPNode(b, a) );
    c = optimize_cmp_with_klass(c);
    do_if(btest, c);
    break;

  case Bytecodes::_ifeq: btest = BoolTest::eq; goto handle_ifxx;
  case Bytecodes::_ifne: btest = BoolTest::ne; goto handle_ifxx;
  case Bytecodes::_iflt: btest = BoolTest::lt; goto handle_ifxx;
  case Bytecodes::_ifle: btest = BoolTest::le; goto handle_ifxx;
  case Bytecodes::_ifgt: btest = BoolTest::gt; goto handle_ifxx;
  case Bytecodes::_ifge: btest = BoolTest::ge; goto handle_ifxx;
  handle_ifxx:
    // If this is a backwards branch in the bytecodes, add Safepoint
    maybe_add_safepoint(iter().get_dest());
    a = _gvn.intcon(0);
    b = pop();
    c = _gvn.transform( new CmpINode(b, a) );
    do_if(btest, c);
    break;

  case Bytecodes::_if_icmpeq: btest = BoolTest::eq; goto handle_if_icmp;
  case Bytecodes::_if_icmpne: btest = BoolTest::ne; goto handle_if_icmp;
  case Bytecodes::_if_icmplt: btest = BoolTest::lt; goto handle_if_icmp;
  case Bytecodes::_if_icmple: btest = BoolTest::le; goto handle_if_icmp;
  case Bytecodes::_if_icmpgt: btest = BoolTest::gt; goto handle_if_icmp;
  case Bytecodes::_if_icmpge: btest = BoolTest::ge; goto handle_if_icmp;
  handle_if_icmp:
    // If this is a backwards branch in the bytecodes, add Safepoint
    maybe_add_safepoint(iter().get_dest());
    a = pop();
    b = pop();
    c = _gvn.transform( new CmpINode( b, a ) );
    do_if(btest, c);
    break;

  case Bytecodes::_tableswitch:
    do_tableswitch();
    break;

  case Bytecodes::_lookupswitch:
    do_lookupswitch();
    break;

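  // All five invoke bytecodes funnel into do_call() (see doCall.cpp), which
  // resolves the callee and chooses between inlining and an out-of-line call.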
  case Bytecodes::_invokestatic:
  case Bytecodes::_invokedynamic:
  case Bytecodes::_invokespecial:
  case Bytecodes::_invokevirtual:
  case Bytecodes::_invokeinterface:
    do_call();
    break;
  case Bytecodes::_checkcast:
    do_checkcast();
    break;
  case Bytecodes::_instanceof:
    do_instanceof();
    break;
  case Bytecodes::_anewarray:
    do_anewarray();
    break;
  case Bytecodes::_newarray:
    do_newarray((BasicType)iter().get_index());
    break;
  case Bytecodes::_multianewarray:
    do_multianewarray();
    break;
  case Bytecodes::_new:
    do_new();
    break;

  case Bytecodes::_jsr:
  case Bytecodes::_jsr_w:
    do_jsr();
    break;

  case Bytecodes::_ret:
    do_ret();
    break;


  case Bytecodes::_monitorenter:
    do_monitor_enter();
    break;

  case Bytecodes::_monitorexit:
    do_monitor_exit();
    break;

  case Bytecodes::_breakpoint:
    // Breakpoint set concurrently to compile
    // %%% use an uncommon trap?
    C->record_failure("breakpoint in method");
    return;

  default:
#ifndef PRODUCT
    map()->dump(99);
#endif
    tty->print("\nUnhandled bytecode %s\n", Bytecodes::name(bc()) );
    ShouldNotReachHere();
  }

#ifndef PRODUCT
  IdealGraphPrinter *printer = C->printer();
  if (printer && printer->should_print(1)) {
    char buffer[256];
    sprintf(buffer, "Bytecode %d: %s", bci(), Bytecodes::name(bc()));
    bool old = printer->traverse_outs();
    printer->set_traverse_outs(true);
    printer->print_method(buffer, 4);
    printer->set_traverse_outs(old);
  }
#endif
}