/*
 * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/ciMethodData.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "compiler/compileLog.hpp"
#include "interpreter/linkResolver.hpp"
#include "memory/universe.inline.hpp"
#include "opto/addnode.hpp"
#include "opto/divnode.hpp"
#include "opto/idealGraphPrinter.hpp"
#include "opto/matcher.hpp"
#include "opto/memnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/parse.hpp"
#include "opto/runtime.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/sharedRuntime.hpp"

extern int explicit_null_checks_inserted,
           explicit_null_checks_elided;

//---------------------------------array_load----------------------------------
void Parse::array_load(BasicType elem_type) {
  const Type* elem = Type::TOP;
  Node* adr = array_addressing(elem_type, 0, &elem);
  if (stopped())  return;     // guaranteed null or range check
  _sp -= 2;                   // Pop array and index
  const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(elem_type);
  Node* ld = make_load(control(), adr, elem, elem_type, adr_type);
  push(ld);
}


//--------------------------------array_store----------------------------------
void Parse::array_store(BasicType elem_type) {
  Node* adr = array_addressing(elem_type, 1);
  if (stopped())  return;     // guaranteed null or range check
  Node* val = pop();
  _sp -= 2;                   // Pop array and index
  const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(elem_type);
  store_to_memory(control(), adr, val, elem_type, adr_type);
}


//------------------------------array_addressing-------------------------------
// Pull array and index from the stack.  Compute pointer-to-element.
Node* Parse::array_addressing(BasicType type, int vals, const Type* *result2) {
  Node *idx = peek(0+vals);   // Get from stack without popping
  Node *ary = peek(1+vals);   // in case of exception

  // Null check the array base, with correct stack contents
  ary = do_null_check(ary, T_ARRAY);
  // Compile-time detect of null-exception?
  if (stopped())  return top();

  const TypeAryPtr* arytype  = _gvn.type(ary)->is_aryptr();
  const TypeInt*    sizetype = arytype->size();
  const Type*       elemtype = arytype->elem();

  if (UseUniqueSubclasses && result2 != NULL) {
    const Type* el = elemtype->make_ptr();
    if (el && el->isa_instptr()) {
      const TypeInstPtr* toop = el->is_instptr();
      if (toop->klass()->as_instance_klass()->unique_concrete_subklass()) {
        // If we load from "AbstractClass[]" we must see "ConcreteSubClass".
        const Type* subklass = Type::get_const_type(toop->klass());
        elemtype = subklass->join(el);
      }
    }
  }

  // Check for big class initializers with all constant offsets
  // feeding into a known-size array.
  const TypeInt* idxtype = _gvn.type(idx)->is_int();
  // See if the highest idx value is less than the lowest array bound,
  // and if the idx value cannot be negative:
  bool need_range_check = true;
  if (idxtype->_hi < sizetype->_lo && idxtype->_lo >= 0) {
    need_range_check = false;
    if (C->log() != NULL)  C->log()->elem("observe that='!need_range_check'");
  }

  if (!arytype->klass()->is_loaded()) {
    // Only fails for some -Xcomp runs
    // The class is unloaded.  We have to run this bytecode in the interpreter.
    uncommon_trap(Deoptimization::Reason_unloaded,
                  Deoptimization::Action_reinterpret,
                  arytype->klass(), "!loaded array");
    return top();
  }

  // Do the range check
  if (GenerateRangeChecks && need_range_check) {
    Node* tst;
    if (sizetype->_hi <= 0) {
      // The greatest array bound is negative, so we can conclude that we're
      // compiling unreachable code, but the unsigned compare trick used below
      // only works with non-negative lengths.  Instead, hack "tst" to be zero so
      // the uncommon_trap path will always be taken.
      tst = _gvn.intcon(0);
    } else {
      // Range is constant in array-oop, so we can use the original state of mem
      Node* len = load_array_length(ary);

      // Test length vs index (standard trick using unsigned compare):
      // a negative idx reinterprets as a huge unsigned value, so a single
      // unsigned "lt" covers both idx < 0 and idx >= len.
      Node* chk = _gvn.transform( new (C, 3) CmpUNode(idx, len) );
      BoolTest::mask btest = BoolTest::lt;
      tst = _gvn.transform( new (C, 2) BoolNode(chk, btest) );
    }
    // Branch to failure if out of bounds
    { BuildCutout unless(this, tst, PROB_MAX);
      if (C->allow_range_check_smearing()) {
        // Do not use builtin_throw, since range checks are sometimes
        // made more stringent by an optimistic transformation.
        // This creates "tentative" range checks at this point,
        // which are not guaranteed to throw exceptions.
        // See IfNode::Ideal, is_range_check, adjust_check.
        uncommon_trap(Deoptimization::Reason_range_check,
                      Deoptimization::Action_make_not_entrant,
                      NULL, "range_check");
      } else {
        // If we have already recompiled with the range-check-widening
        // heroic optimization turned off, then we must really be throwing
        // range check exceptions.
        builtin_throw(Deoptimization::Reason_range_check, idx);
      }
    }
  }
  // Check for always knowing you are throwing a range-check exception
  if (stopped())  return top();

  Node* ptr = array_element_address(ary, idx, type, sizetype);

  if (result2 != NULL)  *result2 = elemtype;

  assert(ptr != top(), "top should go hand-in-hand with stopped");

  return ptr;
}


// returns IfNode
IfNode* Parse::jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask) {
  Node   *cmp = _gvn.transform( new (C, 3) CmpINode( a, b)); // two cases: shiftcount > 32 and shiftcount <= 32
  Node   *tst = _gvn.transform( new (C, 2) BoolNode( cmp, mask));
  IfNode *iff = create_and_map_if( control(), tst, ((mask == BoolTest::eq) ? PROB_STATIC_INFREQUENT : PROB_FAIR), COUNT_UNKNOWN );
  return iff;
}

// return Region node
Node* Parse::jump_if_join(Node* iffalse, Node* iftrue) {
  Node *region = new (C, 3) RegionNode(3); // 2 results
  record_for_igvn(region);
  region->init_req(1, iffalse);
  region->init_req(2, iftrue );
  _gvn.set_type(region, Type::CONTROL);
  region = _gvn.transform(region);
  set_control (region);
  return region;
}


//------------------------------helper for tableswitch-------------------------
void Parse::jump_if_true_fork(IfNode *iff, int dest_bci_if_true, int prof_table_index) {
  // True branch, use existing map info
  { PreserveJVMState pjvms(this);
    Node *iftrue  = _gvn.transform( new (C, 1) IfTrueNode (iff) );
    set_control( iftrue );
    profile_switch_case(prof_table_index);
    merge_new_path(dest_bci_if_true);
  }

  // False branch
  Node *iffalse = _gvn.transform( new (C, 1) IfFalseNode(iff) );
  set_control( iffalse );
}

void Parse::jump_if_false_fork(IfNode *iff, int dest_bci_if_true, int prof_table_index) {
  // False branch, use existing map info
  { PreserveJVMState pjvms(this);
    Node *iffalse = _gvn.transform( new (C, 1) IfFalseNode (iff) );
    set_control( iffalse );
    profile_switch_case(prof_table_index);
    merge_new_path(dest_bci_if_true);
  }

  // True branch
  Node *iftrue = _gvn.transform( new (C, 1) IfTrueNode(iff) );
  set_control( iftrue );
}

void Parse::jump_if_always_fork(int dest_bci, int prof_table_index) {
  // Branch is always taken; use existing map and control()
  profile_switch_case(prof_table_index);
  merge_new_path(dest_bci);
}


extern "C" {
  static int jint_cmp(const void *i, const void *j) {
    int a = *(jint *)i;
    int b = *(jint *)j;
    return a > b ? 1 : a < b ? -1 : 0;
  }
}


// Default value for methodData switch indexing. Must be a negative value to avoid
// conflict with any legal switch index.
#define NullTableIndex -1

class SwitchRange : public StackObj {
  // a range of integers coupled with a bci destination
  jint _lo;                     // inclusive lower limit
  jint _hi;                     // inclusive upper limit
  int  _dest;
  int  _table_index;            // index into method data table

public:
  jint lo() const              { return _lo;   }
  jint hi() const              { return _hi;   }
  int  dest() const            { return _dest; }
  int  table_index() const     { return _table_index; }
  bool is_singleton() const    { return _lo == _hi; }

  void setRange(jint lo, jint hi, int dest, int table_index) {
    assert(lo <= hi, "must be a non-empty range");
    _lo = lo, _hi = hi; _dest = dest; _table_index = table_index;
  }
  bool adjoinRange(jint lo, jint hi, int dest, int table_index) {
    assert(lo <= hi, "must be a non-empty range");
    if (lo == _hi+1 && dest == _dest && table_index == _table_index) {
      _hi = hi;
      return true;
    }
    return false;
  }

  void set (jint value, int dest, int table_index) {
    setRange(value, value, dest, table_index);
  }
  bool adjoin(jint value, int dest, int table_index) {
    return adjoinRange(value, value, dest, table_index);
  }

  void print(ciEnv* env) {
    if (is_singleton())
      tty->print(" {%d}=>%d", lo(), dest());
    else if (lo() == min_jint)
      tty->print(" {..%d}=>%d", hi(), dest());
    else if (hi() == max_jint)
      tty->print(" {%d..}=>%d", lo(), dest());
    else
      tty->print(" {%d..%d}=>%d", lo(), hi(), dest());
  }
};


//-------------------------------do_tableswitch--------------------------------
void Parse::do_tableswitch() {
  Node* lookup = pop();

  // Get information about tableswitch
  int default_dest = iter().get_dest_table(0);
  int lo_index     = iter().get_int_table(1);
  int hi_index     = iter().get_int_table(2);
  int len          = hi_index - lo_index + 1;

  if (len < 1) {
    // If this is a backward branch, add safepoint
    maybe_add_safepoint(default_dest);
    merge(default_dest);
    return;
  }

  // generate decision tree, using trichotomy when possible
  int rnum = len+2;
  bool makes_backward_branch = false;
  SwitchRange* ranges = NEW_RESOURCE_ARRAY(SwitchRange, rnum);
  int rp = -1;
  if (lo_index != min_jint) {
    ranges[++rp].setRange(min_jint, lo_index-1, default_dest, NullTableIndex);
  }
  for (int j = 0; j < len; j++) {
    jint match_int = lo_index+j;
    int  dest      = iter().get_dest_table(j+3);
    makes_backward_branch |= (dest <= bci());
    int  table_index = method_data_update() ?
      j : NullTableIndex;
    if (rp < 0 || !ranges[rp].adjoin(match_int, dest, table_index)) {
      ranges[++rp].set(match_int, dest, table_index);
    }
  }
  jint highest = lo_index+(len-1);
  assert(ranges[rp].hi() == highest, "");
  if (highest != max_jint
      && !ranges[rp].adjoinRange(highest+1, max_jint, default_dest, NullTableIndex)) {
    ranges[++rp].setRange(highest+1, max_jint, default_dest, NullTableIndex);
  }
  assert(rp < len+2, "not too many ranges");

  // Safepoint in case a backward branch is observed
  if( makes_backward_branch && UseLoopSafepoints )
    add_safepoint();

  jump_switch_ranges(lookup, &ranges[0], &ranges[rp]);
}


//------------------------------do_lookupswitch--------------------------------
void Parse::do_lookupswitch() {
  Node *lookup = pop();         // lookup value
  // Get information about lookupswitch
  int default_dest = iter().get_dest_table(0);
  int len          = iter().get_int_table(1);

  if (len < 1) {    // If this is a backward branch, add safepoint
    maybe_add_safepoint(default_dest);
    merge(default_dest);
    return;
  }

  // generate decision tree, using trichotomy when possible
  jint* table = NEW_RESOURCE_ARRAY(jint, len*2);
  {
    for( int j = 0; j < len; j++ ) {
      table[j+j+0] = iter().get_int_table(2+j+j);
      table[j+j+1] = iter().get_dest_table(2+j+j+1);
    }
    qsort( table, len, 2*sizeof(table[0]), jint_cmp );
  }

  int rnum = len*2+1;
  bool makes_backward_branch = false;
  SwitchRange* ranges = NEW_RESOURCE_ARRAY(SwitchRange, rnum);
  int rp = -1;
  for( int j = 0; j < len; j++ ) {
    jint match_int   = table[j+j+0];
    int  dest        = table[j+j+1];
    int  next_lo     = rp < 0 ? min_jint : ranges[rp].hi()+1;
    int  table_index = method_data_update() ? j : NullTableIndex;
    makes_backward_branch |= (dest <= bci());
    if( match_int != next_lo ) {
      ranges[++rp].setRange(next_lo, match_int-1, default_dest, NullTableIndex);
    }
    if( rp < 0 || !ranges[rp].adjoin(match_int, dest, table_index) ) {
      ranges[++rp].set(match_int, dest, table_index);
    }
  }
  jint highest = table[2*(len-1)];
  assert(ranges[rp].hi() == highest, "");
  if( highest != max_jint
      && !ranges[rp].adjoinRange(highest+1, max_jint, default_dest, NullTableIndex) ) {
    ranges[++rp].setRange(highest+1, max_jint, default_dest, NullTableIndex);
  }
  assert(rp < rnum, "not too many ranges");

  // Safepoint in case a backward branch is observed
  if( makes_backward_branch && UseLoopSafepoints )
    add_safepoint();

  jump_switch_ranges(lookup, &ranges[0], &ranges[rp]);
}

//----------------------------create_jump_tables-------------------------------
bool Parse::create_jump_tables(Node* key_val, SwitchRange* lo, SwitchRange* hi) {
  // Are jumptables enabled
  if (!UseJumpTables)  return false;

  // Are jumptables supported
  if (!Matcher::has_match_rule(Op_Jump))  return false;

  // Don't make jump table if profiling
  if (method_data_update())  return false;

  // Decide if a guard is needed to lop off big ranges at either (or
  // both) end(s) of the input set.  We'll call this the default target
  // even though we can't be sure that it is the true "default".
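  //
  // For illustration (not a case from the source): a tableswitch over
  // keys 0..9 arrives here padded with {min_jint..-1} and {10..max_jint},
  // both branching to the default target.  Those two outlier ranges span
  // nearly all of Type::INT, so one unsigned guard (key_val >=u 10, after
  // normalizing by lowval below) peels them off and leaves a dense
  // ten-entry table for the real cases.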

  bool needs_guard = false;
  int default_dest;
  int64 total_outlier_size = 0;
  int64 hi_size = ((int64)hi->hi()) - ((int64)hi->lo()) + 1;
  int64 lo_size = ((int64)lo->hi()) - ((int64)lo->lo()) + 1;

  if (lo->dest() == hi->dest()) {
    total_outlier_size = hi_size + lo_size;
    default_dest = lo->dest();
  } else if (lo_size > hi_size) {
    total_outlier_size = lo_size;
    default_dest = lo->dest();
  } else {
    total_outlier_size = hi_size;
    default_dest = hi->dest();
  }

  // If a guard test will eliminate very sparse end ranges, then
  // it is worth the cost of an extra jump.
  if (total_outlier_size > (MaxJumpTableSparseness * 4)) {
    needs_guard = true;
    if (default_dest == lo->dest())  lo++;
    if (default_dest == hi->dest())  hi--;
  }

  // Find the total number of cases and ranges
  int64 num_cases = ((int64)hi->hi()) - ((int64)lo->lo()) + 1;
  int   num_range = hi - lo + 1;

  // Don't create table if: too large, too small, or too sparse.
  if (num_cases < MinJumpTableSize || num_cases > MaxJumpTableSize)
    return false;
  if (num_cases > (MaxJumpTableSparseness * num_range))
    return false;

  // Normalize table lookups to zero
  int lowval = lo->lo();
  key_val = _gvn.transform( new (C, 3) SubINode(key_val, _gvn.intcon(lowval)) );

  // Generate a guard to protect against input keyvals that aren't
  // in the switch domain.
  if (needs_guard) {
    Node*   size = _gvn.intcon(num_cases);
    Node*   cmp  = _gvn.transform( new (C, 3) CmpUNode(key_val, size) );
    Node*   tst  = _gvn.transform( new (C, 2) BoolNode(cmp, BoolTest::ge) );
    IfNode* iff  = create_and_map_if( control(), tst, PROB_FAIR, COUNT_UNKNOWN);
    jump_if_true_fork(iff, default_dest, NullTableIndex);
  }

  // Create an ideal node JumpTable that has projections
  // of all possible ranges for a switch statement
  // The key_val input must be converted to a pointer offset and scaled.
  // Compare Parse::array_addressing above.
#ifdef _LP64
  // Clean the 32-bit int into a real 64-bit offset.
  // Otherwise, the jint value 0 might turn into an offset of 0x0800000000.
  const TypeLong* lkeytype = TypeLong::make(CONST64(0), num_cases-1, Type::WidenMin);
  key_val = _gvn.transform( new (C, 2) ConvI2LNode(key_val, lkeytype) );
#endif
  // Shift the value by wordsize so we have an index into the table, rather
  // than a switch value
  Node *shiftWord = _gvn.MakeConX(wordSize);
  key_val = _gvn.transform( new (C, 3) MulXNode( key_val, shiftWord));

  // Create the JumpNode
  Node* jtn = _gvn.transform( new (C, 2) JumpNode(control(), key_val, num_cases) );

  // These are the switch destinations hanging off the jumpnode
  int i = 0;
  for (SwitchRange* r = lo; r <= hi; r++) {
    for (int j = r->lo(); j <= r->hi(); j++, i++) {
      Node* input = _gvn.transform(new (C, 1) JumpProjNode(jtn, i, r->dest(), j - lowval));
      {
        PreserveJVMState pjvms(this);
        set_control(input);
        jump_if_always_fork(r->dest(), r->table_index());
      }
    }
  }
  assert(i == num_cases, "miscount of cases");
  stop_and_kill_map();  // no more uses for this JVMS
  return true;
}

//----------------------------jump_switch_ranges-------------------------------
void Parse::jump_switch_ranges(Node* key_val, SwitchRange *lo, SwitchRange *hi, int switch_depth) {
  Block* switch_block = block();

  if (switch_depth == 0) {
    // Do special processing for the top-level call.
    assert(lo->lo() == min_jint, "initial range must exhaust Type::INT");
    assert(hi->hi() == max_jint, "initial range must exhaust Type::INT");

    // Decrement pred-numbers for the unique set of nodes.
#ifdef ASSERT
    // Ensure that the block's successors are a (duplicate-free) set.
    int successors_counted = 0;  // block occurrences in [hi..lo]
    int unique_successors  = switch_block->num_successors();
    for (int i = 0; i < unique_successors; i++) {
      Block* target = switch_block->successor_at(i);

      // Check that the set of successors is the same in both places.
      int successors_found = 0;
      for (SwitchRange* p = lo; p <= hi; p++) {
        if (p->dest() == target->start())  successors_found++;
      }
      assert(successors_found > 0, "successor must be known");
      successors_counted += successors_found;
    }
    assert(successors_counted == (hi-lo)+1, "no unexpected successors");
#endif

    // Maybe prune the inputs, based on the type of key_val.
    jint min_val = min_jint;
    jint max_val = max_jint;
    const TypeInt* ti = key_val->bottom_type()->isa_int();
    if (ti != NULL) {
      min_val = ti->_lo;
      max_val = ti->_hi;
      assert(min_val <= max_val, "invalid int type");
    }
    while (lo->hi() < min_val)  lo++;
    if (lo->lo() < min_val)  lo->setRange(min_val, lo->hi(), lo->dest(), lo->table_index());
    while (hi->lo() > max_val)  hi--;
    if (hi->hi() > max_val)  hi->setRange(hi->lo(), max_val, hi->dest(), hi->table_index());
  }

#ifndef PRODUCT
  if (switch_depth == 0) {
    _max_switch_depth = 0;
    _est_switch_depth = log2_intptr((hi-lo+1)-1)+1;
  }
#endif

  assert(lo <= hi, "must be a non-empty set of ranges");
  if (lo == hi) {
    jump_if_always_fork(lo->dest(), lo->table_index());
  } else {
    assert(lo->hi() == (lo+1)->lo()-1, "contiguous ranges");
    assert(hi->lo() == (hi-1)->hi()+1, "contiguous ranges");

    if (create_jump_tables(key_val, lo, hi))  return;

    int nr = hi - lo + 1;

    SwitchRange* mid = lo + nr/2;
    // if there is an easy choice, pivot at a singleton:
    if (nr > 3 && !mid->is_singleton() && (mid-1)->is_singleton())  mid--;

    assert(lo < mid && mid <= hi, "good pivot choice");
    assert(nr != 2 || mid == hi,   "should pick higher of 2");
    assert(nr != 3 || mid == hi-1, "should pick middle of 3");

    Node *test_val = _gvn.intcon(mid->lo());

    if (mid->is_singleton()) {
      IfNode *iff_ne = jump_if_fork_int(key_val, test_val, BoolTest::ne);
      jump_if_false_fork(iff_ne, mid->dest(), mid->table_index());

      // Special Case:  If there are exactly three ranges, and the high
      // and low range each go to the same place, omit the "gt" test,
      // since it will not discriminate anything.
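      // Illustrative case: ranges {min_jint..4}=>D, {5}=>X, {6..max_jint}=>D.
      // The "ne" test above has already peeled key==5 off to X; every
      // remaining key branches to D, so a "gt" test here would only split
      // paths that rejoin immediately.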
      bool eq_test_only = (hi == lo+2 && hi->dest() == lo->dest());
      if (eq_test_only) {
        assert(mid == hi-1, "");
      }

      // if there is a higher range, test for it and process it:
      if (mid < hi && !eq_test_only) {
        // two comparisons of same values--should enable 1 test for 2 branches
        // Use BoolTest::le instead of BoolTest::gt
        IfNode *iff_le  = jump_if_fork_int(key_val, test_val, BoolTest::le);
        Node   *iftrue  = _gvn.transform( new (C, 1) IfTrueNode(iff_le) );
        Node   *iffalse = _gvn.transform( new (C, 1) IfFalseNode(iff_le) );
        { PreserveJVMState pjvms(this);
          set_control(iffalse);
          jump_switch_ranges(key_val, mid+1, hi, switch_depth+1);
        }
        set_control(iftrue);
      }

    } else {
      // mid is a range, not a singleton, so treat mid..hi as a unit
      IfNode *iff_ge = jump_if_fork_int(key_val, test_val, BoolTest::ge);

      // if there is a higher range, test for it and process it:
      if (mid == hi) {
        jump_if_true_fork(iff_ge, mid->dest(), mid->table_index());
      } else {
        Node *iftrue  = _gvn.transform( new (C, 1) IfTrueNode(iff_ge) );
        Node *iffalse = _gvn.transform( new (C, 1) IfFalseNode(iff_ge) );
        { PreserveJVMState pjvms(this);
          set_control(iftrue);
          jump_switch_ranges(key_val, mid, hi, switch_depth+1);
        }
        set_control(iffalse);
      }
    }

    // in any case, process the lower range
    jump_switch_ranges(key_val, lo, mid-1, switch_depth+1);
  }

  // Decrease pred_count for each successor after all is done.
  if (switch_depth == 0) {
    int unique_successors = switch_block->num_successors();
    for (int i = 0; i < unique_successors; i++) {
      Block* target = switch_block->successor_at(i);
      // Throw away the pre-allocated path for each unique successor.
      target->next_path_num();
    }
  }

#ifndef PRODUCT
  _max_switch_depth = MAX2(switch_depth, _max_switch_depth);
  if (TraceOptoParse && Verbose && WizardMode && switch_depth == 0) {
    SwitchRange* r;
    int nsing = 0;
    for( r = lo; r <= hi; r++ ) {
      if( r->is_singleton() )  nsing++;
    }
    tty->print(">>> ");
    _method->print_short_name();
    tty->print_cr(" switch decision tree");
    tty->print_cr("    %d ranges (%d singletons), max_depth=%d, est_depth=%d",
                  hi-lo+1, nsing, _max_switch_depth, _est_switch_depth);
    if (_max_switch_depth > _est_switch_depth) {
      tty->print_cr("******** BAD SWITCH DEPTH ********");
    }
    tty->print("   ");
    for( r = lo; r <= hi; r++ ) {
      r->print(env());
    }
    tty->print_cr("");
  }
#endif
}

void Parse::modf() {
  Node *f2 = pop();
  Node *f1 = pop();
  Node* c = make_runtime_call(RC_LEAF, OptoRuntime::modf_Type(),
                              CAST_FROM_FN_PTR(address, SharedRuntime::frem),
                              "frem", NULL, //no memory effects
                              f1, f2);
  Node* res = _gvn.transform(new (C, 1) ProjNode(c, TypeFunc::Parms + 0));

  push(res);
}

void Parse::modd() {
  Node *d2 = pop_pair();
  Node *d1 = pop_pair();
  Node* c = make_runtime_call(RC_LEAF, OptoRuntime::Math_DD_D_Type(),
                              CAST_FROM_FN_PTR(address, SharedRuntime::drem),
                              "drem", NULL, //no memory effects
                              d1, top(), d2, top());
  Node* res_d = _gvn.transform(new (C, 1) ProjNode(c, TypeFunc::Parms + 0));

#ifdef ASSERT
  Node* res_top = _gvn.transform(new (C, 1) ProjNode(c, TypeFunc::Parms + 1));
  assert(res_top == top(), "second value must be top");
#endif

  push_pair(res_d);
}

void Parse::l2f() {
  Node* f2 = pop();
  Node* f1 = pop();
  Node* c = make_runtime_call(RC_LEAF, OptoRuntime::l2f_Type(),
                              CAST_FROM_FN_PTR(address, SharedRuntime::l2f),
                              "l2f", NULL, //no memory effects
                              f1, f2);
  Node* res = _gvn.transform(new (C, 1) ProjNode(c, TypeFunc::Parms + 0));

  push(res);
}

void Parse::do_irem() {
  // Must keep both values on the expression-stack during null-check
  do_null_check(peek(), T_INT);
  // Compile-time detect of null-exception?
  if (stopped())  return;

  Node* b = pop();
  Node* a = pop();

  const Type *t = _gvn.type(b);
  if (t != Type::TOP) {
    const TypeInt *ti = t->is_int();
    if (ti->is_con()) {
      int divisor = ti->get_con();
      // check for positive power of 2
      if (divisor > 0 &&
          (divisor & ~(divisor-1)) == divisor) {
        // yes !
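        // Worked example (for illustration): divisor 8 gives mask 7.
        // 13 % 8 == (13 & 7) == 5.  Java irem truncates toward zero, so
        // for a negative dividend a % 8 == -((-a) & 7); e.g.
        // -13 % 8 == -(13 & 7) == -5.  That is the negate/and/negate
        // sequence built on the negative path below.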
        Node *mask = _gvn.intcon((divisor - 1));
        // Sigh, must handle negative dividends
        Node *zero = _gvn.intcon(0);
        IfNode *ifff = jump_if_fork_int(a, zero, BoolTest::lt);
        Node *iff = _gvn.transform( new (C, 1) IfFalseNode(ifff) );
        Node *ift = _gvn.transform( new (C, 1) IfTrueNode (ifff) );
        Node *reg = jump_if_join(ift, iff);
        Node *phi = PhiNode::make(reg, NULL, TypeInt::INT);
        // Negative path; negate/and/negate
        Node *neg = _gvn.transform( new (C, 3) SubINode(zero, a) );
        Node *andn= _gvn.transform( new (C, 3) AndINode(neg, mask) );
        Node *negn= _gvn.transform( new (C, 3) SubINode(zero, andn) );
        phi->init_req(1, negn);
        // Fast positive case
        Node *andx = _gvn.transform( new (C, 3) AndINode(a, mask) );
        phi->init_req(2, andx);
        // Push the merge
        push( _gvn.transform(phi) );
        return;
      }
    }
  }
  // Default case
  push( _gvn.transform( new (C, 3) ModINode(control(),a,b) ) );
}

// Handle jsr and jsr_w bytecode
void Parse::do_jsr() {
  assert(bc() == Bytecodes::_jsr || bc() == Bytecodes::_jsr_w, "wrong bytecode");

  // Store information about current state, tagged with new _jsr_bci
  int return_bci = iter().next_bci();
  int jsr_bci    = (bc() == Bytecodes::_jsr) ? iter().get_dest() : iter().get_far_dest();

  // Update method data
  profile_taken_branch(jsr_bci);

  // The way we do things now, there is only one successor block
  // for the jsr, because the target code is cloned by ciTypeFlow.
  Block* target = successor_for_bci(jsr_bci);

  // What got pushed?
  const Type* ret_addr = target->peek();
  assert(ret_addr->singleton(), "must be a constant (cloned jsr body)");

  // Effect of jsr on stack
  push(_gvn.makecon(ret_addr));

  // Flow to the jsr.
  merge(jsr_bci);
}

// Handle ret bytecode
void Parse::do_ret() {
  // Find to whom we return.
#if 0 // %%%% MAKE THIS WORK
  Node* con = local();
  const TypePtr* tp = con->bottom_type()->isa_ptr();
  assert(tp && tp->singleton(), "");
  int return_bci = (int) tp->get_con();
  merge(return_bci);
#else
  assert(block()->num_successors() == 1, "a ret can only go one place now");
  Block* target = block()->successor_at(0);
  assert(!target->is_ready(), "our arrival must be expected");
  profile_ret(target->flow()->start());
  int pnum = target->next_path_num();
  merge_common(target, pnum);
#endif
}

//--------------------------dynamic_branch_prediction--------------------------
// Try to gather dynamic branch prediction behavior.  Return a probability
// of the branch being taken and set the "cnt" field.  Returns a -1.0
// if we need to use static prediction for some reason.
float Parse::dynamic_branch_prediction(float &cnt) {
  ResourceMark rm;

  cnt = COUNT_UNKNOWN;

  // Use MethodData information if it is available
  // FIXME: free the ProfileData structure
  ciMethodData* methodData = method()->method_data();
  if (!methodData->is_mature())  return PROB_UNKNOWN;
  ciProfileData* data = methodData->bci_to_data(bci());
  if (!data->is_JumpData())  return PROB_UNKNOWN;

  // get taken and not taken values
  int     taken = data->as_JumpData()->taken();
  int not_taken = 0;
  if (data->is_BranchData()) {
    not_taken = data->as_BranchData()->not_taken();
  }

  // scale the counts to be commensurate with invocation counts:
  taken = method()->scale_count(taken);
  not_taken = method()->scale_count(not_taken);

  // Give up if too few (or too many, in which case the sum will overflow) counts to be meaningful.
  // We also check that individual counters are positive first, otherwise the sum can become positive.
  if (taken < 0 || not_taken < 0 || taken + not_taken < 40) {
    if (C->log() != NULL) {
      C->log()->elem("branch target_bci='%d' taken='%d' not_taken='%d'", iter().get_dest(), taken, not_taken);
    }
    return PROB_UNKNOWN;
  }

  // Compute frequency that we arrive here
  float sum = taken + not_taken;
  // Adjust, if this block is a cloned private block but the
  // Jump counts are shared.  Take the private counts for
  // just this path instead of the shared counts.
  if( block()->count() > 0 )
    sum = block()->count();
  cnt = sum / FreqCountInvocations;

  // Pin probability to sane limits
  float prob;
  if( !taken )
    prob = (0+PROB_MIN) / 2;
  else if( !not_taken )
    prob = (1+PROB_MAX) / 2;
  else {                         // Compute probability of true path
    prob = (float)taken / (float)(taken + not_taken);
    if (prob > PROB_MAX)  prob = PROB_MAX;
    if (prob < PROB_MIN)  prob = PROB_MIN;
  }

  assert((cnt > 0.0f) && (prob > 0.0f),
         "Bad frequency assignment in if");

  if (C->log() != NULL) {
    const char* prob_str = NULL;
    if (prob >= PROB_MAX)  prob_str = (prob == PROB_MAX) ? "max" : "always";
    if (prob <= PROB_MIN)  prob_str = (prob == PROB_MIN) ? "min" : "never";
    char prob_str_buf[30];
    if (prob_str == NULL) {
      sprintf(prob_str_buf, "%g", prob);
      prob_str = prob_str_buf;
    }
    C->log()->elem("branch target_bci='%d' taken='%d' not_taken='%d' cnt='%g' prob='%s'",
                   iter().get_dest(), taken, not_taken, cnt, prob_str);
  }
  return prob;
}

//-----------------------------branch_prediction-------------------------------
float Parse::branch_prediction(float& cnt,
                               BoolTest::mask btest,
                               int target_bci) {
  float prob = dynamic_branch_prediction(cnt);
  // If prob is unknown, switch to static prediction
  if (prob != PROB_UNKNOWN)  return prob;

  prob = PROB_FAIR;                   // Set default value
  if (btest == BoolTest::eq)          // Exactly equal test?
    prob = PROB_STATIC_INFREQUENT;    // Assume it's relatively infrequent
  else if (btest == BoolTest::ne)
    prob = PROB_STATIC_FREQUENT;      // Assume it's relatively frequent

  // If this is a conditional test guarding a backwards branch,
  // assume it's a loop-back edge.  Make it a likely taken branch.
  if (target_bci < bci()) {
    if (is_osr_parse()) {    // Could be a hot OSR'd loop; force deopt
      // Since it's an OSR, we probably have profile data, but since
      // branch_prediction returned PROB_UNKNOWN, the counts are too small.
      // Let's make a special check here for completely zero counts.
      ciMethodData* methodData = method()->method_data();
      if (!methodData->is_empty()) {
        ciProfileData* data = methodData->bci_to_data(bci());
        // Only stop for truly zero counts, which mean an unknown part
        // of the OSR-ed method, and we want to deopt to gather more stats.
        // If you have ANY counts, then this loop is simply 'cold' relative
        // to the OSR loop.
        if (data->as_BranchData()->taken() +
            data->as_BranchData()->not_taken() == 0 ) {
          // This is the only way to return PROB_UNKNOWN:
          return PROB_UNKNOWN;
        }
      }
    }
    prob = PROB_STATIC_FREQUENT;     // Likely to take backwards branch
  }

  assert(prob != PROB_UNKNOWN, "must have some guess at this point");
  return prob;
}

// The magic constants are chosen so as to match the output of
// branch_prediction() when the profile reports a zero taken count.
// It is important to distinguish zero counts unambiguously, because
// some branches (e.g., _213_javac.Assembler.eliminate) validly produce
// very small but nonzero probabilities, which if confused with zero
// counts would keep the program recompiling indefinitely.
bool Parse::seems_never_taken(float prob) {
  return prob < PROB_MIN;
}

// True if the comparison seems to be the kind that will not change its
// statistics from true to false.  See comments in adjust_map_after_if.
// This question is only asked along paths which are already
// classified as untaken (by seems_never_taken), so really,
// if a path is never taken, its controlling comparison is
// already acting in a stable fashion.  If the comparison
// seems stable, we will put an expensive uncommon trap
// on the untaken path.  To be conservative, and to allow
// partially executed counted loops to be compiled fully,
// we will plant uncommon traps only after pointer comparisons.
bool Parse::seems_stable_comparison(BoolTest::mask btest, Node* cmp) {
  for (int depth = 4; depth > 0; depth--) {
    // The following switch can find CmpP here over half the time for
    // dynamic language code rich with type tests.
    // Code using counted loops or array manipulations (typical
    // of benchmarks) will have many (>80%) CmpI instructions.
    switch (cmp->Opcode()) {
    case Op_CmpP:
      // A never-taken null check looks like CmpP/BoolTest::eq.
      // These certainly should be closed off as uncommon traps.
      if (btest == BoolTest::eq)
        return true;
      // A never-failed type check looks like CmpP/BoolTest::ne.
      // Let's put traps on those, too, so that we don't have to compile
      // unused paths with indeterminate dynamic type information.
      if (ProfileDynamicTypes)
        return true;
      return false;

    case Op_CmpI:
      // A small minority (< 10%) of CmpP are masked as CmpI,
      // as if by boolean conversion ((p == q? 1: 0) != 0).
      // Detect that here, even if it hasn't optimized away yet.
      // Specifically, this covers the 'instanceof' operator.
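      // An illustrative shape (not from the source): Java code such as
      // "if (p instanceof C)" can reach here as CmpI(Phi(1,0), 0) with
      // BoolTest::ne, where the Phi merges the constant results of a
      // pointer check; the walk below recovers the underlying CmpP from
      // the Phi's diamond and re-asks the question there.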
      if (btest == BoolTest::ne || btest == BoolTest::eq) {
        if (_gvn.type(cmp->in(2))->singleton() &&
            cmp->in(1)->is_Phi()) {
          PhiNode* phi = cmp->in(1)->as_Phi();
          int true_path = phi->is_diamond_phi();
          if (true_path > 0 &&
              _gvn.type(phi->in(1))->singleton() &&
              _gvn.type(phi->in(2))->singleton()) {
            // phi->region->if_proj->ifnode->bool->cmp
            BoolNode* bol = phi->in(0)->in(1)->in(0)->in(1)->as_Bool();
            btest = bol->_test._test;
            cmp = bol->in(1);
            continue;
          }
        }
      }
      return false;
    }
  }
  return false;
}

//-------------------------------repush_if_args--------------------------------
// Push arguments of an "if" bytecode back onto the stack by adjusting _sp.
inline int Parse::repush_if_args() {
#ifndef PRODUCT
  if (PrintOpto && WizardMode) {
    tty->print("defending against excessive implicit null exceptions on %s @%d in ",
               Bytecodes::name(iter().cur_bc()), iter().cur_bci());
    method()->print_name(); tty->cr();
  }
#endif
  int bc_depth = - Bytecodes::depth(iter().cur_bc());
  assert(bc_depth == 1 || bc_depth == 2, "only two kinds of branches");
  DEBUG_ONLY(sync_jvms());   // argument(n) requires a synced jvms
  assert(argument(0) != NULL, "must exist");
  assert(bc_depth == 1 || argument(1) != NULL, "two must exist");
  _sp += bc_depth;
  return bc_depth;
}

//----------------------------------do_ifnull----------------------------------
void Parse::do_ifnull(BoolTest::mask btest, Node *c) {
  int target_bci = iter().get_dest();

  Block* branch_block = successor_for_bci(target_bci);
  Block* next_block   = successor_for_bci(iter().next_bci());

  float cnt;
  float prob = branch_prediction(cnt, btest, target_bci);
  if (prob == PROB_UNKNOWN) {
    // (An earlier version of do_ifnull omitted this trap for OSR methods.)
#ifndef PRODUCT
    if (PrintOpto && Verbose)
      tty->print_cr("Never-taken edge stops compilation at bci %d",bci());
#endif
    repush_if_args(); // to gather stats on loop
    // We need to mark this branch as taken so that if we recompile we will
    // see that it is possible. In the tiered system the interpreter doesn't
    // do profiling and by the time we get to the lower tier from the interpreter
    // the path may be cold again. Make sure it doesn't look untaken
    profile_taken_branch(target_bci, !ProfileInterpreter);
    uncommon_trap(Deoptimization::Reason_unreached,
                  Deoptimization::Action_reinterpret,
                  NULL, "cold");
    if (EliminateAutoBox) {
      // Mark the successor blocks as parsed
      branch_block->next_path_num();
      next_block->next_path_num();
    }
    return;
  }

  explicit_null_checks_inserted++;

  // Generate real control flow
  Node *tst = _gvn.transform( new (C, 2) BoolNode( c, btest ) );

  // Sanity check the probability value
  assert(prob > 0.0f,"Bad probability in Parser");
  // Need xform to put node in hash table
  IfNode *iff = create_and_xform_if( control(), tst, prob, cnt );
  assert(iff->_prob > 0.0f,"Optimizer made bad probability in parser");
  // True branch
  { PreserveJVMState pjvms(this);
    Node* iftrue = _gvn.transform( new (C, 1) IfTrueNode (iff) );
    set_control(iftrue);

    if (stopped()) {            // Path is dead?
      explicit_null_checks_elided++;
      if (EliminateAutoBox) {
        // Mark the successor block as parsed
        branch_block->next_path_num();
      }
    } else {                    // Path is live.
      // Update method data
      profile_taken_branch(target_bci);
      adjust_map_after_if(btest, c, prob, branch_block, next_block);
      if (!stopped()) {
        merge(target_bci);
      }
    }
  }

  // False branch
  Node* iffalse = _gvn.transform( new (C, 1) IfFalseNode(iff) );
  set_control(iffalse);

  if (stopped()) {              // Path is dead?
    explicit_null_checks_elided++;
    if (EliminateAutoBox) {
      // Mark the successor block as parsed
      next_block->next_path_num();
    }
  } else {                      // Path is live.
    // Update method data
    profile_not_taken_branch();
    adjust_map_after_if(BoolTest(btest).negate(), c, 1.0-prob,
                        next_block, branch_block);
  }
}

//------------------------------------do_if------------------------------------
void Parse::do_if(BoolTest::mask btest, Node* c) {
  int target_bci = iter().get_dest();

  Block* branch_block = successor_for_bci(target_bci);
  Block* next_block   = successor_for_bci(iter().next_bci());

  float cnt;
  float prob = branch_prediction(cnt, btest, target_bci);
  float untaken_prob = 1.0 - prob;

  if (prob == PROB_UNKNOWN) {
#ifndef PRODUCT
    if (PrintOpto && Verbose)
      tty->print_cr("Never-taken edge stops compilation at bci %d",bci());
#endif
    repush_if_args(); // to gather stats on loop
    // We need to mark this branch as taken so that if we recompile we will
    // see that it is possible. In the tiered system the interpreter doesn't
    // do profiling and by the time we get to the lower tier from the interpreter
    // the path may be cold again. Make sure it doesn't look untaken
    profile_taken_branch(target_bci, !ProfileInterpreter);
    uncommon_trap(Deoptimization::Reason_unreached,
                  Deoptimization::Action_reinterpret,
                  NULL, "cold");
    if (EliminateAutoBox) {
      // Mark the successor blocks as parsed
      branch_block->next_path_num();
      next_block->next_path_num();
    }
    return;
  }

  // Sanity check the probability value
  assert(0.0f < prob && prob < 1.0f,"Bad probability in Parser");

  bool taken_if_true = true;
  // Convert BoolTest to canonical form:
  if (!BoolTest(btest).is_canonical()) {
    btest         = BoolTest(btest).negate();
    taken_if_true = false;
    // prob is NOT updated here; it remains the probability of the taken
    // path (as opposed to the prob of the path guarded by an 'IfTrueNode').
  }
  assert(btest != BoolTest::eq, "!= is the only canonical exact test");

  Node* tst0 = new (C, 2) BoolNode(c, btest);
  Node* tst = _gvn.transform(tst0);
  BoolTest::mask taken_btest   = BoolTest::illegal;
  BoolTest::mask untaken_btest = BoolTest::illegal;

  if (tst->is_Bool()) {
    // Refresh c from the transformed bool node, since it may be
    // simpler than the original c.  Also re-canonicalize btest.
    // This wins when (Bool ne (Conv2B p) 0) => (Bool ne (CmpP p NULL)).
    // That can arise from statements like: if (x instanceof C) ...
    if (tst != tst0) {
      // Canonicalize one more time since transform can change it.
      btest = tst->as_Bool()->_test._test;
      if (!BoolTest(btest).is_canonical()) {
        // Reverse edges one more time...
        tst   = _gvn.transform( tst->as_Bool()->negate(&_gvn) );
        btest = tst->as_Bool()->_test._test;
        assert(BoolTest(btest).is_canonical(), "sanity");
        taken_if_true = !taken_if_true;
      }
      c = tst->in(1);
    }
    BoolTest::mask neg_btest = BoolTest(btest).negate();
    taken_btest   = taken_if_true ?     btest : neg_btest;
    untaken_btest = taken_if_true ? neg_btest :     btest;
  }

  // Generate real control flow
  float true_prob = (taken_if_true ? prob : untaken_prob);
  IfNode* iff = create_and_map_if(control(), tst, true_prob, cnt);
  assert(iff->_prob > 0.0f,"Optimizer made bad probability in parser");
  Node* taken_branch   = new (C, 1) IfTrueNode(iff);
  Node* untaken_branch = new (C, 1) IfFalseNode(iff);
  if (!taken_if_true) {  // Finish conversion to canonical form
    Node* tmp      = taken_branch;
    taken_branch   = untaken_branch;
    untaken_branch = tmp;
  }

  // Branch is taken:
  { PreserveJVMState pjvms(this);
    taken_branch = _gvn.transform(taken_branch);
    set_control(taken_branch);

    if (stopped()) {
      if (EliminateAutoBox) {
        // Mark the successor block as parsed
        branch_block->next_path_num();
      }
    } else {
      // Update method data
      profile_taken_branch(target_bci);
      adjust_map_after_if(taken_btest, c, prob, branch_block, next_block);
      if (!stopped()) {
        merge(target_bci);
      }
    }
  }

  untaken_branch = _gvn.transform(untaken_branch);
  set_control(untaken_branch);

  // Branch not taken.
  if (stopped()) {
    if (EliminateAutoBox) {
      // Mark the successor block as parsed
      next_block->next_path_num();
    }
  } else {
    // Update method data
    profile_not_taken_branch();
    adjust_map_after_if(untaken_btest, c, untaken_prob,
                        next_block, branch_block);
  }
}

//----------------------------adjust_map_after_if------------------------------
// Adjust the JVM state to reflect the result of taking this path.
// Basically, it means inspecting the CmpNode controlling this
// branch, seeing how it constrains a tested value, and then
// deciding if it's worth our while to encode this constraint
// as graph nodes in the current abstract interpretation map.
void Parse::adjust_map_after_if(BoolTest::mask btest, Node* c, float prob,
                                Block* path, Block* other_path) {
  if (stopped() || !c->is_Cmp() || btest == BoolTest::illegal)
    return;                             // nothing to do

  bool is_fallthrough = (path == successor_for_bci(iter().next_bci()));

  if (seems_never_taken(prob) && seems_stable_comparison(btest, c)) {
    // If this might possibly turn into an implicit null check,
    // and the null has never yet been seen, we need to generate
    // an uncommon trap, so as to recompile instead of suffering
    // with very slow branches.  (We'll get the slow branches if
    // the program ever changes phase and starts seeing nulls here.)
    //
    // We do not inspect for a null constant, since a node may
    // optimize to 'null' later on.
    //
    // Null checks, and other tests which expect inequality,
    // show btest == BoolTest::eq along the non-taken branch.
    // On the other hand, type tests, must-be-null tests,
    // and other tests which expect pointer equality,
    // show btest == BoolTest::ne along the non-taken branch.
    // We prune both types of branches if they look unused.
    repush_if_args();
    // We need to mark this branch as taken so that if we recompile we will
    // see that it is possible. In the tiered system the interpreter doesn't
    // do profiling and by the time we get to the lower tier from the interpreter
    // the path may be cold again. Make sure it doesn't look untaken
    if (is_fallthrough) {
      profile_not_taken_branch(!ProfileInterpreter);
    } else {
      profile_taken_branch(iter().get_dest(), !ProfileInterpreter);
    }
    uncommon_trap(Deoptimization::Reason_unreached,
                  Deoptimization::Action_reinterpret,
                  NULL,
                  (is_fallthrough ? "taken always" : "taken never"));
    return;
  }

  Node* val = c->in(1);
  Node* con = c->in(2);
  const Type* tcon = _gvn.type(con);
  const Type* tval = _gvn.type(val);
  bool have_con = tcon->singleton();
  if (tval->singleton()) {
    if (!have_con) {
      // Swap, so constant is in con.
      con  = val;
      tcon = tval;
      val  = c->in(2);
      tval = _gvn.type(val);
      btest = BoolTest(btest).commute();
      have_con = true;
    } else {
      // Do we have two constants?  Then leave well enough alone.
      have_con = false;
    }
  }
  if (!have_con)                        // remaining adjustments need a con
    return;


  int val_in_map = map()->find_edge(val);
  if (val_in_map < 0)  return;          // replace_in_map would be useless
  {
    JVMState* jvms = this->jvms();
    if (!(jvms->is_loc(val_in_map) ||
          jvms->is_stk(val_in_map)))
      return;                           // again, it would be useless
  }

  // Check for a comparison to a constant, and "know" that the compared
  // value is constrained on this path.
  assert(tcon->singleton(), "");
  ConstraintCastNode* ccast = NULL;
  Node* cast = NULL;

  switch (btest) {
  case BoolTest::eq:                    // Constant test?
    {
      const Type* tboth = tcon->join(tval);
      if (tboth == tval)  break;        // Nothing to gain.
      if (tcon->isa_int()) {
        ccast = new (C, 2) CastIINode(val, tboth);
      } else if (tcon == TypePtr::NULL_PTR) {
        // Cast to null, but keep the pointer identity temporarily live.
        ccast = new (C, 2) CastPPNode(val, tboth);
      } else {
        const TypeF* tf = tcon->isa_float_constant();
        const TypeD* td = tcon->isa_double_constant();
        // Exclude tests vs float/double 0 as these could be
        // either +0 or -0.  Just because you are equal to +0
        // doesn't mean you ARE +0!
        if ((!tf || tf->_f != 0.0) &&
            (!td || td->_d != 0.0))
          cast = con;                   // Replace non-constant val by con.
      }
    }
    break;

  case BoolTest::ne:
    if (tcon == TypePtr::NULL_PTR) {
      cast = cast_not_null(val, false);
    }
    break;

  default:
    // (At this point we could record int range types with CastII.)
    break;
  }

  if (ccast != NULL) {
    const Type* tcc = ccast->as_Type()->type();
    assert(tcc != tval && tcc->higher_equal(tval), "must improve");
    // Delay transform() call to allow recovery of pre-cast value
    // at the control merge.
    ccast->set_req(0, control());
    _gvn.set_type_bottom(ccast);
    record_for_igvn(ccast);
    cast = ccast;
  }

  if (cast != NULL) {                   // Here's the payoff.
    replace_in_map(val, cast);
  }
}


//------------------------------do_one_bytecode--------------------------------
// Parse this bytecode, and alter the Parser's JVM->Node mapping
void Parse::do_one_bytecode() {
  Node *a, *b, *c, *d;          // Handy temps
  BoolTest::mask btest;
  int i;

  assert(!has_exceptions(), "bytecode entry state must be clear of throws");

  if (C->check_node_count(NodeLimitFudgeFactor * 5,
                          "out of nodes parsing method")) {
    return;
  }

#ifdef ASSERT
  // for setting breakpoints
  if (TraceOptoParse) {
    tty->print(" @");
    dump_bci(bci());
  }
#endif

  switch (bc()) {
  case Bytecodes::_nop:
    // do nothing
    break;
  case Bytecodes::_lconst_0:
    push_pair(longcon(0));
    break;

  case Bytecodes::_lconst_1:
    push_pair(longcon(1));
    break;

  case Bytecodes::_fconst_0:
    push(zerocon(T_FLOAT));
    break;

  case Bytecodes::_fconst_1:
    push(makecon(TypeF::ONE));
    break;

  case Bytecodes::_fconst_2:
    push(makecon(TypeF::make(2.0f)));
    break;

  case Bytecodes::_dconst_0:
    push_pair(zerocon(T_DOUBLE));
    break;

  case Bytecodes::_dconst_1:
    push_pair(makecon(TypeD::ONE));
    break;

  case Bytecodes::_iconst_m1:push(intcon(-1)); break;
  case Bytecodes::_iconst_0: push(intcon( 0)); break;
  case Bytecodes::_iconst_1: push(intcon( 1)); break;
  case Bytecodes::_iconst_2: push(intcon( 2)); break;
  case Bytecodes::_iconst_3: push(intcon( 3)); break;
  case Bytecodes::_iconst_4: push(intcon( 4)); break;
  case Bytecodes::_iconst_5: push(intcon( 5)); break;
  case Bytecodes::_bipush:   push(intcon(iter().get_constant_u1())); break;
  case Bytecodes::_sipush:   push(intcon(iter().get_constant_u2())); break;
  case Bytecodes::_aconst_null: push(null());  break;
  case Bytecodes::_ldc:
  case Bytecodes::_ldc_w:
  case Bytecodes::_ldc2_w:
    // If the constant is unresolved, run this BC once in the interpreter.
    {
      ciConstant constant = iter().get_constant();
      if (constant.basic_type() == T_OBJECT &&
          !constant.as_object()->is_loaded()) {
        int index = iter().get_constant_pool_index();
        constantTag tag = iter().get_constant_pool_tag(index);
        uncommon_trap(Deoptimization::make_trap_request
                      (Deoptimization::Reason_unloaded,
                       Deoptimization::Action_reinterpret,
                       index),
                      NULL, tag.internal_name());
        break;
      }
      assert(constant.basic_type() != T_OBJECT || !constant.as_object()->is_klass(),
             "must be java_mirror of klass");
      bool pushed = push_constant(constant, true);
      guarantee(pushed, "must be possible to push this constant");
    }

    break;

  case Bytecodes::_aload_0:
    push( local(0) );
    break;
  case Bytecodes::_aload_1:
    push( local(1) );
    break;
  case Bytecodes::_aload_2:
    push( local(2) );
    break;
  case Bytecodes::_aload_3:
    push( local(3) );
    break;
  case Bytecodes::_aload:
    push( local(iter().get_index()) );
    break;

  case Bytecodes::_fload_0:
  case Bytecodes::_iload_0:
    push( local(0) );
    break;
  case Bytecodes::_fload_1:
  case Bytecodes::_iload_1:
    push( local(1) );
    break;
  case Bytecodes::_fload_2:
  case Bytecodes::_iload_2:
    push( local(2) );
    break;
  case Bytecodes::_fload_3:
  case Bytecodes::_iload_3:
    push( local(3) );
    break;
  case Bytecodes::_fload:
  case Bytecodes::_iload:
    push( local(iter().get_index()) );
    break;
  case Bytecodes::_lload_0:
    push_pair_local( 0 );
    break;
  case Bytecodes::_lload_1:
    push_pair_local( 1 );
    break;
  case Bytecodes::_lload_2:
    push_pair_local( 2 );
    break;
  case Bytecodes::_lload_3:
    push_pair_local( 3 );
    break;
  case Bytecodes::_lload:
    push_pair_local( iter().get_index() );
    break;

  case Bytecodes::_dload_0:
    push_pair_local(0);
    break;
  case Bytecodes::_dload_1:
    push_pair_local(1);
    break;
  case Bytecodes::_dload_2:
    push_pair_local(2);
    break;
  case Bytecodes::_dload_3:
    push_pair_local(3);
    break;
  case Bytecodes::_dload:
    push_pair_local(iter().get_index());
    break;
  case Bytecodes::_fstore_0:
  case Bytecodes::_istore_0:
  case Bytecodes::_astore_0:
    set_local( 0, pop() );
    break;
  case Bytecodes::_fstore_1:
  case Bytecodes::_istore_1:
  case Bytecodes::_astore_1:
    set_local( 1, pop() );
    break;
  case Bytecodes::_fstore_2:
  case Bytecodes::_istore_2:
  case Bytecodes::_astore_2:
    set_local( 2, pop() );
    break;
  case Bytecodes::_fstore_3:
  case Bytecodes::_istore_3:
  case Bytecodes::_astore_3:
    set_local( 3, pop() );
    break;
  case Bytecodes::_fstore:
  case Bytecodes::_istore:
  case Bytecodes::_astore:
    set_local( iter().get_index(), pop() );
    break;
  // long stores
  case Bytecodes::_lstore_0:
    set_pair_local( 0, pop_pair() );
    break;
  case Bytecodes::_lstore_1:
    set_pair_local( 1, pop_pair() );
    break;
  case Bytecodes::_lstore_2:
    set_pair_local( 2, pop_pair() );
    break;
  case Bytecodes::_lstore_3:
    set_pair_local( 3, pop_pair() );
    break;
  case Bytecodes::_lstore:
    set_pair_local( iter().get_index(), pop_pair() );
    break;

  // double stores
  case Bytecodes::_dstore_0:
    set_pair_local( 0, dstore_rounding(pop_pair()) );
    break;
  case Bytecodes::_dstore_1:
    set_pair_local( 1, dstore_rounding(pop_pair()) );
    break;
  case Bytecodes::_dstore_2:
    set_pair_local( 2, dstore_rounding(pop_pair()) );
    break;
  case Bytecodes::_dstore_3:
    set_pair_local( 3, dstore_rounding(pop_pair()) );
    break;
  case Bytecodes::_dstore:
    set_pair_local( iter().get_index(), dstore_rounding(pop_pair()) );
    break;

  case Bytecodes::_pop:  _sp -= 1;   break;
  case Bytecodes::_pop2: _sp -= 2;   break;
  case Bytecodes::_swap:
    a = pop();
    b = pop();
    push(a);
    push(b);
    break;
  case Bytecodes::_dup:
    a = pop();
    push(a);
    push(a);
    break;
  case Bytecodes::_dup_x1:
    a = pop();
    b = pop();
    push( a );
    push( b );
    push( a );
    break;
  case Bytecodes::_dup_x2:
    a = pop();
    b = pop();
    c = pop();
    push( a );
    push( c );
    push( b );
    push( a );
    break;
  case Bytecodes::_dup2:
    a = pop();
    b = pop();
    push( b );
    push( a );
    push( b );
    push( a );
    break;

  case Bytecodes::_dup2_x1:
    // before: .. c, b, a
    // after:  .. b, a, c, b, a
    // not tested
    a = pop();
    b = pop();
    c = pop();
    push( b );
    push( a );
    push( c );
    push( b );
    push( a );
    break;
  case Bytecodes::_dup2_x2:
    // before: .. d, c, b, a
    // after:  .. b, a, d, c, b, a
    // not tested
    a = pop();
    b = pop();
    c = pop();
    d = pop();
    push( b );
    push( a );
    push( d );
    push( c );
    push( b );
    push( a );
    break;

  case Bytecodes::_arraylength: {
    // Must do null-check with value on expression stack
    Node *ary = do_null_check(peek(), T_ARRAY);
    // Compile-time detect of null-exception?
    if (stopped())  return;
    a = pop();
    push(load_array_length(a));
    break;
  }

  case Bytecodes::_baload: array_load(T_BYTE);   break;
  case Bytecodes::_caload: array_load(T_CHAR);   break;
  case Bytecodes::_iaload: array_load(T_INT);    break;
  case Bytecodes::_saload: array_load(T_SHORT);  break;
  case Bytecodes::_faload: array_load(T_FLOAT);  break;
  case Bytecodes::_aaload: array_load(T_OBJECT); break;
  case Bytecodes::_laload: {
    a = array_addressing(T_LONG, 0);
    if (stopped())  return;     // guaranteed null or range check
    _sp -= 2;                   // Pop array and index
    push_pair( make_load(control(), a, TypeLong::LONG, T_LONG, TypeAryPtr::LONGS));
    break;
  }
  case Bytecodes::_daload: {
    a = array_addressing(T_DOUBLE, 0);
    if (stopped())  return;     // guaranteed null or range check
    _sp -= 2;                   // Pop array and index
    push_pair( make_load(control(), a, Type::DOUBLE, T_DOUBLE, TypeAryPtr::DOUBLES));
    break;
  }
  case Bytecodes::_bastore: array_store(T_BYTE);  break;
  case Bytecodes::_castore: array_store(T_CHAR);  break;
  case Bytecodes::_iastore: array_store(T_INT);   break;
  case Bytecodes::_sastore: array_store(T_SHORT); break;
  case Bytecodes::_fastore: array_store(T_FLOAT); break;
  case Bytecodes::_aastore: {
    d = array_addressing(T_OBJECT, 1);
    if (stopped())  return;     // guaranteed null or range check
    array_store_check();
    c = pop();                  // Oop to store
    b = pop();                  // index (already used)
    a = pop();                  // the array itself
    const TypeOopPtr* elemtype = _gvn.type(a)->is_aryptr()->elem()->make_oopptr();
    const TypeAryPtr* adr_type = TypeAryPtr::OOPS;
    Node* store = store_oop_to_array(control(), a, d, adr_type, c, elemtype, T_OBJECT);
    break;
  }
  case Bytecodes::_lastore: {
    a = array_addressing(T_LONG, 2);
    if (stopped())  return;     // guaranteed null or range check
    c = pop_pair();
    _sp -= 2;                   // Pop array and index
    store_to_memory(control(), a, c, T_LONG, TypeAryPtr::LONGS);
    break;
  }
  case Bytecodes::_dastore: {
    a = array_addressing(T_DOUBLE, 2);
    if (stopped())  return;     // guaranteed null or range check
    c = pop_pair();
    _sp -= 2;                   // Pop array and index
    c = dstore_rounding(c);
    store_to_memory(control(), a, c, T_DOUBLE, TypeAryPtr::DOUBLES);
    break;
  }
  case Bytecodes::_getfield:
    do_getfield();
    break;

  case Bytecodes::_getstatic:
    do_getstatic();
    break;

  case Bytecodes::_putfield:
    do_putfield();
    break;

  case Bytecodes::_putstatic:
    do_putstatic();
    break;

  case Bytecodes::_irem:
    do_irem();
    break;
  case Bytecodes::_idiv:
    // Must keep both values on the expression-stack during null-check
    do_null_check(peek(), T_INT);
    // Compile-time detect of null-exception?
    if (stopped())  return;
    b = pop();
    a = pop();
    push( _gvn.transform( new (C, 3) DivINode(control(),a,b) ) );
    break;
  case Bytecodes::_imul:
    b = pop(); a = pop();
    push( _gvn.transform( new (C, 3) MulINode(a,b) ) );
    break;
  case Bytecodes::_iadd:
    b = pop(); a = pop();
    push( _gvn.transform( new (C, 3) AddINode(a,b) ) );
    break;
  case Bytecodes::_ineg:
    a = pop();
    push( _gvn.transform( new (C, 3) SubINode(_gvn.intcon(0),a)) );
    break;
  case Bytecodes::_isub:
    b = pop(); a = pop();
    push( _gvn.transform( new (C, 3) SubINode(a,b) ) );
    break;
  case Bytecodes::_iand:
    b = pop(); a = pop();
    push( _gvn.transform( new (C, 3) AndINode(a,b) ) );
    break;
  case Bytecodes::_ior:
    b = pop(); a = pop();
    push( _gvn.transform( new (C, 3) OrINode(a,b) ) );
    break;
  case Bytecodes::_ixor:
    b = pop(); a = pop();
    push( _gvn.transform( new (C, 3) XorINode(a,b) ) );
    break;
  case Bytecodes::_ishl:
    b = pop(); a = pop();
    push( _gvn.transform( new (C, 3) LShiftINode(a,b) ) );
    break;
  case Bytecodes::_ishr:
    b = pop(); a = pop();
    push( _gvn.transform( new (C, 3) RShiftINode(a,b) ) );
    break;
  case Bytecodes::_iushr:
    b = pop(); a = pop();
    push( _gvn.transform( new (C, 3) URShiftINode(a,b) ) );
    break;

  case Bytecodes::_fneg:
    a = pop();
    b = _gvn.transform(new (C, 2) NegFNode (a));
    push(b);
    break;

  case Bytecodes::_fsub:
    b = pop();
    a = pop();
    c = _gvn.transform( new (C, 3) SubFNode(a,b) );
    d = precision_rounding(c);
    push( d );
    break;

  case Bytecodes::_fadd:
    b = pop();
    a = pop();
    c = _gvn.transform( new (C, 3) AddFNode(a,b) );
    d = precision_rounding(c);
    push( d );
    break;

  case Bytecodes::_fmul:
    b = pop();
    a = pop();
    c = _gvn.transform( new (C, 3) MulFNode(a,b) );
    d = precision_rounding(c);
    push( d );
    break;

  case Bytecodes::_fdiv:
    b = pop();
    a = pop();
    c = _gvn.transform( new (C, 3) DivFNode(0,a,b) );
    d = precision_rounding(c);
    push( d );
    break;

  case Bytecodes::_frem:
    if (Matcher::has_match_rule(Op_ModF)) {
      // Generate a ModF node.
      b = pop();
      a = pop();
      c = _gvn.transform( new (C, 3) ModFNode(0,a,b) );
      d = precision_rounding(c);
      push( d );
    }
    else {
      // Generate a call.
      modf();
    }
    break;

  case Bytecodes::_fcmpl:
    b = pop();
    a = pop();
    c = _gvn.transform( new (C, 3) CmpF3Node( a, b));
    push(c);
    break;
  case Bytecodes::_fcmpg:
    b = pop();
    a = pop();

    // Same as fcmpl but need to flip the unordered case.  Swap the inputs,
    // which negates the result sign except for unordered.  Flip the unordered
    // as well by using CmpF3 which implements unordered-lesser instead of
    // unordered-greater semantics.  Finally, commute the result bits.  Result
    // is same as using a CmpF3Greater except we did it with CmpF3 alone.
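    // Worked example of the trick:
    //   a <  b : CmpF3(b,a) = +1, negated -> -1   (same as fcmpl)
    //   a >  b : CmpF3(b,a) = -1, negated -> +1
    //   a == b : CmpF3(b,a) =  0, negated ->  0
    //   NaN    : CmpF3(b,a) = -1, negated -> +1   (fcmpg wants +1)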
    c = _gvn.transform( new (C, 3) CmpF3Node( b, a));
    c = _gvn.transform( new (C, 3) SubINode(_gvn.intcon(0),c) );
    push(c);
    break;

  case Bytecodes::_f2i:
    a = pop();
    push(_gvn.transform(new (C, 2) ConvF2INode(a)));
    break;

  case Bytecodes::_d2i:
    a = pop_pair();
    b = _gvn.transform(new (C, 2) ConvD2INode(a));
    push( b );
    break;

  case Bytecodes::_f2d:
    a = pop();
    b = _gvn.transform( new (C, 2) ConvF2DNode(a));
    push_pair( b );
    break;

  case Bytecodes::_d2f:
    a = pop_pair();
    b = _gvn.transform( new (C, 2) ConvD2FNode(a));
    // This breaks _227_mtrt (speed & correctness) and _222_mpegaudio (speed)
    //b = _gvn.transform(new (C, 2) RoundFloatNode(0, b) );
    push( b );
    break;

  case Bytecodes::_l2f:
    if (Matcher::convL2FSupported()) {
      a = pop_pair();
      b = _gvn.transform( new (C, 2) ConvL2FNode(a));
      // For i486.ad, FILD doesn't restrict precision to 24 or 53 bits.
      // Rather than storing the result into an FP register then pushing
      // out to memory to round, the machine instruction that implements
      // ConvL2D is responsible for rounding.
      // c = precision_rounding(b);
      c = _gvn.transform(b);
      push(c);
    } else {
      l2f();
    }
    break;

  case Bytecodes::_l2d:
    a = pop_pair();
    b = _gvn.transform( new (C, 2) ConvL2DNode(a));
    // For i486.ad, rounding is always necessary (see _l2f above).
    // c = dprecision_rounding(b);
    c = _gvn.transform(b);
    push_pair(c);
    break;

  case Bytecodes::_f2l:
    a = pop();
    b = _gvn.transform( new (C, 2) ConvF2LNode(a));
    push_pair(b);
    break;

  case Bytecodes::_d2l:
    a = pop_pair();
    b = _gvn.transform( new (C, 2) ConvD2LNode(a));
    push_pair(b);
    break;

  case Bytecodes::_dsub:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new (C, 3) SubDNode(a,b) );
    d = dprecision_rounding(c);
    push_pair( d );
    break;

  case Bytecodes::_dadd:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new (C, 3) AddDNode(a,b) );
    d = dprecision_rounding(c);
    push_pair( d );
    break;

  case Bytecodes::_dmul:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new (C, 3) MulDNode(a,b) );
    d = dprecision_rounding(c);
    push_pair( d );
    break;

  case Bytecodes::_ddiv:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new (C, 3) DivDNode(0,a,b) );
    d = dprecision_rounding(c);
    push_pair( d );
    break;

  case Bytecodes::_dneg:
    a = pop_pair();
    b = _gvn.transform(new (C, 2) NegDNode (a));
    push_pair(b);
    break;

  case Bytecodes::_drem:
    if (Matcher::has_match_rule(Op_ModD)) {
      // Generate a ModD node.
      b = pop_pair();
      a = pop_pair();
      // a % b

      c = _gvn.transform( new (C, 3) ModDNode(0,a,b) );
      d = dprecision_rounding(c);
      push_pair( d );
    }
    else {
      // Generate a call.
      modd();
    }
    break;

  case Bytecodes::_dcmpl:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new (C, 3) CmpD3Node( a, b));
    push(c);
    break;
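  // (CmpD3 natively has dcmpl's unordered-lesser semantics, so dcmpl
  //  needs no fixup; dcmpg below reuses the commute-and-negate trick
  //  shown for fcmpg above.)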
  case Bytecodes::_dcmpg:
    b = pop_pair();
    a = pop_pair();
    // Same as dcmpl but need to flip the unordered case.
    // Commute the inputs, which negates the result sign except for unordered.
    // Flip the unordered as well by using CmpD3 which implements
    // unordered-lesser instead of unordered-greater semantics.
    // Finally, negate the result bits.  Result is same as using a
    // CmpD3Greater except we did it with CmpD3 alone.
    c = _gvn.transform( new (C, 3) CmpD3Node( b, a));
    c = _gvn.transform( new (C, 3) SubINode(_gvn.intcon(0),c) );
    push(c);
    break;


  // Note for longs -> lo word is on TOS, hi word is on TOS - 1
  case Bytecodes::_land:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new (C, 3) AndLNode(a,b) );
    push_pair(c);
    break;
  case Bytecodes::_lor:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new (C, 3) OrLNode(a,b) );
    push_pair(c);
    break;
  case Bytecodes::_lxor:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new (C, 3) XorLNode(a,b) );
    push_pair(c);
    break;

  case Bytecodes::_lshl:
    b = pop();                  // the shift count
    a = pop_pair();             // value to be shifted
    c = _gvn.transform( new (C, 3) LShiftLNode(a,b) );
    push_pair(c);
    break;
  case Bytecodes::_lshr:
    b = pop();                  // the shift count
    a = pop_pair();             // value to be shifted
    c = _gvn.transform( new (C, 3) RShiftLNode(a,b) );
    push_pair(c);
    break;
  case Bytecodes::_lushr:
    b = pop();                  // the shift count
    a = pop_pair();             // value to be shifted
    c = _gvn.transform( new (C, 3) URShiftLNode(a,b) );
    push_pair(c);
    break;
  case Bytecodes::_lmul:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new (C, 3) MulLNode(a,b) );
    push_pair(c);
    break;

  case Bytecodes::_lrem:
    // Must keep both values on the expression-stack during null-check
    assert(peek(0) == top(), "long word order");
    do_null_check(peek(1), T_LONG);
    // Compile-time detect of null-exception?
    if (stopped())  return;
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new (C, 3) ModLNode(control(),a,b) );
    push_pair(c);
    break;

  case Bytecodes::_ldiv:
    // Must keep both values on the expression-stack during null-check
    assert(peek(0) == top(), "long word order");
    do_null_check(peek(1), T_LONG);
    // Compile-time detect of null-exception?
    if (stopped())  return;
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new (C, 3) DivLNode(control(),a,b) );
    push_pair(c);
    break;

  case Bytecodes::_ladd:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new (C, 3) AddLNode(a,b) );
    push_pair(c);
    break;
  case Bytecodes::_lsub:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new (C, 3) SubLNode(a,b) );
    push_pair(c);
    break;
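  // (Typical shape: "lcmp; ifgt <target>" first parses to CmpL3 feeding
  //  a CmpI-against-zero and a branch, which the optimizer then collapses
  //  into a single long-compare and branch, as the comment below explains.)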
  case Bytecodes::_lcmp:
    // Safepoints are now inserted _before_ branches.  The long-compare
    // bytecode painfully produces a 3-way value (-1,0,+1) which requires a
    // slew of control flow.  These are usually followed by a CmpI vs zero and
    // a branch; this pattern then optimizes to the obvious long-compare and
    // branch.  However, if the branch is backwards there's a Safepoint
    // inserted.  The inserted Safepoint captures the JVM state at the
    // pre-branch point, i.e. it captures the 3-way value.  Thus if a
    // long-compare is used to control a loop the debug info will force
    // computation of the 3-way value, even though the generated code uses a
    // long-compare and branch.  We try to rectify the situation by inserting
    // a SafePoint here and have it dominate and kill the safepoint added at a
    // following backwards branch.  At this point the JVM state merely holds 2
    // longs but not the 3-way value.
    if( UseLoopSafepoints ) {
      switch( iter().next_bc() ) {
      case Bytecodes::_ifgt:
      case Bytecodes::_iflt:
      case Bytecodes::_ifge:
      case Bytecodes::_ifle:
      case Bytecodes::_ifne:
      case Bytecodes::_ifeq:
        // If this is a backwards branch in the bytecodes, add Safepoint
        maybe_add_safepoint(iter().next_get_dest());
      }
    }
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new (C, 3) CmpL3Node( a, b ));
    push(c);
    break;

  case Bytecodes::_lneg:
    a = pop_pair();
    b = _gvn.transform( new (C, 3) SubLNode(longcon(0),a));
    push_pair(b);
    break;
  case Bytecodes::_l2i:
    a = pop_pair();
    push( _gvn.transform( new (C, 2) ConvL2INode(a)));
    break;
  case Bytecodes::_i2l:
    a = pop();
    b = _gvn.transform( new (C, 2) ConvI2LNode(a));
    push_pair(b);
    break;
  case Bytecodes::_i2b:
    // Sign extend: e.g. 0x000000FF << 24 = 0xFF000000, then arithmetic
    // >> 24 gives 0xFFFFFFFF, so byte 0xFF becomes int -1.
    a = pop();
    a = _gvn.transform( new (C, 3) LShiftINode(a,_gvn.intcon(24)) );
    a = _gvn.transform( new (C, 3) RShiftINode(a,_gvn.intcon(24)) );
    push( a );
    break;
  case Bytecodes::_i2s:
    a = pop();
    a = _gvn.transform( new (C, 3) LShiftINode(a,_gvn.intcon(16)) );
    a = _gvn.transform( new (C, 3) RShiftINode(a,_gvn.intcon(16)) );
    push( a );
    break;
  case Bytecodes::_i2c:
    a = pop();
    push( _gvn.transform( new (C, 3) AndINode(a,_gvn.intcon(0xFFFF)) ) );
    break;

  case Bytecodes::_i2f:
    a = pop();
    b = _gvn.transform( new (C, 2) ConvI2FNode(a) ) ;
    c = precision_rounding(b);
    // (Note: the unrounded value b is what gets pushed; c is unused here.)
    push (b);
    break;

  case Bytecodes::_i2d:
    a = pop();
    b = _gvn.transform( new (C, 2) ConvI2DNode(a));
    push_pair(b);
    break;

  case Bytecodes::_iinc:        // Increment local
    i = iter().get_index();     // Get local index
    set_local( i, _gvn.transform( new (C, 3) AddINode( _gvn.intcon(iter().get_iinc_con()), local(i) ) ) );
    break;

  // Exit points of synchronized methods must have an unlock node
  case Bytecodes::_return:
    return_current(NULL);
    break;

  case Bytecodes::_ireturn:
  case Bytecodes::_areturn:
  case Bytecodes::_freturn:
    return_current(pop());
    break;
  case Bytecodes::_lreturn:
    return_current(pop_pair());
    break;
  case Bytecodes::_dreturn:
    return_current(pop_pair());
    break;

  case Bytecodes::_athrow:
    // null exception oop throws NULL pointer exception
    do_null_check(peek(), T_OBJECT);
    if (stopped())  return;
    // Hook the thrown exception directly to subsequent handlers.
    if (BailoutToInterpreterForThrows) {
      // Keep method interpreted from now on.
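      // (Action_make_not_compilable both deoptimizes here and marks the
      //  method so it will not be queued for compilation again.)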
      uncommon_trap(Deoptimization::Reason_unhandled,
                    Deoptimization::Action_make_not_compilable);
      return;
    }
    if (env()->jvmti_can_post_on_exceptions()) {
      // check if we must post exception events, take uncommon trap if so (with must_throw = false)
      uncommon_trap_if_should_post_on_exceptions(Deoptimization::Reason_unhandled, false);
    }
    // Here if either can_post_on_exceptions or should_post_on_exceptions is false
    add_exception_state(make_exception_state(peek()));
    break;

  case Bytecodes::_goto:   // fall through
  case Bytecodes::_goto_w: {
    int target_bci = (bc() == Bytecodes::_goto) ? iter().get_dest() : iter().get_far_dest();

    // If this is a backwards branch in the bytecodes, add Safepoint
    maybe_add_safepoint(target_bci);

    // Update method data
    profile_taken_branch(target_bci);

    // Merge the current control into the target basic block
    merge(target_bci);

    // See if we can get some profile data and hand it off to the next block
    Block *target_block = block()->successor_for_bci(target_bci);
    if (target_block->pred_count() != 1)  break;
    ciMethodData* methodData = method()->method_data();
    if (!methodData->is_mature())  break;
    ciProfileData* data = methodData->bci_to_data(bci());
    assert( data->is_JumpData(), "" );
    int taken = ((ciJumpData*)data)->taken();
    taken = method()->scale_count(taken);
    target_block->set_count(taken);
    break;
  }

  case Bytecodes::_ifnull:    btest = BoolTest::eq; goto handle_if_null;
  case Bytecodes::_ifnonnull: btest = BoolTest::ne; goto handle_if_null;
  handle_if_null:
    // If this is a backwards branch in the bytecodes, add Safepoint
    maybe_add_safepoint(iter().get_dest());
    a = null();
    b = pop();
    c = _gvn.transform( new (C, 3) CmpPNode(b, a) );
    do_ifnull(btest, c);
    break;

  case Bytecodes::_if_acmpeq: btest = BoolTest::eq; goto handle_if_acmp;
  case Bytecodes::_if_acmpne: btest = BoolTest::ne; goto handle_if_acmp;
  handle_if_acmp:
    // If this is a backwards branch in the bytecodes, add Safepoint
    maybe_add_safepoint(iter().get_dest());
    a = pop();
    b = pop();
    c = _gvn.transform( new (C, 3) CmpPNode(b, a) );
    do_if(btest, c);
    break;

  case Bytecodes::_ifeq: btest = BoolTest::eq; goto handle_ifxx;
  case Bytecodes::_ifne: btest = BoolTest::ne; goto handle_ifxx;
  case Bytecodes::_iflt: btest = BoolTest::lt; goto handle_ifxx;
  case Bytecodes::_ifle: btest = BoolTest::le; goto handle_ifxx;
  case Bytecodes::_ifgt: btest = BoolTest::gt; goto handle_ifxx;
  case Bytecodes::_ifge: btest = BoolTest::ge; goto handle_ifxx;
  handle_ifxx:
    // If this is a backwards branch in the bytecodes, add Safepoint
    maybe_add_safepoint(iter().get_dest());
    a = _gvn.intcon(0);
    b = pop();
    c = _gvn.transform( new (C, 3) CmpINode(b, a) );
    do_if(btest, c);
    break;
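  // (Operand order below: the first pop yields value2 (TOS) into a, the
  //  second yields value1 into b, so CmpI(b, a) compares
  //  value1 <cond> value2 as the bytecode requires.)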
  case Bytecodes::_if_icmpeq: btest = BoolTest::eq; goto handle_if_icmp;
  case Bytecodes::_if_icmpne: btest = BoolTest::ne; goto handle_if_icmp;
  case Bytecodes::_if_icmplt: btest = BoolTest::lt; goto handle_if_icmp;
  case Bytecodes::_if_icmple: btest = BoolTest::le; goto handle_if_icmp;
  case Bytecodes::_if_icmpgt: btest = BoolTest::gt; goto handle_if_icmp;
  case Bytecodes::_if_icmpge: btest = BoolTest::ge; goto handle_if_icmp;
  handle_if_icmp:
    // If this is a backwards branch in the bytecodes, add Safepoint
    maybe_add_safepoint(iter().get_dest());
    a = pop();
    b = pop();
    c = _gvn.transform( new (C, 3) CmpINode( b, a ) );
    do_if(btest, c);
    break;

  case Bytecodes::_tableswitch:
    do_tableswitch();
    break;

  case Bytecodes::_lookupswitch:
    do_lookupswitch();
    break;

  case Bytecodes::_invokestatic:
  case Bytecodes::_invokedynamic:
  case Bytecodes::_invokespecial:
  case Bytecodes::_invokevirtual:
  case Bytecodes::_invokeinterface:
    do_call();
    break;
  case Bytecodes::_checkcast:
    do_checkcast();
    break;
  case Bytecodes::_instanceof:
    do_instanceof();
    break;
  case Bytecodes::_anewarray:
    do_anewarray();
    break;
  case Bytecodes::_newarray:
    do_newarray((BasicType)iter().get_index());
    break;
  case Bytecodes::_multianewarray:
    do_multianewarray();
    break;
  case Bytecodes::_new:
    do_new();
    break;

  case Bytecodes::_jsr:
  case Bytecodes::_jsr_w:
    do_jsr();
    break;

  case Bytecodes::_ret:
    do_ret();
    break;


  case Bytecodes::_monitorenter:
    do_monitor_enter();
    break;

  case Bytecodes::_monitorexit:
    do_monitor_exit();
    break;

  case Bytecodes::_breakpoint:
    // Breakpoint set concurrently to compile
    // %%% use an uncommon trap?
    C->record_failure("breakpoint in method");
    return;

  default:
#ifndef PRODUCT
    map()->dump(99);
#endif
    tty->print("\nUnhandled bytecode %s\n", Bytecodes::name(bc()) );
    ShouldNotReachHere();
  }

#ifndef PRODUCT
  IdealGraphPrinter* printer = IdealGraphPrinter::printer();
  if (printer) {
    char buffer[256];
    sprintf(buffer, "Bytecode %d: %s", bci(), Bytecodes::name(bc()));
    bool old = printer->traverse_outs();
    printer->set_traverse_outs(true);
    printer->print_method(C, buffer, 4);
    printer->set_traverse_outs(old);
  }
#endif
}