/*
 * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/ciMethodData.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "compiler/compileLog.hpp"
#include "interpreter/linkResolver.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "opto/addnode.hpp"
#include "opto/castnode.hpp"
#include "opto/convertnode.hpp"
#include "opto/divnode.hpp"
#include "opto/idealGraphPrinter.hpp"
#include "opto/matcher.hpp"
#include "opto/memnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/opaquenode.hpp"
#include "opto/parse.hpp"
#include "opto/runtime.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/sharedRuntime.hpp"

#ifndef PRODUCT
extern int explicit_null_checks_inserted,
           explicit_null_checks_elided;
#endif

//---------------------------------array_load----------------------------------
void Parse::array_load(BasicType bt) {
  const Type* elemtype = Type::TOP;
  bool big_val = bt == T_DOUBLE || bt == T_LONG;
  Node* adr = array_addressing(bt, 0, &elemtype);
  if (stopped())  return;     // guaranteed null or range check

  pop();                      // index (already used)
  Node* array = pop();        // the array itself

  if (elemtype == TypeInt::BOOL) {
    bt = T_BOOLEAN;
  } else if (bt == T_OBJECT) {
    elemtype = _gvn.type(array)->is_aryptr()->elem()->make_oopptr();
  }

  const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);

  Node* ld = access_load_at(array, adr, adr_type, elemtype, bt,
                            IN_HEAP | IN_HEAP_ARRAY | C2_CONTROL_DEPENDENT_LOAD);
  if (big_val) {
    push_pair(ld);
  } else {
    push(ld);
  }
}
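
// (Note: T_LONG and T_DOUBLE occupy two JVM expression-stack slots, which
// is why big_val selects push_pair()/pop_pair() here, and why array_store()
// below passes "big_val ? 2 : 1" so array_addressing() peeks past the value
// to reach the index and the array reference.)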

//--------------------------------array_store----------------------------------
void Parse::array_store(BasicType bt) {
  const Type* elemtype = Type::TOP;
  bool big_val = bt == T_DOUBLE || bt == T_LONG;
  Node* adr = array_addressing(bt, big_val ? 2 : 1, &elemtype);
  if (stopped())  return;     // guaranteed null or range check
  if (bt == T_OBJECT) {
    array_store_check();
  }
  Node* val;                  // Oop to store
  if (big_val) {
    val = pop_pair();
  } else {
    val = pop();
  }
  pop();                      // index (already used)
  Node* array = pop();        // the array itself

  if (elemtype == TypeInt::BOOL) {
    bt = T_BOOLEAN;
  } else if (bt == T_OBJECT) {
    elemtype = _gvn.type(array)->is_aryptr()->elem()->make_oopptr();
  }

  const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);

  access_store_at(control(), array, adr, adr_type, val, elemtype, bt,
                  MO_UNORDERED | IN_HEAP | IN_HEAP_ARRAY);
}


//------------------------------array_addressing-------------------------------
// Pull array and index from the stack.  Compute pointer-to-element.
Node* Parse::array_addressing(BasicType type, int vals, const Type* *result2) {
  Node *idx = peek(0+vals);   // Get from stack without popping
  Node *ary = peek(1+vals);   // in case of exception

  // Null check the array base, with correct stack contents
  ary = null_check(ary, T_ARRAY);
  // Compile-time detect of null-exception?
  if (stopped())  return top();

  const TypeAryPtr* arytype  = _gvn.type(ary)->is_aryptr();
  const TypeInt*    sizetype = arytype->size();
  const Type*       elemtype = arytype->elem();

  if (UseUniqueSubclasses && result2 != NULL) {
    const Type* el = elemtype->make_ptr();
    if (el && el->isa_instptr()) {
      const TypeInstPtr* toop = el->is_instptr();
      if (toop->klass()->as_instance_klass()->unique_concrete_subklass()) {
        // If we load from "AbstractClass[]" we must see "ConcreteSubClass".
        const Type* subklass = Type::get_const_type(toop->klass());
        elemtype = subklass->join_speculative(el);
      }
    }
  }

  // Check for big class initializers with all constant offsets
  // feeding into a known-size array.
  const TypeInt* idxtype = _gvn.type(idx)->is_int();
  // See if the highest idx value is less than the lowest array bound,
  // and if the idx value cannot be negative:
  bool need_range_check = true;
  if (idxtype->_hi < sizetype->_lo && idxtype->_lo >= 0) {
    need_range_check = false;
    if (C->log() != NULL)  C->log()->elem("observe that='!need_range_check'");
  }

  ciKlass * arytype_klass = arytype->klass();
  if ((arytype_klass != NULL) && (!arytype_klass->is_loaded())) {
    // Only fails for some -Xcomp runs
    // The class is unloaded.  We have to run this bytecode in the interpreter.
    uncommon_trap(Deoptimization::Reason_unloaded,
                  Deoptimization::Action_reinterpret,
                  arytype->klass(), "!loaded array");
    return top();
  }

  // Do the range check
  if (GenerateRangeChecks && need_range_check) {
    Node* tst;
    if (sizetype->_hi <= 0) {
      // The greatest array bound is negative, so we can conclude that we're
      // compiling unreachable code, but the unsigned compare trick used below
      // only works with non-negative lengths.  Instead, hack "tst" to be zero so
      // the uncommon_trap path will always be taken.
      tst = _gvn.intcon(0);
    } else {
      // Range is constant in array-oop, so we can use the original state of mem
      Node* len = load_array_length(ary);

      // Test length vs index (standard trick using unsigned compare)
      Node* chk = _gvn.transform( new CmpUNode(idx, len) );
      BoolTest::mask btest = BoolTest::lt;
      tst = _gvn.transform( new BoolNode(chk, btest) );
    }
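    // (Illustrative note: a single unsigned compare covers both bounds
    // checks because a negative index reinterprets as a huge unsigned
    // value.  E.g. for idx = -1, (juint)idx == 0xFFFFFFFF, which is never
    // below a non-negative length, so "(juint)idx < (juint)len" is
    // equivalent to "0 <= idx && idx < len".)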
    RangeCheckNode* rc = new RangeCheckNode(control(), tst, PROB_MAX, COUNT_UNKNOWN);
    _gvn.set_type(rc, rc->Value(&_gvn));
    if (!tst->is_Con()) {
      record_for_igvn(rc);
    }
    set_control(_gvn.transform(new IfTrueNode(rc)));
    // Branch to failure if out of bounds
    {
      PreserveJVMState pjvms(this);
      set_control(_gvn.transform(new IfFalseNode(rc)));
      if (C->allow_range_check_smearing()) {
        // Do not use builtin_throw, since range checks are sometimes
        // made more stringent by an optimistic transformation.
        // This creates "tentative" range checks at this point,
        // which are not guaranteed to throw exceptions.
        // See IfNode::Ideal, is_range_check, adjust_check.
        uncommon_trap(Deoptimization::Reason_range_check,
                      Deoptimization::Action_make_not_entrant,
                      NULL, "range_check");
      } else {
        // If we have already recompiled with the range-check-widening
        // heroic optimization turned off, then we must really be throwing
        // range check exceptions.
        builtin_throw(Deoptimization::Reason_range_check, idx);
      }
    }
  }
  // Check for always knowing you are throwing a range-check exception
  if (stopped())  return top();

  // Make array address computation control dependent to prevent it
  // from floating above the range check during loop optimizations.
  Node* ptr = array_element_address(ary, idx, type, sizetype, control());

  if (result2 != NULL)  *result2 = elemtype;

  assert(ptr != top(), "top should go hand-in-hand with stopped");

  return ptr;
}


// returns IfNode
IfNode* Parse::jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask, float prob, float cnt) {
  Node   *cmp = _gvn.transform(new CmpINode(a, b)); // two cases: shiftcount > 32 and shiftcount <= 32
  Node   *tst = _gvn.transform(new BoolNode(cmp, mask));
  IfNode *iff = create_and_map_if(control(), tst, prob, cnt);
  return iff;
}

// return Region node
Node* Parse::jump_if_join(Node* iffalse, Node* iftrue) {
  Node *region = new RegionNode(3); // 2 results
  record_for_igvn(region);
  region->init_req(1, iffalse);
  region->init_req(2, iftrue );
  _gvn.set_type(region, Type::CONTROL);
  region = _gvn.transform(region);
  set_control (region);
  return region;
}

// sentinel value for the target bci to mark never taken branches
// (according to profiling)
static const int never_reached = INT_MAX;

//------------------------------helper for tableswitch-------------------------
void Parse::jump_if_true_fork(IfNode *iff, int dest_bci_if_true, int prof_table_index, bool unc) {
  // True branch, use existing map info
  { PreserveJVMState pjvms(this);
    Node *iftrue  = _gvn.transform( new IfTrueNode (iff) );
    set_control( iftrue );
    if (unc) {
      repush_if_args();
      uncommon_trap(Deoptimization::Reason_unstable_if,
                    Deoptimization::Action_reinterpret,
                    NULL,
                    "taken always");
    } else {
      assert(dest_bci_if_true != never_reached, "inconsistent dest");
      profile_switch_case(prof_table_index);
      merge_new_path(dest_bci_if_true);
    }
  }

  // False branch
  Node *iffalse = _gvn.transform( new IfFalseNode(iff) );
  set_control( iffalse );
}

void Parse::jump_if_false_fork(IfNode *iff, int dest_bci_if_true, int prof_table_index, bool unc) {
  // False branch, use existing map info
  { PreserveJVMState pjvms(this);
    Node *iffalse = _gvn.transform( new IfFalseNode (iff) );
    set_control( iffalse );
    if (unc) {
      repush_if_args();
      uncommon_trap(Deoptimization::Reason_unstable_if,
                    Deoptimization::Action_reinterpret,
                    NULL,
                    "taken never");
    } else {
      assert(dest_bci_if_true != never_reached, "inconsistent dest");
      profile_switch_case(prof_table_index);
      merge_new_path(dest_bci_if_true);
    }
  }

  // True branch
  Node *iftrue = _gvn.transform( new IfTrueNode(iff) );
  set_control( iftrue );
}

void Parse::jump_if_always_fork(int dest_bci, int prof_table_index, bool unc) {
  // False branch, use existing map and control()
  if (unc) {
    repush_if_args();
    uncommon_trap(Deoptimization::Reason_unstable_if,
                  Deoptimization::Action_reinterpret,
                  NULL,
                  "taken never");
  } else {
    assert(dest_bci != never_reached, "inconsistent dest");
    profile_switch_case(prof_table_index);
    merge_new_path(dest_bci);
  }
}


extern "C" {
  static int jint_cmp(const void *i, const void *j) {
    int a = *(jint *)i;
    int b = *(jint *)j;
    return a > b ? 1 : a < b ? -1 : 0;
  }
}


// Default value for methodData switch indexing. Must be a negative value to avoid
// conflict with any legal switch index.
#define NullTableIndex -1

class SwitchRange : public StackObj {
  // a range of integers coupled with a bci destination
  jint _lo;                     // inclusive lower limit
  jint _hi;                     // inclusive upper limit
  int  _dest;
  int  _table_index;            // index into method data table
  float _cnt;                   // how many times this range was hit according to profiling

public:
  jint lo() const              { return _lo;   }
  jint hi() const              { return _hi;   }
  int  dest() const            { return _dest; }
  int  table_index() const     { return _table_index; }
  bool is_singleton() const    { return _lo == _hi; }
  float cnt() const            { return _cnt; }

  void setRange(jint lo, jint hi, int dest, int table_index, float cnt) {
    assert(lo <= hi, "must be a non-empty range");
    _lo = lo, _hi = hi; _dest = dest; _table_index = table_index; _cnt = cnt;
    assert(_cnt >= 0, "");
  }
  bool adjoinRange(jint lo, jint hi, int dest, int table_index, float cnt, bool trim_ranges) {
    assert(lo <= hi, "must be a non-empty range");
    if (lo == _hi+1 && table_index == _table_index) {
      // see merge_ranges() comment below
      if (trim_ranges) {
        if (cnt == 0) {
          if (_cnt != 0) {
            return false;
          }
          if (dest != _dest) {
            _dest = never_reached;
          }
        } else {
          if (_cnt == 0) {
            return false;
          }
          if (dest != _dest) {
            return false;
          }
        }
      } else {
        if (dest != _dest) {
          return false;
        }
      }
      _hi = hi;
      _cnt += cnt;
      return true;
    }
    return false;
  }

  void set (jint value, int dest, int table_index, float cnt) {
    setRange(value, value, dest, table_index, cnt);
  }
  bool adjoin(jint value, int dest, int table_index, float cnt, bool trim_ranges) {
    return adjoinRange(value, value, dest, table_index, cnt, trim_ranges);
  }
  bool adjoin(SwitchRange& other) {
    return adjoinRange(other._lo, other._hi, other._dest, other._table_index, other._cnt, false);
  }

  void print() {
    if (is_singleton())
      tty->print(" {%d}=>%d (cnt=%f)", lo(), dest(), cnt());
    else if (lo() == min_jint)
      tty->print(" {..%d}=>%d (cnt=%f)", hi(), dest(), cnt());
    else if (hi() == max_jint)
      tty->print(" {%d..}=>%d (cnt=%f)", lo(), dest(), cnt());
    else
      tty->print(" {%d..%d}=>%d (cnt=%f)", lo(), hi(), dest(), cnt());
  }
};

// We try to minimize the number of ranges and the size of the taken
// ones using profiling data. When ranges are created,
// SwitchRange::adjoinRange() only allows 2 adjoining ranges to merge
// if both were never hit or both were hit to build longer unreached
// ranges. Here, we now merge adjoining ranges with the same
// destination and finally set destination of unreached ranges to the
// special value never_reached because it can help minimize the number
// of tests that are necessary.
//
// For instance:
// [0, 1] to target1 sometimes taken
// [1, 2] to target1 never taken
// [2, 3] to target2 never taken
// would lead to:
// [0, 1] to target1 sometimes taken
// [1, 3] never taken
//
// (first 2 ranges to target1 are not merged)
static void merge_ranges(SwitchRange* ranges, int& rp) {
  if (rp == 0) {
    return;
  }
  int shift = 0;
  for (int j = 0; j < rp; j++) {
    SwitchRange& r1 = ranges[j-shift];
    SwitchRange& r2 = ranges[j+1];
    if (r1.adjoin(r2)) {
      shift++;
    } else if (shift > 0) {
      ranges[j+1-shift] = r2;
    }
  }
  rp -= shift;
  for (int j = 0; j <= rp; j++) {
    SwitchRange& r = ranges[j];
    if (r.cnt() == 0 && r.dest() != never_reached) {
      r.setRange(r.lo(), r.hi(), never_reached, r.table_index(), r.cnt());
    }
  }
}

//-------------------------------do_tableswitch--------------------------------
void Parse::do_tableswitch() {
  Node* lookup = pop();
  // Get information about tableswitch
  int default_dest = iter().get_dest_table(0);
  int lo_index     = iter().get_int_table(1);
  int hi_index     = iter().get_int_table(2);
  int len          = hi_index - lo_index + 1;

  if (len < 1) {
    // If this is a backward branch, add safepoint
    maybe_add_safepoint(default_dest);
    merge(default_dest);
    return;
  }

  ciMethodData* methodData = method()->method_data();
  ciMultiBranchData* profile = NULL;
  if (methodData->is_mature() && UseSwitchProfiling) {
    ciProfileData* data = methodData->bci_to_data(bci());
    if (data != NULL && data->is_MultiBranchData()) {
      profile = (ciMultiBranchData*)data;
    }
  }
  bool trim_ranges = !method_data_update() && !C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if);

  // generate decision tree, using trichotomy when possible
  int rnum = len+2;
  bool makes_backward_branch = false;
  SwitchRange* ranges = NEW_RESOURCE_ARRAY(SwitchRange, rnum);
  int rp = -1;
  if (lo_index != min_jint) {
    uint cnt = 1;
    if (profile != NULL) {
      cnt = profile->default_count() / (hi_index != max_jint ? 2 : 1);
    }
    ranges[++rp].setRange(min_jint, lo_index-1, default_dest, NullTableIndex, cnt);
  }
  for (int j = 0; j < len; j++) {
    jint match_int = lo_index+j;
    int  dest      = iter().get_dest_table(j+3);
    makes_backward_branch |= (dest <= bci());
    int  table_index = method_data_update() ? j : NullTableIndex;
    uint cnt = 1;
    if (profile != NULL) {
      cnt = profile->count_at(j);
    }
    if (rp < 0 || !ranges[rp].adjoin(match_int, dest, table_index, cnt, trim_ranges)) {
      ranges[++rp].set(match_int, dest, table_index, cnt);
    }
  }
  jint highest = lo_index+(len-1);
  assert(ranges[rp].hi() == highest, "");
  if (highest != max_jint) {
    uint cnt = 1;
    if (profile != NULL) {
      cnt = profile->default_count() / (lo_index != min_jint ? 2 : 1);
    }
    if (!ranges[rp].adjoinRange(highest+1, max_jint, default_dest, NullTableIndex, cnt, trim_ranges)) {
      ranges[++rp].setRange(highest+1, max_jint, default_dest, NullTableIndex, cnt);
    }
  }
  assert(rp < len+2, "not too many ranges");

  if (trim_ranges) {
    merge_ranges(ranges, rp);
  }

  // Safepoint in case backward branch observed
  if( makes_backward_branch && UseLoopSafepoints )
    add_safepoint();

  jump_switch_ranges(lookup, &ranges[0], &ranges[rp]);
}
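
// (Illustrative note: for a tableswitch with lo_index = 0, hi_index = 2 and
// destinations d0, d1, d2, the code above builds, before any merging:
//   {min_jint..-1}=>default, {0}=>d0, {1}=>d1, {2}=>d2, {3..max_jint}=>default
// so the ranges always partition the whole jint domain.  The profile's
// default_count is split between the two outer default ranges.)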

//------------------------------do_lookupswitch--------------------------------
void Parse::do_lookupswitch() {
  Node *lookup = pop();         // lookup value
  // Get information about lookupswitch
  int default_dest = iter().get_dest_table(0);
  int len          = iter().get_int_table(1);

  if (len < 1) {    // If this is a backward branch, add safepoint
    maybe_add_safepoint(default_dest);
    merge(default_dest);
    return;
  }

  ciMethodData* methodData = method()->method_data();
  ciMultiBranchData* profile = NULL;
  if (methodData->is_mature() && UseSwitchProfiling) {
    ciProfileData* data = methodData->bci_to_data(bci());
    if (data != NULL && data->is_MultiBranchData()) {
      profile = (ciMultiBranchData*)data;
    }
  }
  bool trim_ranges = !method_data_update() && !C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if);

  // generate decision tree, using trichotomy when possible
  jint* table = NEW_RESOURCE_ARRAY(jint, len*3);
  {
    for (int j = 0; j < len; j++) {
      table[3*j+0] = iter().get_int_table(2+2*j);
      table[3*j+1] = iter().get_dest_table(2+2*j+1);
      table[3*j+2] = profile == NULL ? 1 : profile->count_at(j);
    }
    qsort(table, len, 3*sizeof(table[0]), jint_cmp);
  }

  float defaults = 0;
  jint prev = min_jint;
  for (int j = 0; j < len; j++) {
    jint match_int = table[3*j+0];
    if (match_int != prev) {
      defaults += (float)match_int - prev;
    }
    prev = match_int+1;
  }
  if (prev-1 != max_jint) {
    defaults += (float)max_jint - prev + 1;
  }
  float default_cnt = 1;
  if (profile != NULL) {
    default_cnt = profile->default_count()/defaults;
  }

  int rnum = len*2+1;
  bool makes_backward_branch = false;
  SwitchRange* ranges = NEW_RESOURCE_ARRAY(SwitchRange, rnum);
  int rp = -1;
  for (int j = 0; j < len; j++) {
    jint match_int   = table[3*j+0];
    int  dest        = table[3*j+1];
    int  cnt         = table[3*j+2];
    int  next_lo     = rp < 0 ? min_jint : ranges[rp].hi()+1;
    int  table_index = method_data_update() ? j : NullTableIndex;
    makes_backward_branch |= (dest <= bci());
    float c = default_cnt * ((float)match_int - next_lo);
    if (match_int != next_lo && (rp < 0 || !ranges[rp].adjoinRange(next_lo, match_int-1, default_dest, NullTableIndex, c, trim_ranges))) {
      assert(default_dest != never_reached, "sentinel value for dead destinations");
      ranges[++rp].setRange(next_lo, match_int-1, default_dest, NullTableIndex, c);
    }
    if (rp < 0 || !ranges[rp].adjoin(match_int, dest, table_index, cnt, trim_ranges)) {
      assert(dest != never_reached, "sentinel value for dead destinations");
      ranges[++rp].set(match_int, dest, table_index, cnt);
    }
  }
  jint highest = table[3*(len-1)];
  assert(ranges[rp].hi() == highest, "");
  if (highest != max_jint &&
      !ranges[rp].adjoinRange(highest+1, max_jint, default_dest, NullTableIndex, default_cnt * ((float)max_jint - highest), trim_ranges)) {
    ranges[++rp].setRange(highest+1, max_jint, default_dest, NullTableIndex, default_cnt * ((float)max_jint - highest));
  }
  assert(rp < rnum, "not too many ranges");

  if (trim_ranges) {
    merge_ranges(ranges, rp);
  }

  // Safepoint in case backward branch observed
  if (makes_backward_branch && UseLoopSafepoints)
    add_safepoint();

  jump_switch_ranges(lookup, &ranges[0], &ranges[rp]);
}
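
// (Illustrative note: "defaults" above counts how many jint values fall
// outside the explicit match keys, so default_cnt is the profile's
// default_count spread per uncovered value.  E.g. with default_count = 1000
// and 4e9 uncovered values, a gap range of width 2e6 is charged
// cnt = 1000/4e9 * 2e6 = 0.5, keeping gap ranges commensurate with the
// profiled case counts.)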

static float if_prob(float taken_cnt, float total_cnt) {
  assert(taken_cnt <= total_cnt, "");
  if (total_cnt == 0) {
    return PROB_FAIR;
  }
  float p = taken_cnt / total_cnt;
  return MIN2(MAX2(p, PROB_MIN), PROB_MAX);
}

static float if_cnt(float cnt) {
  if (cnt == 0) {
    return COUNT_UNKNOWN;
  }
  return cnt;
}

static float sum_of_cnts(SwitchRange *lo, SwitchRange *hi) {
  float total_cnt = 0;
  for (SwitchRange* sr = lo; sr <= hi; sr++) {
    total_cnt += sr->cnt();
  }
  return total_cnt;
}

class SwitchRanges : public ResourceObj {
public:
  SwitchRange* _lo;
  SwitchRange* _hi;
  SwitchRange* _mid;
  float _cost;

  enum {
    Start,
    LeftDone,
    RightDone,
    Done
  } _state;

  SwitchRanges(SwitchRange *lo, SwitchRange *hi)
    : _lo(lo), _hi(hi), _mid(NULL),
      _cost(0), _state(Start) {
  }

  SwitchRanges()
    : _lo(NULL), _hi(NULL), _mid(NULL),
      _cost(0), _state(Start) {}
};

// Estimate cost of performing a binary search on lo..hi
static float compute_tree_cost(SwitchRange *lo, SwitchRange *hi, float total_cnt) {
  GrowableArray<SwitchRanges> tree;
  SwitchRanges root(lo, hi);
  tree.push(root);

  float cost = 0;
  do {
    SwitchRanges& r = *tree.adr_at(tree.length()-1);
    if (r._hi != r._lo) {
      if (r._mid == NULL) {
        float r_cnt = sum_of_cnts(r._lo, r._hi);

        if (r_cnt == 0) {
          tree.pop();
          cost = 0;
          continue;
        }

        SwitchRange* mid = NULL;
        mid = r._lo;
        for (float cnt = 0; ; ) {
          assert(mid <= r._hi, "out of bounds");
          cnt += mid->cnt();
          if (cnt > r_cnt / 2) {
            break;
          }
          mid++;
        }
        assert(mid <= r._hi, "out of bounds");
        r._mid = mid;
        r._cost = r_cnt / total_cnt;
      }
      r._cost += cost;
      if (r._state < SwitchRanges::LeftDone && r._mid > r._lo) {
        cost = 0;
        r._state = SwitchRanges::LeftDone;
        tree.push(SwitchRanges(r._lo, r._mid-1));
      } else if (r._state < SwitchRanges::RightDone) {
        cost = 0;
        r._state = SwitchRanges::RightDone;
        tree.push(SwitchRanges(r._mid == r._lo ? r._mid+1 : r._mid, r._hi));
      } else {
        tree.pop();
        cost = r._cost;
      }
    } else {
      tree.pop();
      cost = r._cost;
    }
  } while (tree.length() > 0);


  return cost;
}
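
// (Illustrative note: the value returned above is roughly the expected
// number of comparisons for a key drawn from the profile.  Each internal
// node of the simulated search tree contributes its subtree's frequency
// r_cnt/total_cnt, so every comparison is weighted by how often it is
// reached.  E.g. 4 equally hot ranges give a balanced tree of cost
// 1 + 2*0.5 = 2, matching log2(4).)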

// It sometimes pays off to test most common ranges before the binary search
void Parse::linear_search_switch_ranges(Node* key_val, SwitchRange*& lo, SwitchRange*& hi) {
  uint nr = hi - lo + 1;
  float total_cnt = sum_of_cnts(lo, hi);

  float min = compute_tree_cost(lo, hi, total_cnt);
  float extra = 1;
  float sub = 0;

  SwitchRange* array1 = lo;
  SwitchRange* array2 = NEW_RESOURCE_ARRAY(SwitchRange, nr);

  SwitchRange* ranges = NULL;

  while (nr >= 2) {
    assert(lo == array1 || lo == array2, "one of the 2 already allocated arrays");
    ranges = (lo == array1) ? array2 : array1;

    // Find highest frequency range
    SwitchRange* candidate = lo;
    for (SwitchRange* sr = lo+1; sr <= hi; sr++) {
      if (sr->cnt() > candidate->cnt()) {
        candidate = sr;
      }
    }
    SwitchRange most_freq = *candidate;
    if (most_freq.cnt() == 0) {
      break;
    }

    // Copy remaining ranges into another array
    int shift = 0;
    for (uint i = 0; i < nr; i++) {
      SwitchRange* sr = &lo[i];
      if (sr != candidate) {
        ranges[i-shift] = *sr;
      } else {
        shift++;
        if (i > 0 && i < nr-1) {
          SwitchRange prev = lo[i-1];
          prev.setRange(prev.lo(), sr->hi(), prev.dest(), prev.table_index(), prev.cnt());
          if (prev.adjoin(lo[i+1])) {
            shift++;
            i++;
          }
          ranges[i-shift] = prev;
        }
      }
    }
    nr -= shift;

    // Evaluate cost of testing the most common range and performing a
    // binary search on the other ranges
    float cost = extra + compute_tree_cost(&ranges[0], &ranges[nr-1], total_cnt);
    if (cost >= min) {
      break;
    }
    // swap arrays
    lo = &ranges[0];
    hi = &ranges[nr-1];

    // It pays off: emit the test for the most common range
    assert(most_freq.cnt() > 0, "must be taken");
    Node* val = _gvn.transform(new SubINode(key_val, _gvn.intcon(most_freq.lo())));
    Node* cmp = _gvn.transform(new CmpUNode(val, _gvn.intcon(most_freq.hi() - most_freq.lo())));
    Node* tst = _gvn.transform(new BoolNode(cmp, BoolTest::le));
    IfNode* iff = create_and_map_if(control(), tst, if_prob(most_freq.cnt(), total_cnt), if_cnt(most_freq.cnt()));
    jump_if_true_fork(iff, most_freq.dest(), most_freq.table_index(), false);

    sub += most_freq.cnt() / total_cnt;
    extra += 1 - sub;
    min = cost;
  }
}
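
// (Illustrative note: the peeled test emitted above is the same shifted
// unsigned range check used elsewhere in this file: a range [lo..hi] is
// matched by
//   (juint)(key - lo) <= (juint)(hi - lo)
// which costs one subtraction and one unsigned compare regardless of where
// the range sits in the int domain.)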

//----------------------------create_jump_tables-------------------------------
bool Parse::create_jump_tables(Node* key_val, SwitchRange* lo, SwitchRange* hi) {
  // Are jumptables enabled
  if (!UseJumpTables)  return false;

  // Are jumptables supported
  if (!Matcher::has_match_rule(Op_Jump))  return false;

  // Don't make jump table if profiling
  if (method_data_update())  return false;

  bool trim_ranges = !C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if);

  // Decide if a guard is needed to lop off big ranges at either (or
  // both) end(s) of the input set. We'll call this the default target
  // even though we can't be sure that it is the true "default".

  bool needs_guard = false;
  int default_dest;
  int64_t total_outlier_size = 0;
  int64_t hi_size = ((int64_t)hi->hi()) - ((int64_t)hi->lo()) + 1;
  int64_t lo_size = ((int64_t)lo->hi()) - ((int64_t)lo->lo()) + 1;

  if (lo->dest() == hi->dest()) {
    total_outlier_size = hi_size + lo_size;
    default_dest = lo->dest();
  } else if (lo_size > hi_size) {
    total_outlier_size = lo_size;
    default_dest = lo->dest();
  } else {
    total_outlier_size = hi_size;
    default_dest = hi->dest();
  }

  float total = sum_of_cnts(lo, hi);
  float cost = compute_tree_cost(lo, hi, total);

  // If a guard test will eliminate very sparse end ranges, then
  // it is worth the cost of an extra jump.
  float trimmed_cnt = 0;
  if (total_outlier_size > (MaxJumpTableSparseness * 4)) {
    needs_guard = true;
    if (default_dest == lo->dest()) {
      trimmed_cnt += lo->cnt();
      lo++;
    }
    if (default_dest == hi->dest()) {
      trimmed_cnt += hi->cnt();
      hi--;
    }
  }

  // Find the total number of cases and ranges
  int64_t num_cases = ((int64_t)hi->hi()) - ((int64_t)lo->lo()) + 1;
  int num_range = hi - lo + 1;

  // Don't create table if: too large, too small, or too sparse.
  if (num_cases > MaxJumpTableSize)
    return false;
  if (UseSwitchProfiling) {
    // MinJumpTableSize is set so with a well balanced binary tree,
    // when the number of ranges is MinJumpTableSize, it's cheaper to
    // go through a JumpNode than a tree of IfNodes. Average cost of a
    // tree of IfNodes with MinJumpTableSize is
    // log2f(MinJumpTableSize) comparisons. So if the cost computed
    // from profile data is less than log2f(MinJumpTableSize) then
    // going with the binary search is cheaper.
    if (cost < log2f(MinJumpTableSize)) {
      return false;
    }
  } else {
    if (num_cases < MinJumpTableSize)
      return false;
  }
  if (num_cases > (MaxJumpTableSparseness * num_range))
    return false;

  // Normalize table lookups to zero
  int lowval = lo->lo();
  key_val = _gvn.transform( new SubINode(key_val, _gvn.intcon(lowval)) );

  // Generate a guard to protect against input keyvals that aren't
  // in the switch domain.
  if (needs_guard) {
    Node*   size = _gvn.intcon(num_cases);
    Node*   cmp = _gvn.transform(new CmpUNode(key_val, size));
    Node*   tst = _gvn.transform(new BoolNode(cmp, BoolTest::ge));
    IfNode* iff = create_and_map_if(control(), tst, if_prob(trimmed_cnt, total), if_cnt(trimmed_cnt));
    jump_if_true_fork(iff, default_dest, NullTableIndex, trim_ranges && trimmed_cnt == 0);

    total -= trimmed_cnt;
  }
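
  // (Illustrative note: after the SubI normalization above, any key that was
  // outside [lowval .. lowval+num_cases-1] becomes, as an unsigned value,
  // >= num_cases, so the single CmpU/ge guard catches both too-small and
  // too-large keys and routes them to default_dest.)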

  // Create an ideal node JumpTable that has projections
  // of all possible ranges for a switch statement
  // The key_val input must be converted to a pointer offset and scaled.
  // Compare Parse::array_addressing above.

  // Clean the 32-bit int into a real 64-bit offset.
  // Otherwise, the jint value 0 might turn into an offset of 0x0800000000.
  const TypeInt* ikeytype = TypeInt::make(0, num_cases, Type::WidenMin);
  // Make I2L conversion control dependent to prevent it from
  // floating above the range check during loop optimizations.
  key_val = C->conv_I2X_index(&_gvn, key_val, ikeytype, control());

  // Shift the value by wordsize so we have an index into the table, rather
  // than a switch value
  Node *shiftWord = _gvn.MakeConX(wordSize);
  key_val = _gvn.transform( new MulXNode( key_val, shiftWord));

  // Create the JumpNode
  Arena* arena = C->comp_arena();
  float* probs = (float*)arena->Amalloc(sizeof(float)*num_cases);
  int i = 0;
  if (total == 0) {
    for (SwitchRange* r = lo; r <= hi; r++) {
      for (int64_t j = r->lo(); j <= r->hi(); j++, i++) {
        probs[i] = 1.0F / num_cases;
      }
    }
  } else {
    for (SwitchRange* r = lo; r <= hi; r++) {
      float prob = r->cnt()/total;
      for (int64_t j = r->lo(); j <= r->hi(); j++, i++) {
        probs[i] = prob / (r->hi() - r->lo() + 1);
      }
    }
  }

  ciMethodData* methodData = method()->method_data();
  ciMultiBranchData* profile = NULL;
  if (methodData->is_mature()) {
    ciProfileData* data = methodData->bci_to_data(bci());
    if (data != NULL && data->is_MultiBranchData()) {
      profile = (ciMultiBranchData*)data;
    }
  }

  Node* jtn = _gvn.transform(new JumpNode(control(), key_val, num_cases, probs, profile == NULL ? COUNT_UNKNOWN : total));

  // These are the switch destinations hanging off the jumpnode
  i = 0;
  for (SwitchRange* r = lo; r <= hi; r++) {
    for (int64_t j = r->lo(); j <= r->hi(); j++, i++) {
      Node* input = _gvn.transform(new JumpProjNode(jtn, i, r->dest(), (int)(j - lowval)));
      {
        PreserveJVMState pjvms(this);
        set_control(input);
        jump_if_always_fork(r->dest(), r->table_index(), trim_ranges && r->cnt() == 0);
      }
    }
  }
  assert(i == num_cases, "miscount of cases");
  stop_and_kill_map();  // no more uses for this JVMS
  return true;
}
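
// (Illustrative note: the table built above is dense over
// [lowval .. lowval+num_cases-1]: every value in that span gets its own
// JumpProjNode, so a range such as {5..7}=>d expands to three projections
// that all merge at d.  The probs array spreads a range's profiled
// frequency evenly over the values it covers.)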

//----------------------------jump_switch_ranges-------------------------------
void Parse::jump_switch_ranges(Node* key_val, SwitchRange *lo, SwitchRange *hi, int switch_depth) {
  Block* switch_block = block();
  bool trim_ranges = !method_data_update() && !C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if);

  if (switch_depth == 0) {
    // Do special processing for the top-level call.
    assert(lo->lo() == min_jint, "initial range must exhaust Type::INT");
    assert(hi->hi() == max_jint, "initial range must exhaust Type::INT");

    // Decrement pred-numbers for the unique set of nodes.
#ifdef ASSERT
    if (!trim_ranges) {
      // Ensure that the block's successors are a (duplicate-free) set.
      int successors_counted = 0;  // block occurrences in [hi..lo]
      int unique_successors = switch_block->num_successors();
      for (int i = 0; i < unique_successors; i++) {
        Block* target = switch_block->successor_at(i);

        // Check that the set of successors is the same in both places.
        int successors_found = 0;
        for (SwitchRange* p = lo; p <= hi; p++) {
          if (p->dest() == target->start())  successors_found++;
        }
        assert(successors_found > 0, "successor must be known");
        successors_counted += successors_found;
      }
      assert(successors_counted == (hi-lo)+1, "no unexpected successors");
    }
#endif

    // Maybe prune the inputs, based on the type of key_val.
    jint min_val = min_jint;
    jint max_val = max_jint;
    const TypeInt* ti = key_val->bottom_type()->isa_int();
    if (ti != NULL) {
      min_val = ti->_lo;
      max_val = ti->_hi;
      assert(min_val <= max_val, "invalid int type");
    }
    while (lo->hi() < min_val) {
      lo++;
    }
    if (lo->lo() < min_val) {
      lo->setRange(min_val, lo->hi(), lo->dest(), lo->table_index(), lo->cnt());
    }
    while (hi->lo() > max_val) {
      hi--;
    }
    if (hi->hi() > max_val) {
      hi->setRange(hi->lo(), max_val, hi->dest(), hi->table_index(), hi->cnt());
    }

    linear_search_switch_ranges(key_val, lo, hi);
  }

#ifndef PRODUCT
  if (switch_depth == 0) {
    _max_switch_depth = 0;
    _est_switch_depth = log2_intptr((hi-lo+1)-1)+1;
  }
#endif

  assert(lo <= hi, "must be a non-empty set of ranges");
  if (lo == hi) {
    jump_if_always_fork(lo->dest(), lo->table_index(), trim_ranges && lo->cnt() == 0);
  } else {
    assert(lo->hi() == (lo+1)->lo()-1, "contiguous ranges");
    assert(hi->lo() == (hi-1)->hi()+1, "contiguous ranges");

    if (create_jump_tables(key_val, lo, hi)) return;

    SwitchRange* mid = NULL;
    float total_cnt = sum_of_cnts(lo, hi);

    int nr = hi - lo + 1;
    if (UseSwitchProfiling) {
      // Don't keep the binary search tree balanced: pick the mid point
      // that splits frequencies in half.
      float cnt = 0;
      for (SwitchRange* sr = lo; sr <= hi; sr++) {
        cnt += sr->cnt();
        if (cnt >= total_cnt / 2) {
          mid = sr;
          break;
        }
      }
    } else {
      mid = lo + nr/2;

      // if there is an easy choice, pivot at a singleton:
      if (nr > 3 && !mid->is_singleton() && (mid-1)->is_singleton())  mid--;

      assert(lo < mid && mid <= hi, "good pivot choice");
      assert(nr != 2 || mid == hi,   "should pick higher of 2");
      assert(nr != 3 || mid == hi-1, "should pick middle of 3");
    }
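
    // (Illustrative note: with profiling, the pivot is the first range at
    // which the running count reaches half of total_cnt, so each side of
    // the comparison is reached about equally often.  E.g. for singleton
    // ranges with counts 10, 1, 1, 8 the pivot is the first range: the hot
    // case is then resolved after a single test instead of sitting deeper
    // in a balanced tree.)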

    Node *test_val = _gvn.intcon(mid == lo ? mid->hi() : mid->lo());

    if (mid->is_singleton()) {
      IfNode *iff_ne = jump_if_fork_int(key_val, test_val, BoolTest::ne, 1-if_prob(mid->cnt(), total_cnt), if_cnt(mid->cnt()));
      jump_if_false_fork(iff_ne, mid->dest(), mid->table_index(), trim_ranges && mid->cnt() == 0);

      // Special Case:  If there are exactly three ranges, and the high
      // and low range each go to the same place, omit the "gt" test,
      // since it will not discriminate anything.
      bool eq_test_only = (hi == lo+2 && hi->dest() == lo->dest() && mid == hi-1) || mid == lo;

      // if there is a higher range, test for it and process it:
      if (mid < hi && !eq_test_only) {
        // two comparisons of same values--should enable 1 test for 2 branches
        // Use BoolTest::le instead of BoolTest::gt
        float cnt = sum_of_cnts(lo, mid-1);
        IfNode *iff_le  = jump_if_fork_int(key_val, test_val, BoolTest::le, if_prob(cnt, total_cnt), if_cnt(cnt));
        Node   *iftrue  = _gvn.transform( new IfTrueNode(iff_le) );
        Node   *iffalse = _gvn.transform( new IfFalseNode(iff_le) );
        { PreserveJVMState pjvms(this);
          set_control(iffalse);
          jump_switch_ranges(key_val, mid+1, hi, switch_depth+1);
        }
        set_control(iftrue);
      }

    } else {
      // mid is a range, not a singleton, so treat mid..hi as a unit
      float cnt = sum_of_cnts(mid == lo ? mid+1 : mid, hi);
      IfNode *iff_ge = jump_if_fork_int(key_val, test_val, mid == lo ? BoolTest::gt : BoolTest::ge, if_prob(cnt, total_cnt), if_cnt(cnt));

      // if there is a higher range, test for it and process it:
      if (mid == hi) {
        jump_if_true_fork(iff_ge, mid->dest(), mid->table_index(), trim_ranges && cnt == 0);
      } else {
        Node *iftrue  = _gvn.transform( new IfTrueNode(iff_ge) );
        Node *iffalse = _gvn.transform( new IfFalseNode(iff_ge) );
        { PreserveJVMState pjvms(this);
          set_control(iftrue);
          jump_switch_ranges(key_val, mid == lo ? mid+1 : mid, hi, switch_depth+1);
        }
        set_control(iffalse);
      }
    }

    // in any case, process the lower range
    if (mid == lo) {
      if (mid->is_singleton()) {
        jump_switch_ranges(key_val, lo+1, hi, switch_depth+1);
      } else {
        jump_if_always_fork(lo->dest(), lo->table_index(), trim_ranges && lo->cnt() == 0);
      }
    } else {
      jump_switch_ranges(key_val, lo, mid-1, switch_depth+1);
    }
  }

  // Decrease pred_count for each successor after all is done.
  if (switch_depth == 0) {
    int unique_successors = switch_block->num_successors();
    for (int i = 0; i < unique_successors; i++) {
      Block* target = switch_block->successor_at(i);
      // Throw away the pre-allocated path for each unique successor.
      target->next_path_num();
    }
  }

#ifndef PRODUCT
  _max_switch_depth = MAX2(switch_depth, _max_switch_depth);
  if (TraceOptoParse && Verbose && WizardMode && switch_depth == 0) {
    SwitchRange* r;
    int nsing = 0;
    for( r = lo; r <= hi; r++ ) {
      if( r->is_singleton() )  nsing++;
    }
    tty->print(">>> ");
    _method->print_short_name();
    tty->print_cr(" switch decision tree");
    tty->print_cr("    %d ranges (%d singletons), max_depth=%d, est_depth=%d",
                  (int) (hi-lo+1), nsing, _max_switch_depth, _est_switch_depth);
    if (_max_switch_depth > _est_switch_depth) {
      tty->print_cr("******** BAD SWITCH DEPTH ********");
    }
    tty->print("   ");
    for( r = lo; r <= hi; r++ ) {
      r->print();
    }
    tty->cr();
  }
#endif
}

void Parse::modf() {
  Node *f2 = pop();
  Node *f1 = pop();
  Node* c = make_runtime_call(RC_LEAF, OptoRuntime::modf_Type(),
                              CAST_FROM_FN_PTR(address, SharedRuntime::frem),
                              "frem", NULL, //no memory effects
                              f1, f2);
  Node* res = _gvn.transform(new ProjNode(c, TypeFunc::Parms + 0));

  push(res);
}

void Parse::modd() {
  Node *d2 = pop_pair();
  Node *d1 = pop_pair();
  Node* c = make_runtime_call(RC_LEAF, OptoRuntime::Math_DD_D_Type(),
                              CAST_FROM_FN_PTR(address, SharedRuntime::drem),
                              "drem", NULL, //no memory effects
                              d1, top(), d2, top());
  Node* res_d = _gvn.transform(new ProjNode(c, TypeFunc::Parms + 0));

#ifdef ASSERT
  Node* res_top = _gvn.transform(new ProjNode(c, TypeFunc::Parms + 1));
  assert(res_top == top(), "second value must be top");
#endif

  push_pair(res_d);
}

void Parse::l2f() {
  Node* f2 = pop();
  Node* f1 = pop();
  Node* c = make_runtime_call(RC_LEAF, OptoRuntime::l2f_Type(),
                              CAST_FROM_FN_PTR(address, SharedRuntime::l2f),
                              "l2f", NULL, //no memory effects
                              f1, f2);
  Node* res = _gvn.transform(new ProjNode(c, TypeFunc::Parms + 0));

  push(res);
}

void Parse::do_irem() {
  // Must keep both values on the expression-stack during null-check
  zero_check_int(peek());
  // Compile-time detect of null-exception?
  if (stopped())  return;

  Node* b = pop();
  Node* a = pop();

  const Type *t = _gvn.type(b);
  if (t != Type::TOP) {
    const TypeInt *ti = t->is_int();
    if (ti->is_con()) {
      int divisor = ti->get_con();
      // check for positive power of 2
      if (divisor > 0 &&
          (divisor & ~(divisor-1)) == divisor) {
        // yes !
        Node *mask = _gvn.intcon((divisor - 1));
        // Sigh, must handle negative dividends
        Node *zero = _gvn.intcon(0);
        IfNode *ifff = jump_if_fork_int(a, zero, BoolTest::lt, PROB_FAIR, COUNT_UNKNOWN);
        Node *iff = _gvn.transform( new IfFalseNode(ifff) );
        Node *ift = _gvn.transform( new IfTrueNode (ifff) );
        Node *reg = jump_if_join(ift, iff);
        Node *phi = PhiNode::make(reg, NULL, TypeInt::INT);
        // Negative path; negate/and/negate
        Node *neg = _gvn.transform( new SubINode(zero, a) );
        Node *andn= _gvn.transform( new AndINode(neg, mask) );
        Node *negn= _gvn.transform( new SubINode(zero, andn) );
        phi->init_req(1, negn);
        // Fast positive case
        Node *andx = _gvn.transform( new AndINode(a, mask) );
        phi->init_req(2, andx);
        // Push the merge
        push( _gvn.transform(phi) );
        return;
      }
    }
  }
  // Default case
  push( _gvn.transform( new ModINode(control(),a,b) ) );
}
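
// (Illustrative note on the power-of-2 transform above: Java's % keeps the
// sign of the dividend, so "a & (d-1)" is only valid for a >= 0.  E.g. with
// d = 8: 13 % 8 = 13 & 7 = 5, but -5 % 8 must be -5, which the negative
// path computes as -((-a) & 7) = -((5) & 7) = -5.)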

// Handle jsr and jsr_w bytecode
void Parse::do_jsr() {
  assert(bc() == Bytecodes::_jsr || bc() == Bytecodes::_jsr_w, "wrong bytecode");

  // Store information about current state, tagged with new _jsr_bci
  int return_bci = iter().next_bci();
  int jsr_bci    = (bc() == Bytecodes::_jsr) ? iter().get_dest() : iter().get_far_dest();

  // Update method data
  profile_taken_branch(jsr_bci);

  // The way we do things now, there is only one successor block
  // for the jsr, because the target code is cloned by ciTypeFlow.
  Block* target = successor_for_bci(jsr_bci);

  // What got pushed?
  const Type* ret_addr = target->peek();
  assert(ret_addr->singleton(), "must be a constant (cloned jsr body)");

  // Effect of jsr on stack
  push(_gvn.makecon(ret_addr));

  // Flow to the jsr.
  merge(jsr_bci);
}

// Handle ret bytecode
void Parse::do_ret() {
  // Find to whom we return.
  assert(block()->num_successors() == 1, "a ret can only go one place now");
  Block* target = block()->successor_at(0);
  assert(!target->is_ready(), "our arrival must be expected");
  profile_ret(target->flow()->start());
  int pnum = target->next_path_num();
  merge_common(target, pnum);
}

static bool has_injected_profile(BoolTest::mask btest, Node* test, int& taken, int& not_taken) {
  if (btest != BoolTest::eq && btest != BoolTest::ne) {
    // Only ::eq and ::ne are supported for profile injection.
    return false;
  }
  if (test->is_Cmp() &&
      test->in(1)->Opcode() == Op_ProfileBoolean) {
    ProfileBooleanNode* profile = (ProfileBooleanNode*)test->in(1);
    int false_cnt = profile->false_count();
    int  true_cnt = profile->true_count();

    // Counts matching depends on the actual test operation (::eq or ::ne).
    // No need to scale the counts because profile injection was designed
    // to feed exact counts into VM.
    taken     = (btest == BoolTest::eq) ? false_cnt :  true_cnt;
    not_taken = (btest == BoolTest::eq) ?  true_cnt : false_cnt;

    profile->consume();
    return true;
  }
  return false;
}
//--------------------------dynamic_branch_prediction--------------------------
// Try to gather dynamic branch prediction behavior.  Return a probability
// of the branch being taken and set the "cnt" field.  Returns a -1.0
// if we need to use static prediction for some reason.
float Parse::dynamic_branch_prediction(float &cnt, BoolTest::mask btest, Node* test) {
  ResourceMark rm;

  cnt = COUNT_UNKNOWN;

  int     taken = 0;
  int not_taken = 0;

  bool use_mdo = !has_injected_profile(btest, test, taken, not_taken);

  if (use_mdo) {
    // Use MethodData information if it is available
    // FIXME: free the ProfileData structure
    ciMethodData* methodData = method()->method_data();
    if (!methodData->is_mature())  return PROB_UNKNOWN;
    ciProfileData* data = methodData->bci_to_data(bci());
    if (data == NULL) {
      return PROB_UNKNOWN;
    }
    if (!data->is_JumpData())  return PROB_UNKNOWN;

    // get taken and not taken values
    taken = data->as_JumpData()->taken();
    not_taken = 0;
    if (data->is_BranchData()) {
      not_taken = data->as_BranchData()->not_taken();
    }

    // scale the counts to be commensurate with invocation counts:
    taken = method()->scale_count(taken);
    not_taken = method()->scale_count(not_taken);
  }

  // Give up if too few (or too many, in which case the sum will overflow) counts to be meaningful.
  // We also check that individual counters are non-negative first, otherwise the sum could still
  // appear positive after one of them has overflowed.
  if (taken < 0 || not_taken < 0 || taken + not_taken < 40) {
    if (C->log() != NULL) {
      C->log()->elem("branch target_bci='%d' taken='%d' not_taken='%d'", iter().get_dest(), taken, not_taken);
    }
    return PROB_UNKNOWN;
  }

  // Compute frequency that we arrive here
  float sum = taken + not_taken;
  // Adjust, if this block is a cloned private block but the
  // Jump counts are shared.  Take the private counts for
  // just this path instead of the shared counts.
  if( block()->count() > 0 )
    sum = block()->count();
  cnt = sum / FreqCountInvocations;

  // Pin probability to sane limits
  float prob;
  if( !taken )
    prob = (0+PROB_MIN) / 2;
  else if( !not_taken )
    prob = (1+PROB_MAX) / 2;
  else {                         // Compute probability of true path
    prob = (float)taken / (float)(taken + not_taken);
    if (prob > PROB_MAX)  prob = PROB_MAX;
    if (prob < PROB_MIN)  prob = PROB_MIN;
  }

  assert((cnt > 0.0f) && (prob > 0.0f),
         "Bad frequency assignment in if");

  if (C->log() != NULL) {
    const char* prob_str = NULL;
    if (prob >= PROB_MAX)  prob_str = (prob == PROB_MAX) ? "max" : "always";
    if (prob <= PROB_MIN)  prob_str = (prob == PROB_MIN) ? "min" : "never";
    char prob_str_buf[30];
    if (prob_str == NULL) {
      sprintf(prob_str_buf, "%g", prob);
      prob_str = prob_str_buf;
    }
    C->log()->elem("branch target_bci='%d' taken='%d' not_taken='%d' cnt='%f' prob='%s'",
                   iter().get_dest(), taken, not_taken, cnt, prob_str);
  }
  return prob;
}
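
// (Illustrative note: the pinning above keeps probabilities strictly inside
// (0, 1) so later passes can still distinguish "observed never taken" from
// merely rare.  E.g. taken = 0, not_taken = 100 yields prob = PROB_MIN/2,
// which is below PROB_MIN and is exactly what seems_never_taken() below
// tests for.)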
"min" : "never"; 1347 char prob_str_buf[30]; 1348 if (prob_str == NULL) { 1349 sprintf(prob_str_buf, "%g", prob); 1350 prob_str = prob_str_buf; 1351 } 1352 C->log()->elem("branch target_bci='%d' taken='%d' not_taken='%d' cnt='%f' prob='%s'", 1353 iter().get_dest(), taken, not_taken, cnt, prob_str); 1354 } 1355 return prob; 1356 } 1357 1358 //-----------------------------branch_prediction------------------------------- 1359 float Parse::branch_prediction(float& cnt, 1360 BoolTest::mask btest, 1361 int target_bci, 1362 Node* test) { 1363 float prob = dynamic_branch_prediction(cnt, btest, test); 1364 // If prob is unknown, switch to static prediction 1365 if (prob != PROB_UNKNOWN) return prob; 1366 1367 prob = PROB_FAIR; // Set default value 1368 if (btest == BoolTest::eq) // Exactly equal test? 1369 prob = PROB_STATIC_INFREQUENT; // Assume its relatively infrequent 1370 else if (btest == BoolTest::ne) 1371 prob = PROB_STATIC_FREQUENT; // Assume its relatively frequent 1372 1373 // If this is a conditional test guarding a backwards branch, 1374 // assume its a loop-back edge. Make it a likely taken branch. 1375 if (target_bci < bci()) { 1376 if (is_osr_parse()) { // Could be a hot OSR'd loop; force deopt 1377 // Since it's an OSR, we probably have profile data, but since 1378 // branch_prediction returned PROB_UNKNOWN, the counts are too small. 1379 // Let's make a special check here for completely zero counts. 1380 ciMethodData* methodData = method()->method_data(); 1381 if (!methodData->is_empty()) { 1382 ciProfileData* data = methodData->bci_to_data(bci()); 1383 // Only stop for truly zero counts, which mean an unknown part 1384 // of the OSR-ed method, and we want to deopt to gather more stats. 1385 // If you have ANY counts, then this loop is simply 'cold' relative 1386 // to the OSR loop. 1387 if (data == NULL || 1388 (data->as_BranchData()->taken() + data->as_BranchData()->not_taken() == 0)) { 1389 // This is the only way to return PROB_UNKNOWN: 1390 return PROB_UNKNOWN; 1391 } 1392 } 1393 } 1394 prob = PROB_STATIC_FREQUENT; // Likely to take backwards branch 1395 } 1396 1397 assert(prob != PROB_UNKNOWN, "must have some guess at this point"); 1398 return prob; 1399 } 1400 1401 // The magic constants are chosen so as to match the output of 1402 // branch_prediction() when the profile reports a zero taken count. 1403 // It is important to distinguish zero counts unambiguously, because 1404 // some branches (e.g., _213_javac.Assembler.eliminate) validly produce 1405 // very small but nonzero probabilities, which if confused with zero 1406 // counts would keep the program recompiling indefinitely. 1407 bool Parse::seems_never_taken(float prob) const { 1408 return prob < PROB_MIN; 1409 } 1410 1411 // True if the comparison seems to be the kind that will not change its 1412 // statistics from true to false. See comments in adjust_map_after_if. 1413 // This question is only asked along paths which are already 1414 // classifed as untaken (by seems_never_taken), so really, 1415 // if a path is never taken, its controlling comparison is 1416 // already acting in a stable fashion. If the comparison 1417 // seems stable, we will put an expensive uncommon trap 1418 // on the untaken path. 

//-------------------------------repush_if_args--------------------------------
// Push arguments of an "if" bytecode back onto the stack by adjusting _sp.
inline int Parse::repush_if_args() {
  if (PrintOpto && WizardMode) {
    tty->print("defending against excessive implicit null exceptions on %s @%d in ",
               Bytecodes::name(iter().cur_bc()), iter().cur_bci());
    method()->print_name(); tty->cr();
  }
  int bc_depth = - Bytecodes::depth(iter().cur_bc());
  assert(bc_depth == 1 || bc_depth == 2, "only two kinds of branches");
  DEBUG_ONLY(sync_jvms());   // argument(n) requires a synced jvms
  assert(argument(0) != NULL, "must exist");
  assert(bc_depth == 1 || argument(1) != NULL, "two must exist");
  inc_sp(bc_depth);
  return bc_depth;
}

//----------------------------------do_ifnull----------------------------------
void Parse::do_ifnull(BoolTest::mask btest, Node *c) {
  int target_bci = iter().get_dest();

  Block* branch_block = successor_for_bci(target_bci);
  Block* next_block   = successor_for_bci(iter().next_bci());

  float cnt;
  float prob = branch_prediction(cnt, btest, target_bci, c);
  if (prob == PROB_UNKNOWN) {
    // (An earlier version of do_ifnull omitted this trap for OSR methods.)
    if (PrintOpto && Verbose) {
      tty->print_cr("Never-taken edge stops compilation at bci %d", bci());
    }
    repush_if_args(); // to gather stats on loop
    // We need to mark this branch as taken so that if we recompile we will
    // see that it is possible. In the tiered system the interpreter doesn't
    // do profiling and by the time we get to the lower tier from the interpreter
    // the path may be cold again. Make sure it doesn't look untaken
    profile_taken_branch(target_bci, !ProfileInterpreter);
    uncommon_trap(Deoptimization::Reason_unreached,
                  Deoptimization::Action_reinterpret,
                  NULL, "cold");
    if (C->eliminate_boxing()) {
      // Mark the successor blocks as parsed
      branch_block->next_path_num();
      next_block->next_path_num();
    }
    return;
  }

  NOT_PRODUCT(explicit_null_checks_inserted++);

  // Generate real control flow
  Node *tst = _gvn.transform( new BoolNode( c, btest ) );

  // Sanity check the probability value
  assert(prob > 0.0f,"Bad probability in Parser");
  // Need xform to put node in hash table
  IfNode *iff = create_and_xform_if( control(), tst, prob, cnt );
  assert(iff->_prob > 0.0f,"Optimizer made bad probability in parser");
  // True branch
  { PreserveJVMState pjvms(this);
    Node* iftrue = _gvn.transform( new IfTrueNode (iff) );
    set_control(iftrue);

    if (stopped()) {            // Path is dead?
      NOT_PRODUCT(explicit_null_checks_elided++);
      if (C->eliminate_boxing()) {
        // Mark the successor block as parsed
        branch_block->next_path_num();
      }
    } else {                    // Path is live.
      // Update method data
      profile_taken_branch(target_bci);
      adjust_map_after_if(btest, c, prob, branch_block, next_block);
      if (!stopped()) {
        merge(target_bci);
      }
    }
  }

  // False branch
  Node* iffalse = _gvn.transform( new IfFalseNode(iff) );
  set_control(iffalse);

  if (stopped()) {              // Path is dead?
    NOT_PRODUCT(explicit_null_checks_elided++);
    if (C->eliminate_boxing()) {
      // Mark the successor block as parsed
      next_block->next_path_num();
    }
  } else {                      // Path is live.
    // Update method data
    profile_not_taken_branch();
    adjust_map_after_if(BoolTest(btest).negate(), c, 1.0-prob,
                        next_block, branch_block);
  }
}

//------------------------------------do_if------------------------------------
void Parse::do_if(BoolTest::mask btest, Node* c) {
  int target_bci = iter().get_dest();

  Block* branch_block = successor_for_bci(target_bci);
  Block* next_block   = successor_for_bci(iter().next_bci());

  float cnt;
  float prob = branch_prediction(cnt, btest, target_bci, c);
  float untaken_prob = 1.0 - prob;

  if (prob == PROB_UNKNOWN) {
    if (PrintOpto && Verbose) {
      tty->print_cr("Never-taken edge stops compilation at bci %d", bci());
    }
    repush_if_args(); // to gather stats on loop
    // We need to mark this branch as taken so that if we recompile we will
    // see that it is possible. In the tiered system the interpreter doesn't
    // do profiling and by the time we get to the lower tier from the interpreter
    // the path may be cold again. Make sure it doesn't look untaken
    profile_taken_branch(target_bci, !ProfileInterpreter);
    uncommon_trap(Deoptimization::Reason_unreached,
                  Deoptimization::Action_reinterpret,
                  NULL, "cold");
    if (C->eliminate_boxing()) {
      // Mark the successor blocks as parsed
      branch_block->next_path_num();
      next_block->next_path_num();
    }
    return;
  }

  // Sanity check the probability value
  assert(0.0f < prob && prob < 1.0f,"Bad probability in Parser");

  bool taken_if_true = true;
  // Convert BoolTest to canonical form:
  if (!BoolTest(btest).is_canonical()) {
    btest = BoolTest(btest).negate();
    taken_if_true = false;
    // prob is NOT updated here; it remains the probability of the taken
    // path (as opposed to the prob of the path guarded by an 'IfTrueNode').
  }
  assert(btest != BoolTest::eq, "!= is the only canonical exact test");

  Node* tst0 = new BoolNode(c, btest);
  Node* tst = _gvn.transform(tst0);
  BoolTest::mask taken_btest   = BoolTest::illegal;
  BoolTest::mask untaken_btest = BoolTest::illegal;

  if (tst->is_Bool()) {
    // Refresh c from the transformed bool node, since it may be
    // simpler than the original c.  Also re-canonicalize btest.
    // This wins when (Bool ne (Conv2B p) 0) => (Bool ne (CmpP p NULL)).
    // That can arise from statements like: if (x instanceof C) ...
    if (tst != tst0) {
      // Canonicalize one more time since transform can change it.
      btest = tst->as_Bool()->_test._test;
      if (!BoolTest(btest).is_canonical()) {
        // Reverse edges one more time...
        tst   = _gvn.transform( tst->as_Bool()->negate(&_gvn) );
        btest = tst->as_Bool()->_test._test;
        assert(BoolTest(btest).is_canonical(), "sanity");
        taken_if_true = !taken_if_true;
      }
      c = tst->in(1);
    }
    BoolTest::mask neg_btest = BoolTest(btest).negate();
    taken_btest   = taken_if_true ?     btest : neg_btest;
    untaken_btest = taken_if_true ? neg_btest :     btest;
  }
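
  // (Illustrative note: only some BoolTest masks are canonical; per the
  // assert above, eq in particular is not.  So a bytecode "if (x == 0)" is
  // parsed with btest negated to ne and taken_if_true = false: the
  // bytecode's taken path then hangs off the IfFalse projection, and
  // taken_btest/untaken_btest record which constraint holds on each side.)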

  // Generate real control flow
  float true_prob = (taken_if_true ? prob : untaken_prob);
  IfNode* iff = create_and_map_if(control(), tst, true_prob, cnt);
  assert(iff->_prob > 0.0f,"Optimizer made bad probability in parser");
  Node* taken_branch   = new IfTrueNode(iff);
  Node* untaken_branch = new IfFalseNode(iff);
  if (!taken_if_true) {  // Finish conversion to canonical form
    Node* tmp      = taken_branch;
    taken_branch   = untaken_branch;
    untaken_branch = tmp;
  }

  // Branch is taken:
  { PreserveJVMState pjvms(this);
    taken_branch = _gvn.transform(taken_branch);
    set_control(taken_branch);

    if (stopped()) {
      if (C->eliminate_boxing()) {
        // Mark the successor block as parsed
        branch_block->next_path_num();
      }
    } else {
      // Update method data
      profile_taken_branch(target_bci);
      adjust_map_after_if(taken_btest, c, prob, branch_block, next_block);
      if (!stopped()) {
        merge(target_bci);
      }
    }
  }

  untaken_branch = _gvn.transform(untaken_branch);
  set_control(untaken_branch);

  // Branch not taken.
  if (stopped()) {
    if (C->eliminate_boxing()) {
      // Mark the successor block as parsed
      next_block->next_path_num();
    }
  } else {
    // Update method data
    profile_not_taken_branch();
    adjust_map_after_if(untaken_btest, c, untaken_prob,
                        next_block, branch_block);
  }
}

bool Parse::path_is_suitable_for_uncommon_trap(float prob) const {
  // Don't want to speculate on uncommon traps when running with -Xcomp
  if (!UseInterpreter) {
    return false;
  }
  return (seems_never_taken(prob) && seems_stable_comparison());
}

//----------------------------adjust_map_after_if------------------------------
// Adjust the JVM state to reflect the result of taking this path.
// Basically, it means inspecting the CmpNode controlling this
// branch, seeing how it constrains a tested value, and then
// deciding if it's worth our while to encode this constraint
// as graph nodes in the current abstract interpretation map.
void Parse::adjust_map_after_if(BoolTest::mask btest, Node* c, float prob,
                                Block* path, Block* other_path) {
  if (stopped() || !c->is_Cmp() || btest == BoolTest::illegal)
    return;                             // nothing to do

  bool is_fallthrough = (path == successor_for_bci(iter().next_bci()));

  if (path_is_suitable_for_uncommon_trap(prob)) {
    repush_if_args();
    uncommon_trap(Deoptimization::Reason_unstable_if,
                  Deoptimization::Action_reinterpret,
                  NULL,
                  (is_fallthrough ? "taken always" : "taken never"));
    return;
  }

  Node* val = c->in(1);
  Node* con = c->in(2);
  const Type* tcon = _gvn.type(con);
  const Type* tval = _gvn.type(val);
  bool have_con = tcon->singleton();
  if (tval->singleton()) {
    if (!have_con) {
      // Swap, so constant is in con.
      con  = val;
      tcon = tval;
      val  = c->in(2);
      tval = _gvn.type(val);
      btest = BoolTest(btest).commute();
      have_con = true;
    } else {
      // Do we have two constants?  Then leave well enough alone.
      have_con = false;
    }
  }
  if (!have_con)                        // remaining adjustments need a con
    return;

  sharpen_type_after_if(btest, con, tcon, val, tval);
}
  if (!have_con)                        // remaining adjustments need a con
    return;

  sharpen_type_after_if(btest, con, tcon, val, tval);
}


static Node* extract_obj_from_klass_load(PhaseGVN* gvn, Node* n) {
  Node* ldk;
  if (n->is_DecodeNKlass()) {
    if (n->in(1)->Opcode() != Op_LoadNKlass) {
      return NULL;
    } else {
      ldk = n->in(1);
    }
  } else if (n->Opcode() != Op_LoadKlass) {
    return NULL;
  } else {
    ldk = n;
  }
  assert(ldk != NULL && ldk->is_Load(), "should have found a LoadKlass or LoadNKlass node");

  Node* adr = ldk->in(MemNode::Address);
  intptr_t off = 0;
  Node* obj = AddPNode::Ideal_base_and_offset(adr, gvn, off);
  if (obj == NULL || off != oopDesc::klass_offset_in_bytes()) // loading oopDesc::_klass?
    return NULL;
  const TypePtr* tp = gvn->type(obj)->is_ptr();
  if (tp == NULL || !(tp->isa_instptr() || tp->isa_aryptr())) // is obj a Java object ptr?
    return NULL;

  return obj;
}

void Parse::sharpen_type_after_if(BoolTest::mask btest,
                                  Node* con, const Type* tcon,
                                  Node* val, const Type* tval) {
  // Look for opportunities to sharpen the type of a node
  // whose klass is compared with a constant klass.
  if (btest == BoolTest::eq && tcon->isa_klassptr()) {
    Node* obj = extract_obj_from_klass_load(&_gvn, val);
    const TypeOopPtr* con_type = tcon->isa_klassptr()->as_instance_type();
    if (obj != NULL && (con_type->isa_instptr() || con_type->isa_aryptr())) {
      // Found:
      //   Bool(CmpP(LoadKlass(obj._klass), ConP(Foo.klass)), [eq])
      // or the narrowOop equivalent.
      const Type* obj_type = _gvn.type(obj);
      const TypeOopPtr* tboth = obj_type->join_speculative(con_type)->isa_oopptr();
      if (tboth != NULL && tboth->klass_is_exact() && tboth != obj_type &&
          tboth->higher_equal(obj_type)) {
        // obj has to be of the exact type Foo if the CmpP succeeds.
        int obj_in_map = map()->find_edge(obj);
        JVMState* jvms = this->jvms();
        if (obj_in_map >= 0 &&
            (jvms->is_loc(obj_in_map) || jvms->is_stk(obj_in_map))) {
          TypeNode* ccast = new CheckCastPPNode(control(), obj, tboth);
          const Type* tcc = ccast->as_Type()->type();
          assert(tcc != obj_type && tcc->higher_equal(obj_type), "must improve");
          // Delay transform() call to allow recovery of pre-cast value
          // at the control merge.
          _gvn.set_type_bottom(ccast);
          record_for_igvn(ccast);
          // Here's the payoff.
          replace_in_map(obj, ccast);
        }
      }
    }
  }
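  // Editor's illustration (assumed Java shape, not from this file):
  //   if (x.getClass() == Foo.class) { ... use x ... }
  // parses to Bool(CmpP(LoadKlass(x._klass), ConP(Foo.klass)), [eq]); on the
  // taken path the CheckCastPP above re-types x as exactly Foo in the map.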
  int val_in_map = map()->find_edge(val);
  if (val_in_map < 0)  return;          // replace_in_map would be useless
  {
    JVMState* jvms = this->jvms();
    if (!(jvms->is_loc(val_in_map) ||
          jvms->is_stk(val_in_map)))
      return;                           // again, it would be useless
  }

  // Check for a comparison to a constant, and "know" that the compared
  // value is constrained on this path.
  assert(tcon->singleton(), "");
  ConstraintCastNode* ccast = NULL;
  Node* cast = NULL;

  switch (btest) {
  case BoolTest::eq:                    // Constant test?
    {
      const Type* tboth = tcon->join_speculative(tval);
      if (tboth == tval)  break;        // Nothing to gain.
      if (tcon->isa_int()) {
        ccast = new CastIINode(val, tboth);
      } else if (tcon == TypePtr::NULL_PTR) {
        // Cast to null, but keep the pointer identity temporarily live.
        // (Editor's note: a CastPP keeps 'val' recoverable for the map
        //  merge below, unlike replacing it with a raw null constant.)
        ccast = new CastPPNode(val, tboth);
      } else {
        const TypeF* tf = tcon->isa_float_constant();
        const TypeD* td = tcon->isa_double_constant();
        // Exclude tests vs float/double 0 as these could be
        // either +0 or -0.  Just because you are equal to +0
        // doesn't mean you ARE +0!
        // (Editor's note: +0.0 == -0.0 is true, yet 1/+0.0 and 1/-0.0
        //  differ in sign, so the two zeroes must stay distinguishable.)
        // Note, following code also replaces Long and Oop values.
        if ((!tf || tf->_f != 0.0) &&
            (!td || td->_d != 0.0))
          cast = con;                   // Replace non-constant val by con.
      }
    }
    break;

  case BoolTest::ne:
    if (tcon == TypePtr::NULL_PTR) {
      cast = cast_not_null(val, false);
    }
    break;

  default:
    // (At this point we could record int range types with CastII.)
    break;
  }

  if (ccast != NULL) {
    const Type* tcc = ccast->as_Type()->type();
    assert(tcc != tval && tcc->higher_equal(tval), "must improve");
    // Delay transform() call to allow recovery of pre-cast value
    // at the control merge.
    ccast->set_req(0, control());
    _gvn.set_type_bottom(ccast);
    record_for_igvn(ccast);
    cast = ccast;
  }

  if (cast != NULL) {                   // Here's the payoff.
    replace_in_map(val, cast);
  }
}

/**
 * Use speculative type to optimize CmpP node: if comparison is
 * against the low level class, cast the object to the speculative
 * type if any. CmpP should then go away.
 *
 * @param c  expected CmpP node
 * @return   result of CmpP on object cast to speculative type
 *
 */
Node* Parse::optimize_cmp_with_klass(Node* c) {
  // If this is transformed by the _gvn to a comparison with the low
  // level klass then we may be able to use speculation
  if (c->Opcode() == Op_CmpP &&
      (c->in(1)->Opcode() == Op_LoadKlass || c->in(1)->Opcode() == Op_DecodeNKlass) &&
      c->in(2)->is_Con()) {
    Node* load_klass = NULL;
    Node* decode = NULL;
    if (c->in(1)->Opcode() == Op_DecodeNKlass) {
      decode = c->in(1);
      load_klass = c->in(1)->in(1);
    } else {
      load_klass = c->in(1);
    }
    if (load_klass->in(2)->is_AddP()) {
      Node* addp = load_klass->in(2);
      Node* obj = addp->in(AddPNode::Address);
      const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
      if (obj_type->speculative_type_not_null() != NULL) {
        ciKlass* k = obj_type->speculative_type();
        inc_sp(2);
        obj = maybe_cast_profiled_obj(obj, k);
        dec_sp(2);
        // Make the CmpP use the casted obj
        addp = basic_plus_adr(obj, addp->in(AddPNode::Offset));
        load_klass = load_klass->clone();
        load_klass->set_req(2, addp);
        load_klass = _gvn.transform(load_klass);
        if (decode != NULL) {
          decode = decode->clone();
          decode->set_req(1, load_klass);
          load_klass = _gvn.transform(decode);
        }
        c = c->clone();
        c->set_req(1, load_klass);
        c = _gvn.transform(c);
      }
    }
  }
  return c;
}
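// Editor's illustration for optimize_cmp_with_klass (hypothetical profile,
// not from this file): if x's speculative (profiled) type is exactly Foo,
// then for a test like
//   x.getClass() == Foo.class
// casting x to Foo first lets GVN fold CmpP(LoadKlass(x._klass), ConP(Foo.klass))
// to a constant, so both the klass load and the compare go away.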
//------------------------------do_one_bytecode--------------------------------
// Parse this bytecode, and alter the Parser's JVM->Node mapping
void Parse::do_one_bytecode() {
  Node *a, *b, *c, *d;          // Handy temps
  BoolTest::mask btest;
  int i;

  assert(!has_exceptions(), "bytecode entry state must be clear of throws");

  if (C->check_node_count(NodeLimitFudgeFactor * 5,
                          "out of nodes parsing method")) {
    return;
  }

#ifdef ASSERT
  // for setting breakpoints
  if (TraceOptoParse) {
    tty->print(" @");
    dump_bci(bci());
    tty->cr();
  }
#endif

  switch (bc()) {
  case Bytecodes::_nop:
    // do nothing
    break;
  case Bytecodes::_lconst_0:
    push_pair(longcon(0));
    break;

  case Bytecodes::_lconst_1:
    push_pair(longcon(1));
    break;

  case Bytecodes::_fconst_0:
    push(zerocon(T_FLOAT));
    break;

  case Bytecodes::_fconst_1:
    push(makecon(TypeF::ONE));
    break;

  case Bytecodes::_fconst_2:
    push(makecon(TypeF::make(2.0f)));
    break;

  case Bytecodes::_dconst_0:
    push_pair(zerocon(T_DOUBLE));
    break;

  case Bytecodes::_dconst_1:
    push_pair(makecon(TypeD::ONE));
    break;

  case Bytecodes::_iconst_m1: push(intcon(-1)); break;
  case Bytecodes::_iconst_0:  push(intcon( 0)); break;
  case Bytecodes::_iconst_1:  push(intcon( 1)); break;
  case Bytecodes::_iconst_2:  push(intcon( 2)); break;
  case Bytecodes::_iconst_3:  push(intcon( 3)); break;
  case Bytecodes::_iconst_4:  push(intcon( 4)); break;
  case Bytecodes::_iconst_5:  push(intcon( 5)); break;
  case Bytecodes::_bipush:    push(intcon(iter().get_constant_u1())); break;
  case Bytecodes::_sipush:    push(intcon(iter().get_constant_u2())); break;
  case Bytecodes::_aconst_null: push(null()); break;
  case Bytecodes::_ldc:
  case Bytecodes::_ldc_w:
  case Bytecodes::_ldc2_w:
    // If the constant is unresolved, run this BC once in the interpreter.
    {
      ciConstant constant = iter().get_constant();
      if (!constant.is_valid() ||
          (constant.basic_type() == T_OBJECT &&
           !constant.as_object()->is_loaded())) {
        int index = iter().get_constant_pool_index();
        constantTag tag = iter().get_constant_pool_tag(index);
        uncommon_trap(Deoptimization::make_trap_request
                      (Deoptimization::Reason_unloaded,
                       Deoptimization::Action_reinterpret,
                       index),
                      NULL, tag.internal_name());
        break;
      }
      assert(constant.basic_type() != T_OBJECT || constant.as_object()->is_instance(),
             "must be java_mirror of klass");
      const Type* con_type = Type::make_from_constant(constant);
      if (con_type != NULL) {
        push_node(con_type->basic_type(), makecon(con_type));
      }
    }
    break;
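  // Editor's note on the ldc path above: trapping with Reason_unloaded runs
  // the ldc once in the interpreter, which resolves the constant-pool entry;
  // a later recompilation then sees a loaded constant and folds it directly.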
  case Bytecodes::_aload_0:
    push( local(0) );
    break;
  case Bytecodes::_aload_1:
    push( local(1) );
    break;
  case Bytecodes::_aload_2:
    push( local(2) );
    break;
  case Bytecodes::_aload_3:
    push( local(3) );
    break;
  case Bytecodes::_aload:
    push( local(iter().get_index()) );
    break;

  case Bytecodes::_fload_0:
  case Bytecodes::_iload_0:
    push( local(0) );
    break;
  case Bytecodes::_fload_1:
  case Bytecodes::_iload_1:
    push( local(1) );
    break;
  case Bytecodes::_fload_2:
  case Bytecodes::_iload_2:
    push( local(2) );
    break;
  case Bytecodes::_fload_3:
  case Bytecodes::_iload_3:
    push( local(3) );
    break;
  case Bytecodes::_fload:
  case Bytecodes::_iload:
    push( local(iter().get_index()) );
    break;
  case Bytecodes::_lload_0:
    push_pair_local( 0 );
    break;
  case Bytecodes::_lload_1:
    push_pair_local( 1 );
    break;
  case Bytecodes::_lload_2:
    push_pair_local( 2 );
    break;
  case Bytecodes::_lload_3:
    push_pair_local( 3 );
    break;
  case Bytecodes::_lload:
    push_pair_local( iter().get_index() );
    break;

  case Bytecodes::_dload_0:
    push_pair_local(0);
    break;
  case Bytecodes::_dload_1:
    push_pair_local(1);
    break;
  case Bytecodes::_dload_2:
    push_pair_local(2);
    break;
  case Bytecodes::_dload_3:
    push_pair_local(3);
    break;
  case Bytecodes::_dload:
    push_pair_local(iter().get_index());
    break;
  case Bytecodes::_fstore_0:
  case Bytecodes::_istore_0:
  case Bytecodes::_astore_0:
    set_local( 0, pop() );
    break;
  case Bytecodes::_fstore_1:
  case Bytecodes::_istore_1:
  case Bytecodes::_astore_1:
    set_local( 1, pop() );
    break;
  case Bytecodes::_fstore_2:
  case Bytecodes::_istore_2:
  case Bytecodes::_astore_2:
    set_local( 2, pop() );
    break;
  case Bytecodes::_fstore_3:
  case Bytecodes::_istore_3:
  case Bytecodes::_astore_3:
    set_local( 3, pop() );
    break;
  case Bytecodes::_fstore:
  case Bytecodes::_istore:
  case Bytecodes::_astore:
    set_local( iter().get_index(), pop() );
    break;
  // long stores
  case Bytecodes::_lstore_0:
    set_pair_local( 0, pop_pair() );
    break;
  case Bytecodes::_lstore_1:
    set_pair_local( 1, pop_pair() );
    break;
  case Bytecodes::_lstore_2:
    set_pair_local( 2, pop_pair() );
    break;
  case Bytecodes::_lstore_3:
    set_pair_local( 3, pop_pair() );
    break;
  case Bytecodes::_lstore:
    set_pair_local( iter().get_index(), pop_pair() );
    break;

  // double stores
  case Bytecodes::_dstore_0:
    set_pair_local( 0, dstore_rounding(pop_pair()) );
    break;
  case Bytecodes::_dstore_1:
    set_pair_local( 1, dstore_rounding(pop_pair()) );
    break;
  case Bytecodes::_dstore_2:
    set_pair_local( 2, dstore_rounding(pop_pair()) );
    break;
  case Bytecodes::_dstore_3:
    set_pair_local( 3, dstore_rounding(pop_pair()) );
    break;
  case Bytecodes::_dstore:
    set_pair_local( iter().get_index(), dstore_rounding(pop_pair()) );
    break;

  case Bytecodes::_pop:  dec_sp(1); break;
  case Bytecodes::_pop2: dec_sp(2); break;
  case Bytecodes::_swap:
    a = pop();
    b = pop();
    push(a);
    push(b);
    break;
  case Bytecodes::_dup:
    a = pop();
    push(a);
    push(a);
    break;
  case Bytecodes::_dup_x1:
    a = pop();
    b = pop();
    push( a );
    push( b );
    push( a );
    break;
  case Bytecodes::_dup_x2:
    a = pop();
    b = pop();
    c = pop();
    push( a );
    push( c );
    push( b );
    push( a );
    break;
  case Bytecodes::_dup2:
    a = pop();
    b = pop();
    push( b );
    push( a );
    push( b );
    push( a );
    break;

  case Bytecodes::_dup2_x1:
    // before: .. c, b, a
    // after:  .. b, a, c, b, a
    // not tested
    a = pop();
    b = pop();
    c = pop();
    push( b );
    push( a );
    push( c );
    push( b );
    push( a );
    break;
  case Bytecodes::_dup2_x2:
    // before: .. d, c, b, a
    // after:  .. b, a, d, c, b, a
    // not tested
    a = pop();
    b = pop();
    c = pop();
    d = pop();
    push( b );
    push( a );
    push( d );
    push( c );
    push( b );
    push( a );
    break;
  case Bytecodes::_arraylength: {
    // Must do null-check with value on expression stack
    Node *ary = null_check(peek(), T_ARRAY);
    // Compile-time detect of null-exception?
    if (stopped())  return;
    a = pop();
    push(load_array_length(a));
    break;
  }

  case Bytecodes::_baload:  array_load(T_BYTE);    break;
  case Bytecodes::_caload:  array_load(T_CHAR);    break;
  case Bytecodes::_iaload:  array_load(T_INT);     break;
  case Bytecodes::_saload:  array_load(T_SHORT);   break;
  case Bytecodes::_faload:  array_load(T_FLOAT);   break;
  case Bytecodes::_aaload:  array_load(T_OBJECT);  break;
  case Bytecodes::_laload:  array_load(T_LONG);    break;
  case Bytecodes::_daload:  array_load(T_DOUBLE);  break;
  case Bytecodes::_bastore: array_store(T_BYTE);   break;
  case Bytecodes::_castore: array_store(T_CHAR);   break;
  case Bytecodes::_iastore: array_store(T_INT);    break;
  case Bytecodes::_sastore: array_store(T_SHORT);  break;
  case Bytecodes::_fastore: array_store(T_FLOAT);  break;
  case Bytecodes::_aastore: array_store(T_OBJECT); break;
  case Bytecodes::_lastore: array_store(T_LONG);   break;
  case Bytecodes::_dastore: array_store(T_DOUBLE); break;

  case Bytecodes::_getfield:
    do_getfield();
    break;

  case Bytecodes::_getstatic:
    do_getstatic();
    break;

  case Bytecodes::_putfield:
    do_putfield();
    break;

  case Bytecodes::_putstatic:
    do_putstatic();
    break;

  case Bytecodes::_irem:
    do_irem();
    break;
  case Bytecodes::_idiv:
    // Must keep both values on the expression-stack during zero-check
    zero_check_int(peek());
    // Compile-time detect of division by zero?
    if (stopped())  return;
    b = pop();
    a = pop();
    push( _gvn.transform( new DivINode(control(), a, b) ) );
    break;
  case Bytecodes::_imul:
    b = pop(); a = pop();
    push( _gvn.transform( new MulINode(a, b) ) );
    break;
  case Bytecodes::_iadd:
    b = pop(); a = pop();
    push( _gvn.transform( new AddINode(a, b) ) );
    break;
  case Bytecodes::_ineg:
    a = pop();
    push( _gvn.transform( new SubINode(_gvn.intcon(0), a) ) );
    break;
  case Bytecodes::_isub:
    b = pop(); a = pop();
    push( _gvn.transform( new SubINode(a, b) ) );
    break;
  case Bytecodes::_iand:
    b = pop(); a = pop();
    push( _gvn.transform( new AndINode(a, b) ) );
    break;
  case Bytecodes::_ior:
    b = pop(); a = pop();
    push( _gvn.transform( new OrINode(a, b) ) );
    break;
  case Bytecodes::_ixor:
    b = pop(); a = pop();
    push( _gvn.transform( new XorINode(a, b) ) );
    break;
  case Bytecodes::_ishl:
    b = pop(); a = pop();
    push( _gvn.transform( new LShiftINode(a, b) ) );
    break;
  case Bytecodes::_ishr:
    b = pop(); a = pop();
    push( _gvn.transform( new RShiftINode(a, b) ) );
    break;
  case Bytecodes::_iushr:
    b = pop(); a = pop();
    push( _gvn.transform( new URShiftINode(a, b) ) );
    break;

  case Bytecodes::_fneg:
    a = pop();
    b = _gvn.transform(new NegFNode(a));
    push(b);
    break;

  case Bytecodes::_fsub:
    b = pop();
    a = pop();
    c = _gvn.transform( new SubFNode(a, b) );
    d = precision_rounding(c);
    push( d );
    break;

  case Bytecodes::_fadd:
    b = pop();
    a = pop();
    c = _gvn.transform( new AddFNode(a, b) );
    d = precision_rounding(c);
    push( d );
    break;

  case Bytecodes::_fmul:
    b = pop();
    a = pop();
    c = _gvn.transform( new MulFNode(a, b) );
    d = precision_rounding(c);
    push( d );
    break;

  case Bytecodes::_fdiv:
    b = pop();
    a = pop();
    c = _gvn.transform( new DivFNode(0, a, b) );
    d = precision_rounding(c);
    push( d );
    break;

  case Bytecodes::_frem:
    if (Matcher::has_match_rule(Op_ModF)) {
      // Generate a ModF node.
      b = pop();
      a = pop();
      c = _gvn.transform( new ModFNode(0, a, b) );
      d = precision_rounding(c);
      push( d );
    } else {
      // Generate a call.
      modf();
    }
    break;

  case Bytecodes::_fcmpl:
    b = pop();
    a = pop();
    c = _gvn.transform( new CmpF3Node(a, b) );
    push(c);
    break;
  case Bytecodes::_fcmpg:
    b = pop();
    a = pop();

    // Same as fcmpl but need to flip the unordered case.  Swap the inputs,
    // which negates the result sign except for unordered.  Flip the unordered
    // as well by using CmpF3 which implements unordered-lesser instead of
    // unordered-greater semantics.  Finally, negate the result bits.  Result
    // is same as using a CmpF3Greater except we did it with CmpF3 alone.
    c = _gvn.transform( new CmpF3Node(b, a) );
    c = _gvn.transform( new SubINode(_gvn.intcon(0), c) );
    push(c);
    break;
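  // Editor's worked check for fcmpg above (not in the original): for
  // a = NaN, b = 1.0f, fcmpg must push +1: CmpF3(b, a) yields -1
  // (unordered-lesser) and 0 - (-1) = +1. For a = 1.0f, b = 2.0f (a < b):
  // CmpF3(b, a) = +1 and 0 - (+1) = -1, matching fcmpl on ordered inputs.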
  case Bytecodes::_f2i:
    a = pop();
    push(_gvn.transform(new ConvF2INode(a)));
    break;

  case Bytecodes::_d2i:
    a = pop_pair();
    b = _gvn.transform(new ConvD2INode(a));
    push( b );
    break;

  case Bytecodes::_f2d:
    a = pop();
    b = _gvn.transform( new ConvF2DNode(a) );
    push_pair( b );
    break;

  case Bytecodes::_d2f:
    a = pop_pair();
    b = _gvn.transform( new ConvD2FNode(a) );
    // This breaks _227_mtrt (speed & correctness) and _222_mpegaudio (speed)
    //b = _gvn.transform(new RoundFloatNode(0, b) );
    push( b );
    break;

  case Bytecodes::_l2f:
    if (Matcher::convL2FSupported()) {
      a = pop_pair();
      b = _gvn.transform( new ConvL2FNode(a) );
      // For i486.ad, FILD doesn't restrict precision to 24 or 53 bits.
      // Rather than storing the result into an FP register then pushing
      // out to memory to round, the machine instruction that implements
      // ConvL2F is responsible for rounding.
      // c = precision_rounding(b);
      c = _gvn.transform(b);
      push(c);
    } else {
      l2f();
    }
    break;

  case Bytecodes::_l2d:
    a = pop_pair();
    b = _gvn.transform( new ConvL2DNode(a) );
    // For i486.ad, rounding is always necessary (see _l2f above).
    // c = dprecision_rounding(b);
    c = _gvn.transform(b);
    push_pair(c);
    break;
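  // Editor's note: long-to-float is inexact above 2^24 and long-to-double
  // above 2^53 (the significand widths), so the conversion instruction
  // itself must produce the correctly rounded result, as the two comments
  // above require of the platform's ConvL2F/ConvL2D implementations.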
  case Bytecodes::_f2l:
    a = pop();
    b = _gvn.transform( new ConvF2LNode(a) );
    push_pair(b);
    break;

  case Bytecodes::_d2l:
    a = pop_pair();
    b = _gvn.transform( new ConvD2LNode(a) );
    push_pair(b);
    break;

  case Bytecodes::_dsub:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new SubDNode(a, b) );
    d = dprecision_rounding(c);
    push_pair( d );
    break;

  case Bytecodes::_dadd:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new AddDNode(a, b) );
    d = dprecision_rounding(c);
    push_pair( d );
    break;

  case Bytecodes::_dmul:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new MulDNode(a, b) );
    d = dprecision_rounding(c);
    push_pair( d );
    break;

  case Bytecodes::_ddiv:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new DivDNode(0, a, b) );
    d = dprecision_rounding(c);
    push_pair( d );
    break;

  case Bytecodes::_dneg:
    a = pop_pair();
    b = _gvn.transform(new NegDNode(a));
    push_pair(b);
    break;

  case Bytecodes::_drem:
    if (Matcher::has_match_rule(Op_ModD)) {
      // Generate a ModD node.
      b = pop_pair();
      a = pop_pair();
      // a % b
      c = _gvn.transform( new ModDNode(0, a, b) );
      d = dprecision_rounding(c);
      push_pair( d );
    } else {
      // Generate a call.
      modd();
    }
    break;

  case Bytecodes::_dcmpl:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new CmpD3Node(a, b) );
    push(c);
    break;

  case Bytecodes::_dcmpg:
    b = pop_pair();
    a = pop_pair();
    // Same as dcmpl but need to flip the unordered case.
    // Commute the inputs, which negates the result sign except for unordered.
    // Flip the unordered as well by using CmpD3 which implements
    // unordered-lesser instead of unordered-greater semantics.
    // Finally, negate the result bits.  Result is same as using a
    // CmpD3Greater except we did it with CmpD3 alone.
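    // (Editor's note: the fcmpg worked example above applies here verbatim
    //  with double operands; e.g. a = NaN gives CmpD3(b, a) = -1 and
    //  0 - (-1) = +1, exactly dcmpg's required unordered result.)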
    c = _gvn.transform( new CmpD3Node(b, a) );
    c = _gvn.transform( new SubINode(_gvn.intcon(0), c) );
    push(c);
    break;


  // Note for longs -> lo word is on TOS, hi word is on TOS - 1
  case Bytecodes::_land:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new AndLNode(a, b) );
    push_pair(c);
    break;
  case Bytecodes::_lor:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new OrLNode(a, b) );
    push_pair(c);
    break;
  case Bytecodes::_lxor:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new XorLNode(a, b) );
    push_pair(c);
    break;

  case Bytecodes::_lshl:
    b = pop();                  // the shift count
    a = pop_pair();             // value to be shifted
    c = _gvn.transform( new LShiftLNode(a, b) );
    push_pair(c);
    break;
  case Bytecodes::_lshr:
    b = pop();                  // the shift count
    a = pop_pair();             // value to be shifted
    c = _gvn.transform( new RShiftLNode(a, b) );
    push_pair(c);
    break;
  case Bytecodes::_lushr:
    b = pop();                  // the shift count
    a = pop_pair();             // value to be shifted
    c = _gvn.transform( new URShiftLNode(a, b) );
    push_pair(c);
    break;
  case Bytecodes::_lmul:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new MulLNode(a, b) );
    push_pair(c);
    break;

  case Bytecodes::_lrem:
    // Must keep both values on the expression-stack during zero-check
    assert(peek(0) == top(), "long word order");
    zero_check_long(peek(1));
    // Compile-time detect of division by zero?
    if (stopped())  return;
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new ModLNode(control(), a, b) );
    push_pair(c);
    break;

  case Bytecodes::_ldiv:
    // Must keep both values on the expression-stack during zero-check
    assert(peek(0) == top(), "long word order");
    zero_check_long(peek(1));
    // Compile-time detect of division by zero?
    if (stopped())  return;
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new DivLNode(control(), a, b) );
    push_pair(c);
    break;

  case Bytecodes::_ladd:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new AddLNode(a, b) );
    push_pair(c);
    break;
  case Bytecodes::_lsub:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new SubLNode(a, b) );
    push_pair(c);
    break;
  case Bytecodes::_lcmp:
    // Safepoints are now inserted _before_ branches.  The long-compare
    // bytecode painfully produces a 3-way value (-1,0,+1) which requires a
    // slew of control flow.  These are usually followed by a CmpI vs zero and
    // a branch; this pattern then optimizes to the obvious long-compare and
    // branch.  However, if the branch is backwards there's a Safepoint
    // inserted.  The inserted Safepoint captures the JVM state at the
    // pre-branch point, i.e. it captures the 3-way value.  Thus if a
    // long-compare is used to control a loop the debug info will force
    // computation of the 3-way value, even though the generated code uses a
    // long-compare and branch.  We try to rectify the situation by inserting
    // a SafePoint here and have it dominate and kill the safepoint added at a
    // following backwards branch.  At this point the JVM state merely holds 2
    // longs but not the 3-way value.
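    // (Editor's illustration, assuming a bottom-tested long loop such as
    //  "do { ... } while (i < n);", which compiles to "lcmp; iflt <backwards>":
    //  the SafePoint added below captures only the two long operands, and it
    //  dominates and kills the one the backward branch would add, whose debug
    //  info would otherwise pin the 3-way lcmp result.)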
    if (UseLoopSafepoints) {
      switch (iter().next_bc()) {
      case Bytecodes::_ifgt:
      case Bytecodes::_iflt:
      case Bytecodes::_ifge:
      case Bytecodes::_ifle:
      case Bytecodes::_ifne:
      case Bytecodes::_ifeq:
        // If this is a backwards branch in the bytecodes, add Safepoint
        maybe_add_safepoint(iter().next_get_dest());
      default:
        break;
      }
    }
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new CmpL3Node(a, b) );
    push(c);
    break;

  case Bytecodes::_lneg:
    a = pop_pair();
    b = _gvn.transform( new SubLNode(longcon(0), a) );
    push_pair(b);
    break;
  case Bytecodes::_l2i:
    a = pop_pair();
    push( _gvn.transform( new ConvL2INode(a) ) );
    break;
  case Bytecodes::_i2l:
    a = pop();
    b = _gvn.transform( new ConvI2LNode(a) );
    push_pair(b);
    break;
  case Bytecodes::_i2b:
    // Sign extend
    // (e.g. 0x80 << 24 = 0x80000000; the arithmetic >> 24 then yields
    //  0xFFFFFF80, i.e. -128, so the low byte is sign-extended.)
    a = pop();
    a = _gvn.transform( new LShiftINode(a, _gvn.intcon(24)) );
    a = _gvn.transform( new RShiftINode(a, _gvn.intcon(24)) );
    push( a );
    break;
  case Bytecodes::_i2s:
    a = pop();
    a = _gvn.transform( new LShiftINode(a, _gvn.intcon(16)) );
    a = _gvn.transform( new RShiftINode(a, _gvn.intcon(16)) );
    push( a );
    break;
  case Bytecodes::_i2c:
    a = pop();
    push( _gvn.transform( new AndINode(a, _gvn.intcon(0xFFFF)) ) );
    break;

  case Bytecodes::_i2f:
    a = pop();
    b = _gvn.transform( new ConvI2FNode(a) );
    c = precision_rounding(b);
    push(c);                    // push the rounded value
    break;

  case Bytecodes::_i2d:
    a = pop();
    b = _gvn.transform( new ConvI2DNode(a) );
    push_pair(b);
    break;

  case Bytecodes::_iinc:        // Increment local
    i = iter().get_index();     // Get local index
    set_local( i, _gvn.transform( new AddINode( _gvn.intcon(iter().get_iinc_con()), local(i) ) ) );
    break;

  // Exit points of synchronized methods must have an unlock node
  case Bytecodes::_return:
    return_current(NULL);
    break;

  case Bytecodes::_ireturn:
  case Bytecodes::_areturn:
  case Bytecodes::_freturn:
    return_current(pop());
    break;
  case Bytecodes::_lreturn:
    return_current(pop_pair());
    break;
  case Bytecodes::_dreturn:
    return_current(pop_pair());
    break;

  case Bytecodes::_athrow:
    // A null exception oop throws a NullPointerException
    null_check(peek());
    if (stopped())  return;
    // Hook the thrown exception directly to subsequent handlers.
    if (BailoutToInterpreterForThrows) {
      // Keep method interpreted from now on.
      uncommon_trap(Deoptimization::Reason_unhandled,
                    Deoptimization::Action_make_not_compilable);
      return;
    }
    if (env()->jvmti_can_post_on_exceptions()) {
      // check if we must post exception events, take uncommon trap if so (with must_throw = false)
      uncommon_trap_if_should_post_on_exceptions(Deoptimization::Reason_unhandled, false);
    }
    // Here if either can_post_on_exceptions or should_post_on_exceptions is false
    add_exception_state(make_exception_state(peek()));
    break;
  case Bytecodes::_goto:   // fall through
  case Bytecodes::_goto_w: {
    int target_bci = (bc() == Bytecodes::_goto) ? iter().get_dest() : iter().get_far_dest();

    // If this is a backwards branch in the bytecodes, add Safepoint
    maybe_add_safepoint(target_bci);

    // Update method data
    profile_taken_branch(target_bci);

    // Merge the current control into the target basic block
    merge(target_bci);

    // See if we can get some profile data and hand it off to the next block
    Block *target_block = block()->successor_for_bci(target_bci);
    if (target_block->pred_count() != 1)  break;
    ciMethodData* methodData = method()->method_data();
    if (!methodData->is_mature())  break;
    ciProfileData* data = methodData->bci_to_data(bci());
    assert(data != NULL && data->is_JumpData(), "need JumpData for taken branch");
    int taken = ((ciJumpData*)data)->taken();
    taken = method()->scale_count(taken);
    target_block->set_count(taken);
    break;
  }

  case Bytecodes::_ifnull:    btest = BoolTest::eq; goto handle_if_null;
  case Bytecodes::_ifnonnull: btest = BoolTest::ne; goto handle_if_null;
  handle_if_null:
    // If this is a backwards branch in the bytecodes, add Safepoint
    maybe_add_safepoint(iter().get_dest());
    a = null();
    b = pop();
    if (!_gvn.type(b)->speculative_maybe_null() &&
        !too_many_traps(Deoptimization::Reason_speculate_null_check)) {
      inc_sp(1);
      Node* null_ctl = top();
      b = null_check_oop(b, &null_ctl, true, true, true);
      assert(null_ctl->is_top(), "no null control here");
      dec_sp(1);
    } else if (_gvn.type(b)->speculative_always_null() &&
               !too_many_traps(Deoptimization::Reason_speculate_null_assert)) {
      inc_sp(1);
      b = null_assert(b);
      dec_sp(1);
    }
    c = _gvn.transform( new CmpPNode(b, a) );
    do_ifnull(btest, c);
    break;
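  // Editor's note on handle_if_null above: when profiling claims the operand
  // is never null (and speculative traps are still allowed), null_check_oop()
  // returns a value already proven non-null, so the CmpP against null
  // constant-folds and the branch disappears; the speculative_always_null
  // case folds it in the opposite direction via null_assert().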
  case Bytecodes::_if_acmpeq: btest = BoolTest::eq; goto handle_if_acmp;
  case Bytecodes::_if_acmpne: btest = BoolTest::ne; goto handle_if_acmp;
  handle_if_acmp:
    // If this is a backwards branch in the bytecodes, add Safepoint
    maybe_add_safepoint(iter().get_dest());
    a = pop();
    b = pop();
    c = _gvn.transform( new CmpPNode(b, a) );
    c = optimize_cmp_with_klass(c);
    do_if(btest, c);
    break;

  case Bytecodes::_ifeq: btest = BoolTest::eq; goto handle_ifxx;
  case Bytecodes::_ifne: btest = BoolTest::ne; goto handle_ifxx;
  case Bytecodes::_iflt: btest = BoolTest::lt; goto handle_ifxx;
  case Bytecodes::_ifle: btest = BoolTest::le; goto handle_ifxx;
  case Bytecodes::_ifgt: btest = BoolTest::gt; goto handle_ifxx;
  case Bytecodes::_ifge: btest = BoolTest::ge; goto handle_ifxx;
  handle_ifxx:
    // If this is a backwards branch in the bytecodes, add Safepoint
    maybe_add_safepoint(iter().get_dest());
    a = _gvn.intcon(0);
    b = pop();
    c = _gvn.transform( new CmpINode(b, a) );
    do_if(btest, c);
    break;

  case Bytecodes::_if_icmpeq: btest = BoolTest::eq; goto handle_if_icmp;
  case Bytecodes::_if_icmpne: btest = BoolTest::ne; goto handle_if_icmp;
  case Bytecodes::_if_icmplt: btest = BoolTest::lt; goto handle_if_icmp;
  case Bytecodes::_if_icmple: btest = BoolTest::le; goto handle_if_icmp;
  case Bytecodes::_if_icmpgt: btest = BoolTest::gt; goto handle_if_icmp;
  case Bytecodes::_if_icmpge: btest = BoolTest::ge; goto handle_if_icmp;
  handle_if_icmp:
    // If this is a backwards branch in the bytecodes, add Safepoint
    maybe_add_safepoint(iter().get_dest());
    a = pop();
    b = pop();
    c = _gvn.transform( new CmpINode(b, a) );
    do_if(btest, c);
    break;

  case Bytecodes::_tableswitch:
    do_tableswitch();
    break;

  case Bytecodes::_lookupswitch:
    do_lookupswitch();
    break;

  case Bytecodes::_invokestatic:
  case Bytecodes::_invokedynamic:
  case Bytecodes::_invokespecial:
  case Bytecodes::_invokevirtual:
  case Bytecodes::_invokeinterface:
    do_call();
    break;
  case Bytecodes::_checkcast:
    do_checkcast();
    break;
  case Bytecodes::_instanceof:
    do_instanceof();
    break;
  case Bytecodes::_anewarray:
    do_anewarray();
    break;
  case Bytecodes::_newarray:
    do_newarray((BasicType)iter().get_index());
    break;
  case Bytecodes::_multianewarray:
    do_multianewarray();
    break;
  case Bytecodes::_new:
    do_new();
    break;

  case Bytecodes::_jsr:
  case Bytecodes::_jsr_w:
    do_jsr();
    break;

  case Bytecodes::_ret:
    do_ret();
    break;


  case Bytecodes::_monitorenter:
    do_monitor_enter();
    break;

  case Bytecodes::_monitorexit:
    do_monitor_exit();
    break;

  case Bytecodes::_breakpoint:
    // Breakpoint set concurrently to compile
    // %%% use an uncommon trap?
    C->record_failure("breakpoint in method");
    return;

  default:
#ifndef PRODUCT
    map()->dump(99);
#endif
    tty->print("\nUnhandled bytecode %s\n", Bytecodes::name(bc()));
    ShouldNotReachHere();
  }

#ifndef PRODUCT
  IdealGraphPrinter *printer = C->printer();
  if (printer && printer->should_print(1)) {
    char buffer[256];
    sprintf(buffer, "Bytecode %d: %s", bci(), Bytecodes::name(bc()));
    bool old = printer->traverse_outs();
    printer->set_traverse_outs(true);
    printer->print_method(buffer, 4);
    printer->set_traverse_outs(old);
  }
#endif
}