1 /* 2 * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "ci/ciMethodData.hpp" 27 #include "classfile/systemDictionary.hpp" 28 #include "classfile/vmSymbols.hpp" 29 #include "compiler/compileLog.hpp" 30 #include "interpreter/linkResolver.hpp" 31 #include "memory/resourceArea.hpp" 32 #include "memory/universe.hpp" 33 #include "oops/oop.inline.hpp" 34 #include "opto/addnode.hpp" 35 #include "opto/castnode.hpp" 36 #include "opto/convertnode.hpp" 37 #include "opto/divnode.hpp" 38 #include "opto/idealGraphPrinter.hpp" 39 #include "opto/idealKit.hpp" 40 #include "opto/matcher.hpp" 41 #include "opto/memnode.hpp" 42 #include "opto/mulnode.hpp" 43 #include "opto/opaquenode.hpp" 44 #include "opto/parse.hpp" 45 #include "opto/runtime.hpp" 46 #include "opto/valuetypenode.hpp" 47 #include "runtime/deoptimization.hpp" 48 #include "runtime/sharedRuntime.hpp" 49 50 #ifndef PRODUCT 51 extern int explicit_null_checks_inserted, 52 explicit_null_checks_elided; 53 #endif 54 55 //---------------------------------array_load---------------------------------- 56 void Parse::array_load(BasicType bt) { 57 const Type* elemtype = Type::TOP; 58 Node* adr = array_addressing(bt, 0, &elemtype); 59 if (stopped()) return; // guaranteed null or range check 60 61 Node* idx = pop(); 62 Node* ary = pop(); 63 64 // Handle value type arrays 65 const TypeOopPtr* elemptr = elemtype->make_oopptr(); 66 const TypeAryPtr* ary_t = _gvn.type(ary)->is_aryptr(); 67 if (elemtype->isa_valuetype() != NULL) { 68 C->set_flattened_accesses(); 69 // Load from flattened value type array 70 Node* vt = ValueTypeNode::make_from_flattened(this, elemtype->value_klass(), ary, adr); 71 push(vt); 72 return; 73 } else if (elemptr != NULL && elemptr->is_valuetypeptr() && !elemptr->maybe_null()) { 74 // Load from non-flattened but flattenable value type array (elements can never be null) 75 bt = T_VALUETYPE; 76 } else if (!ary_t->is_not_flat()) { 77 // Cannot statically determine if array is flattened, emit runtime check 78 assert(ValueArrayFlatten && elemptr->can_be_value_type() && !ary_t->klass_is_exact() && !ary_t->is_not_null_free() && 79 (!elemptr->is_valuetypeptr() || elemptr->value_klass()->flatten_array()), "array can't be flattened"); 80 Node* ctl = control(); 81 IdealKit ideal(this); 82 IdealVariable res(ideal); 83 ideal.declarations_done(); 84 Node* kls = load_object_klass(ary); 85 Node* tag = load_lh_array_tag(kls); 86 ideal.if_then(tag, BoolTest::ne, intcon(Klass::_lh_array_tag_vt_value)); { 87 // non-flattened 88 sync_kit(ideal); 89 const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt); 90 Node* ld = access_load_at(ary, adr, adr_type, elemptr, bt, 91 IN_HEAP | IS_ARRAY | C2_CONTROL_DEPENDENT_LOAD, ctl); 92 ideal.sync_kit(this); 93 ideal.set(res, ld); 94 } ideal.else_(); { 95 // flattened 96 sync_kit(ideal); 97 if (elemptr->is_valuetypeptr()) { 98 // Element type is known, cast and load from flattened representation 99 ciValueKlass* vk = elemptr->value_klass(); 100 assert(vk->flatten_array() && elemptr->maybe_null(), "must be a flattenable and nullable array"); 101 ciArrayKlass* array_klass = ciArrayKlass::make(vk, /* never_null */ true); 102 const TypeAryPtr* arytype = TypeOopPtr::make_from_klass(array_klass)->isa_aryptr(); 103 Node* cast = _gvn.transform(new CheckCastPPNode(control(), ary, arytype)); 104 adr = array_element_address(cast, idx, T_VALUETYPE, ary_t->size(), control()); 105 Node* vt = ValueTypeNode::make_from_flattened(this, vk, cast, adr)->allocate(this, false, false)->get_oop(); 106 ideal.set(res, vt); 107 ideal.sync_kit(this); 108 } else { 109 // Element type is unknown, emit runtime call 110 Node* k_adr = basic_plus_adr(kls, in_bytes(ArrayKlass::element_klass_offset())); 111 Node* elem_klass = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), k_adr, TypeInstPtr::KLASS)); 112 Node* obj_size = NULL; 113 kill_dead_locals(); 114 inc_sp(2); 115 Node* alloc_obj = new_instance(elem_klass, NULL, &obj_size, /*deoptimize_on_exception=*/true); 116 dec_sp(2); 117 118 AllocateNode* alloc = AllocateNode::Ideal_allocation(alloc_obj, &_gvn); 119 assert(alloc->maybe_set_complete(&_gvn), ""); 120 alloc->initialization()->set_complete_with_arraycopy(); 121 122 // This membar keeps this access to an unknown flattened array 123 // correctly ordered with other unknown and known flattened 124 // array accesses. 125 insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::VALUES)); 126 127 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2(); 128 // Unknown value type might contain reference fields 129 if (!bs->array_copy_requires_gc_barriers(false, T_OBJECT, false, BarrierSetC2::Parsing)) { 130 int base_off = sizeof(instanceOopDesc); 131 Node* dst_base = basic_plus_adr(alloc_obj, base_off); 132 Node* countx = obj_size; 133 countx = _gvn.transform(new SubXNode(countx, MakeConX(base_off))); 134 countx = _gvn.transform(new URShiftXNode(countx, intcon(LogBytesPerLong))); 135 136 assert(Klass::_lh_log2_element_size_shift == 0, "use shift in place"); 137 Node* lhp = basic_plus_adr(kls, in_bytes(Klass::layout_helper_offset())); 138 Node* elem_shift = make_load(NULL, lhp, TypeInt::INT, T_INT, MemNode::unordered); 139 uint header = arrayOopDesc::base_offset_in_bytes(T_VALUETYPE); 140 Node* base = basic_plus_adr(ary, header); 141 idx = Compile::conv_I2X_index(&_gvn, idx, TypeInt::POS, control()); 142 Node* scale = _gvn.transform(new LShiftXNode(idx, elem_shift)); 143 Node* adr = basic_plus_adr(ary, base, scale); 144 145 access_clone(adr, dst_base, countx, false); 146 } else { 147 ideal.sync_kit(this); 148 ideal.make_leaf_call(OptoRuntime::load_unknown_value_Type(), 149 CAST_FROM_FN_PTR(address, OptoRuntime::load_unknown_value), 150 "load_unknown_value", 151 ary, idx, alloc_obj); 152 sync_kit(ideal); 153 } 154 155 // This makes sure no other thread sees a partially initialized buffered value 156 insert_mem_bar_volatile(Op_MemBarStoreStore, Compile::AliasIdxRaw, alloc->proj_out_or_null(AllocateNode::RawAddress)); 157 158 // Same as MemBarCPUOrder above: keep this unknown flattened 159 // array access correctly ordered with other flattened array 160 // access 161 insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::VALUES)); 162 163 // Prevent any use of the newly allocated value before it is 164 // fully initialized 165 alloc_obj = new CastPPNode(alloc_obj, _gvn.type(alloc_obj), true); 166 alloc_obj->set_req(0, control()); 167 alloc_obj = _gvn.transform(alloc_obj); 168 169 ideal.sync_kit(this); 170 171 ideal.set(res, alloc_obj); 172 } 173 } ideal.end_if(); 174 sync_kit(ideal); 175 push_node(bt, _gvn.transform(ideal.value(res))); 176 return; 177 } 178 179 if (elemtype == TypeInt::BOOL) { 180 bt = T_BOOLEAN; 181 } else if (bt == T_OBJECT) { 182 elemtype = ary_t->elem()->make_oopptr(); 183 } 184 185 const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt); 186 Node* ld = access_load_at(ary, adr, adr_type, elemtype, bt, 187 IN_HEAP | IS_ARRAY | C2_CONTROL_DEPENDENT_LOAD); 188 if (bt == T_VALUETYPE) { 189 // Loading a non-flattened (but flattenable) value type from an array 190 assert(!gvn().type(ld)->maybe_null(), "value type array elements should never be null"); 191 if (elemptr->value_klass()->is_scalarizable()) { 192 ld = ValueTypeNode::make_from_oop(this, ld, elemptr->value_klass()); 193 } 194 } 195 196 push_node(bt, ld); 197 } 198 199 200 //--------------------------------array_store---------------------------------- 201 void Parse::array_store(BasicType bt) { 202 const Type* elemtype = Type::TOP; 203 Node* adr = array_addressing(bt, type2size[bt], &elemtype); 204 if (stopped()) return; // guaranteed null or range check 205 Node* cast_val = NULL; 206 if (bt == T_OBJECT) { 207 cast_val = array_store_check(); 208 if (stopped()) return; 209 } 210 Node* val = pop_node(bt); // Value to store 211 Node* idx = pop(); // Index in the array 212 Node* ary = pop(); // The array itself 213 214 const TypeAryPtr* ary_t = _gvn.type(ary)->is_aryptr(); 215 const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt); 216 217 if (elemtype == TypeInt::BOOL) { 218 bt = T_BOOLEAN; 219 } else if (bt == T_OBJECT) { 220 elemtype = elemtype->make_oopptr(); 221 const Type* tval = _gvn.type(cast_val); 222 // We may have lost type information for 'val' here due to the casts 223 // emitted by the array_store_check code (see JDK-6312651) 224 // TODO Remove this code once JDK-6312651 is in. 225 const Type* tval_init = _gvn.type(val); 226 bool can_be_value_type = tval->isa_valuetype() || (tval != TypePtr::NULL_PTR && tval_init->is_oopptr()->can_be_value_type() && tval->is_oopptr()->can_be_value_type()); 227 bool not_flattenable = !can_be_value_type || ((tval_init->is_valuetypeptr() || tval_init->isa_valuetype()) && !tval_init->value_klass()->flatten_array()); 228 229 if (!ary_t->is_not_null_free() && !can_be_value_type && (!tval->maybe_null() || !tval_init->maybe_null())) { 230 // Storing a non-inline-type, mark array as not null-free. 231 // This is only legal for non-null stores because the array_store_check passes for null. 232 ary_t = ary_t->cast_to_not_null_free(); 233 Node* cast = _gvn.transform(new CheckCastPPNode(control(), ary, ary_t)); 234 replace_in_map(ary, cast); 235 ary = cast; 236 } else if (!ary_t->is_not_flat() && not_flattenable) { 237 // Storing a non-flattenable value, mark array as not flat. 238 ary_t = ary_t->cast_to_not_flat(); 239 if (tval != TypePtr::NULL_PTR) { 240 // For NULL, this transformation is only valid after the null guard below 241 Node* cast = _gvn.transform(new CheckCastPPNode(control(), ary, ary_t)); 242 replace_in_map(ary, cast); 243 ary = cast; 244 } 245 } 246 247 if (ary_t->elem()->isa_valuetype() != NULL) { 248 // Store to flattened value type array 249 C->set_flattened_accesses(); 250 if (!cast_val->is_ValueType()) { 251 inc_sp(3); 252 cast_val = null_check(cast_val); 253 if (stopped()) return; 254 dec_sp(3); 255 cast_val = ValueTypeNode::make_from_oop(this, cast_val, ary_t->elem()->value_klass()); 256 } 257 cast_val->as_ValueType()->store_flattened(this, ary, adr); 258 return; 259 } else if (elemtype->is_valuetypeptr() && !elemtype->maybe_null()) { 260 // Store to non-flattened but flattenable value type array (elements can never be null) 261 if (!cast_val->is_ValueType() && tval->maybe_null()) { 262 inc_sp(3); 263 cast_val = null_check(cast_val); 264 if (stopped()) return; 265 dec_sp(3); 266 } 267 } else if (!ary_t->is_not_flat()) { 268 // Array might be flattened, emit runtime checks 269 assert(ValueArrayFlatten && !not_flattenable && elemtype->is_oopptr()->can_be_value_type() && 270 !ary_t->klass_is_exact() && !ary_t->is_not_null_free(), "array can't be flattened"); 271 IdealKit ideal(this); 272 Node* kls = load_object_klass(ary); 273 Node* layout_val = load_lh_array_tag(kls); 274 ideal.if_then(layout_val, BoolTest::ne, intcon(Klass::_lh_array_tag_vt_value)); 275 { 276 // non-flattened 277 sync_kit(ideal); 278 gen_value_array_null_guard(ary, cast_val, 3); 279 access_store_at(ary, adr, adr_type, cast_val, elemtype, bt, MO_UNORDERED | IN_HEAP | IS_ARRAY, false, false); 280 ideal.sync_kit(this); 281 } 282 ideal.else_(); 283 { 284 // flattened 285 if (!cast_val->is_ValueType() && tval->maybe_null()) { 286 // Add null check 287 sync_kit(ideal); 288 Node* null_ctl = top(); 289 cast_val = null_check_oop(cast_val, &null_ctl); 290 if (null_ctl != top()) { 291 PreserveJVMState pjvms(this); 292 inc_sp(3); 293 set_control(null_ctl); 294 uncommon_trap(Deoptimization::Reason_null_check, Deoptimization::Action_none); 295 dec_sp(3); 296 } 297 ideal.sync_kit(this); 298 } 299 // Try to determine the value klass 300 ciValueKlass* vk = NULL; 301 if (tval->isa_valuetype() || tval->is_valuetypeptr()) { 302 vk = tval->value_klass(); 303 } else if (tval_init->isa_valuetype() || tval_init->is_valuetypeptr()) { 304 vk = tval_init->value_klass(); 305 } else if (elemtype->is_valuetypeptr()) { 306 vk = elemtype->value_klass(); 307 } 308 if (vk != NULL && !stopped()) { 309 // Element type is known, cast and store to flattened representation 310 sync_kit(ideal); 311 assert(vk->flatten_array() && elemtype->maybe_null(), "must be a flattenable and nullable array"); 312 ciArrayKlass* array_klass = ciArrayKlass::make(vk, /* never_null */ true); 313 const TypeAryPtr* arytype = TypeOopPtr::make_from_klass(array_klass)->isa_aryptr(); 314 ary = _gvn.transform(new CheckCastPPNode(control(), ary, arytype)); 315 adr = array_element_address(ary, idx, T_OBJECT, arytype->size(), control()); 316 if (!cast_val->is_ValueType()) { 317 assert(!gvn().type(cast_val)->maybe_null(), "value type array elements should never be null"); 318 cast_val = ValueTypeNode::make_from_oop(this, cast_val, vk); 319 } 320 cast_val->as_ValueType()->store_flattened(this, ary, adr); 321 ideal.sync_kit(this); 322 } else if (!ideal.ctrl()->is_top()) { 323 // Element type is unknown, emit runtime call 324 sync_kit(ideal); 325 326 // This membar keeps this access to an unknown flattened 327 // array correctly ordered with other unknown and known 328 // flattened array accesses. 329 insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::VALUES)); 330 ideal.sync_kit(this); 331 332 ideal.make_leaf_call(OptoRuntime::store_unknown_value_Type(), 333 CAST_FROM_FN_PTR(address, OptoRuntime::store_unknown_value), 334 "store_unknown_value", 335 cast_val, ary, idx); 336 337 sync_kit(ideal); 338 // Same as MemBarCPUOrder above: keep this unknown 339 // flattened array access correctly ordered with other 340 // flattened array access 341 insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::VALUES)); 342 ideal.sync_kit(this); 343 } 344 } 345 ideal.end_if(); 346 sync_kit(ideal); 347 return; 348 } else if (!ary_t->is_not_null_free()) { 349 // Array is not flattened but may be null free 350 assert(elemtype->is_oopptr()->can_be_value_type() && !ary_t->klass_is_exact(), "array can't be null free"); 351 ary = gen_value_array_null_guard(ary, cast_val, 3, true); 352 } 353 } 354 355 access_store_at(ary, adr, adr_type, val, elemtype, bt, MO_UNORDERED | IN_HEAP | IS_ARRAY); 356 } 357 358 359 //------------------------------array_addressing------------------------------- 360 // Pull array and index from the stack. Compute pointer-to-element. 361 Node* Parse::array_addressing(BasicType type, int vals, const Type* *result2) { 362 Node *idx = peek(0+vals); // Get from stack without popping 363 Node *ary = peek(1+vals); // in case of exception 364 365 // Null check the array base, with correct stack contents 366 ary = null_check(ary, T_ARRAY); 367 // Compile-time detect of null-exception? 368 if (stopped()) return top(); 369 370 const TypeAryPtr* arytype = _gvn.type(ary)->is_aryptr(); 371 const TypeInt* sizetype = arytype->size(); 372 const Type* elemtype = arytype->elem(); 373 374 if (UseUniqueSubclasses && result2 != NULL) { 375 const Type* el = elemtype->make_ptr(); 376 if (el && el->isa_instptr()) { 377 const TypeInstPtr* toop = el->is_instptr(); 378 if (toop->klass()->as_instance_klass()->unique_concrete_subklass()) { 379 // If we load from "AbstractClass[]" we must see "ConcreteSubClass". 380 const Type* subklass = Type::get_const_type(toop->klass()); 381 elemtype = subklass->join_speculative(el); 382 } 383 } 384 } 385 386 // Check for big class initializers with all constant offsets 387 // feeding into a known-size array. 388 const TypeInt* idxtype = _gvn.type(idx)->is_int(); 389 // See if the highest idx value is less than the lowest array bound, 390 // and if the idx value cannot be negative: 391 bool need_range_check = true; 392 if (idxtype->_hi < sizetype->_lo && idxtype->_lo >= 0) { 393 need_range_check = false; 394 if (C->log() != NULL) C->log()->elem("observe that='!need_range_check'"); 395 } 396 397 ciKlass * arytype_klass = arytype->klass(); 398 if ((arytype_klass != NULL) && (!arytype_klass->is_loaded())) { 399 // Only fails for some -Xcomp runs 400 // The class is unloaded. We have to run this bytecode in the interpreter. 401 uncommon_trap(Deoptimization::Reason_unloaded, 402 Deoptimization::Action_reinterpret, 403 arytype->klass(), "!loaded array"); 404 return top(); 405 } 406 407 // Do the range check 408 if (GenerateRangeChecks && need_range_check) { 409 Node* tst; 410 if (sizetype->_hi <= 0) { 411 // The greatest array bound is negative, so we can conclude that we're 412 // compiling unreachable code, but the unsigned compare trick used below 413 // only works with non-negative lengths. Instead, hack "tst" to be zero so 414 // the uncommon_trap path will always be taken. 415 tst = _gvn.intcon(0); 416 } else { 417 // Range is constant in array-oop, so we can use the original state of mem 418 Node* len = load_array_length(ary); 419 420 // Test length vs index (standard trick using unsigned compare) 421 Node* chk = _gvn.transform( new CmpUNode(idx, len) ); 422 BoolTest::mask btest = BoolTest::lt; 423 tst = _gvn.transform( new BoolNode(chk, btest) ); 424 } 425 RangeCheckNode* rc = new RangeCheckNode(control(), tst, PROB_MAX, COUNT_UNKNOWN); 426 _gvn.set_type(rc, rc->Value(&_gvn)); 427 if (!tst->is_Con()) { 428 record_for_igvn(rc); 429 } 430 set_control(_gvn.transform(new IfTrueNode(rc))); 431 // Branch to failure if out of bounds 432 { 433 PreserveJVMState pjvms(this); 434 set_control(_gvn.transform(new IfFalseNode(rc))); 435 if (C->allow_range_check_smearing()) { 436 // Do not use builtin_throw, since range checks are sometimes 437 // made more stringent by an optimistic transformation. 438 // This creates "tentative" range checks at this point, 439 // which are not guaranteed to throw exceptions. 440 // See IfNode::Ideal, is_range_check, adjust_check. 441 uncommon_trap(Deoptimization::Reason_range_check, 442 Deoptimization::Action_make_not_entrant, 443 NULL, "range_check"); 444 } else { 445 // If we have already recompiled with the range-check-widening 446 // heroic optimization turned off, then we must really be throwing 447 // range check exceptions. 448 builtin_throw(Deoptimization::Reason_range_check, idx); 449 } 450 } 451 } 452 // Check for always knowing you are throwing a range-check exception 453 if (stopped()) return top(); 454 455 // Speculate on the array not being null-free 456 if (!arytype->is_not_null_free() && arytype->speculative() != NULL && arytype->speculative()->isa_aryptr() != NULL && 457 arytype->speculative()->is_aryptr()->is_not_null_free() && 458 !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_class_check)) { 459 Node* tst = gen_null_free_array_check(ary); 460 { 461 BuildCutout unless(this, tst, PROB_ALWAYS); 462 uncommon_trap(Deoptimization::Reason_speculate_class_check, 463 Deoptimization::Action_maybe_recompile); 464 } 465 Node* cast = new CheckCastPPNode(control(), ary, arytype->cast_to_not_null_free()); 466 replace_in_map(ary, _gvn.transform(cast)); 467 } 468 469 // Make array address computation control dependent to prevent it 470 // from floating above the range check during loop optimizations. 471 Node* ptr = array_element_address(ary, idx, type, sizetype, control()); 472 473 if (result2 != NULL) *result2 = elemtype; 474 475 assert(ptr != top(), "top should go hand-in-hand with stopped"); 476 477 return ptr; 478 } 479 480 481 // returns IfNode 482 IfNode* Parse::jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask, float prob, float cnt) { 483 Node *cmp = _gvn.transform(new CmpINode(a, b)); // two cases: shiftcount > 32 and shiftcount <= 32 484 Node *tst = _gvn.transform(new BoolNode(cmp, mask)); 485 IfNode *iff = create_and_map_if(control(), tst, prob, cnt); 486 return iff; 487 } 488 489 // return Region node 490 Node* Parse::jump_if_join(Node* iffalse, Node* iftrue) { 491 Node *region = new RegionNode(3); // 2 results 492 record_for_igvn(region); 493 region->init_req(1, iffalse); 494 region->init_req(2, iftrue ); 495 _gvn.set_type(region, Type::CONTROL); 496 region = _gvn.transform(region); 497 set_control (region); 498 return region; 499 } 500 501 // sentinel value for the target bci to mark never taken branches 502 // (according to profiling) 503 static const int never_reached = INT_MAX; 504 505 //------------------------------helper for tableswitch------------------------- 506 void Parse::jump_if_true_fork(IfNode *iff, int dest_bci_if_true, int prof_table_index, bool unc) { 507 // True branch, use existing map info 508 { PreserveJVMState pjvms(this); 509 Node *iftrue = _gvn.transform( new IfTrueNode (iff) ); 510 set_control( iftrue ); 511 if (unc) { 512 repush_if_args(); 513 uncommon_trap(Deoptimization::Reason_unstable_if, 514 Deoptimization::Action_reinterpret, 515 NULL, 516 "taken always"); 517 } else { 518 assert(dest_bci_if_true != never_reached, "inconsistent dest"); 519 profile_switch_case(prof_table_index); 520 merge_new_path(dest_bci_if_true); 521 } 522 } 523 524 // False branch 525 Node *iffalse = _gvn.transform( new IfFalseNode(iff) ); 526 set_control( iffalse ); 527 } 528 529 void Parse::jump_if_false_fork(IfNode *iff, int dest_bci_if_true, int prof_table_index, bool unc) { 530 // True branch, use existing map info 531 { PreserveJVMState pjvms(this); 532 Node *iffalse = _gvn.transform( new IfFalseNode (iff) ); 533 set_control( iffalse ); 534 if (unc) { 535 repush_if_args(); 536 uncommon_trap(Deoptimization::Reason_unstable_if, 537 Deoptimization::Action_reinterpret, 538 NULL, 539 "taken never"); 540 } else { 541 assert(dest_bci_if_true != never_reached, "inconsistent dest"); 542 profile_switch_case(prof_table_index); 543 merge_new_path(dest_bci_if_true); 544 } 545 } 546 547 // False branch 548 Node *iftrue = _gvn.transform( new IfTrueNode(iff) ); 549 set_control( iftrue ); 550 } 551 552 void Parse::jump_if_always_fork(int dest_bci, int prof_table_index, bool unc) { 553 // False branch, use existing map and control() 554 if (unc) { 555 repush_if_args(); 556 uncommon_trap(Deoptimization::Reason_unstable_if, 557 Deoptimization::Action_reinterpret, 558 NULL, 559 "taken never"); 560 } else { 561 assert(dest_bci != never_reached, "inconsistent dest"); 562 profile_switch_case(prof_table_index); 563 merge_new_path(dest_bci); 564 } 565 } 566 567 568 extern "C" { 569 static int jint_cmp(const void *i, const void *j) { 570 int a = *(jint *)i; 571 int b = *(jint *)j; 572 return a > b ? 1 : a < b ? -1 : 0; 573 } 574 } 575 576 577 // Default value for methodData switch indexing. Must be a negative value to avoid 578 // conflict with any legal switch index. 579 #define NullTableIndex -1 580 581 class SwitchRange : public StackObj { 582 // a range of integers coupled with a bci destination 583 jint _lo; // inclusive lower limit 584 jint _hi; // inclusive upper limit 585 int _dest; 586 int _table_index; // index into method data table 587 float _cnt; // how many times this range was hit according to profiling 588 589 public: 590 jint lo() const { return _lo; } 591 jint hi() const { return _hi; } 592 int dest() const { return _dest; } 593 int table_index() const { return _table_index; } 594 bool is_singleton() const { return _lo == _hi; } 595 float cnt() const { return _cnt; } 596 597 void setRange(jint lo, jint hi, int dest, int table_index, float cnt) { 598 assert(lo <= hi, "must be a non-empty range"); 599 _lo = lo, _hi = hi; _dest = dest; _table_index = table_index; _cnt = cnt; 600 assert(_cnt >= 0, ""); 601 } 602 bool adjoinRange(jint lo, jint hi, int dest, int table_index, float cnt, bool trim_ranges) { 603 assert(lo <= hi, "must be a non-empty range"); 604 if (lo == _hi+1 && table_index == _table_index) { 605 // see merge_ranges() comment below 606 if (trim_ranges) { 607 if (cnt == 0) { 608 if (_cnt != 0) { 609 return false; 610 } 611 if (dest != _dest) { 612 _dest = never_reached; 613 } 614 } else { 615 if (_cnt == 0) { 616 return false; 617 } 618 if (dest != _dest) { 619 return false; 620 } 621 } 622 } else { 623 if (dest != _dest) { 624 return false; 625 } 626 } 627 _hi = hi; 628 _cnt += cnt; 629 return true; 630 } 631 return false; 632 } 633 634 void set (jint value, int dest, int table_index, float cnt) { 635 setRange(value, value, dest, table_index, cnt); 636 } 637 bool adjoin(jint value, int dest, int table_index, float cnt, bool trim_ranges) { 638 return adjoinRange(value, value, dest, table_index, cnt, trim_ranges); 639 } 640 bool adjoin(SwitchRange& other) { 641 return adjoinRange(other._lo, other._hi, other._dest, other._table_index, other._cnt, false); 642 } 643 644 void print() { 645 if (is_singleton()) 646 tty->print(" {%d}=>%d (cnt=%f)", lo(), dest(), cnt()); 647 else if (lo() == min_jint) 648 tty->print(" {..%d}=>%d (cnt=%f)", hi(), dest(), cnt()); 649 else if (hi() == max_jint) 650 tty->print(" {%d..}=>%d (cnt=%f)", lo(), dest(), cnt()); 651 else 652 tty->print(" {%d..%d}=>%d (cnt=%f)", lo(), hi(), dest(), cnt()); 653 } 654 }; 655 656 // We try to minimize the number of ranges and the size of the taken 657 // ones using profiling data. When ranges are created, 658 // SwitchRange::adjoinRange() only allows 2 adjoining ranges to merge 659 // if both were never hit or both were hit to build longer unreached 660 // ranges. Here, we now merge adjoining ranges with the same 661 // destination and finally set destination of unreached ranges to the 662 // special value never_reached because it can help minimize the number 663 // of tests that are necessary. 664 // 665 // For instance: 666 // [0, 1] to target1 sometimes taken 667 // [1, 2] to target1 never taken 668 // [2, 3] to target2 never taken 669 // would lead to: 670 // [0, 1] to target1 sometimes taken 671 // [1, 3] never taken 672 // 673 // (first 2 ranges to target1 are not merged) 674 static void merge_ranges(SwitchRange* ranges, int& rp) { 675 if (rp == 0) { 676 return; 677 } 678 int shift = 0; 679 for (int j = 0; j < rp; j++) { 680 SwitchRange& r1 = ranges[j-shift]; 681 SwitchRange& r2 = ranges[j+1]; 682 if (r1.adjoin(r2)) { 683 shift++; 684 } else if (shift > 0) { 685 ranges[j+1-shift] = r2; 686 } 687 } 688 rp -= shift; 689 for (int j = 0; j <= rp; j++) { 690 SwitchRange& r = ranges[j]; 691 if (r.cnt() == 0 && r.dest() != never_reached) { 692 r.setRange(r.lo(), r.hi(), never_reached, r.table_index(), r.cnt()); 693 } 694 } 695 } 696 697 //-------------------------------do_tableswitch-------------------------------- 698 void Parse::do_tableswitch() { 699 Node* lookup = pop(); 700 // Get information about tableswitch 701 int default_dest = iter().get_dest_table(0); 702 int lo_index = iter().get_int_table(1); 703 int hi_index = iter().get_int_table(2); 704 int len = hi_index - lo_index + 1; 705 706 if (len < 1) { 707 // If this is a backward branch, add safepoint 708 maybe_add_safepoint(default_dest); 709 merge(default_dest); 710 return; 711 } 712 713 ciMethodData* methodData = method()->method_data(); 714 ciMultiBranchData* profile = NULL; 715 if (methodData->is_mature() && UseSwitchProfiling) { 716 ciProfileData* data = methodData->bci_to_data(bci()); 717 if (data != NULL && data->is_MultiBranchData()) { 718 profile = (ciMultiBranchData*)data; 719 } 720 } 721 bool trim_ranges = !method_data_update() && !C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if); 722 723 // generate decision tree, using trichotomy when possible 724 int rnum = len+2; 725 bool makes_backward_branch = false; 726 SwitchRange* ranges = NEW_RESOURCE_ARRAY(SwitchRange, rnum); 727 int rp = -1; 728 if (lo_index != min_jint) { 729 uint cnt = 1; 730 if (profile != NULL) { 731 cnt = profile->default_count() / (hi_index != max_jint ? 2 : 1); 732 } 733 ranges[++rp].setRange(min_jint, lo_index-1, default_dest, NullTableIndex, cnt); 734 } 735 for (int j = 0; j < len; j++) { 736 jint match_int = lo_index+j; 737 int dest = iter().get_dest_table(j+3); 738 makes_backward_branch |= (dest <= bci()); 739 int table_index = method_data_update() ? j : NullTableIndex; 740 uint cnt = 1; 741 if (profile != NULL) { 742 cnt = profile->count_at(j); 743 } 744 if (rp < 0 || !ranges[rp].adjoin(match_int, dest, table_index, cnt, trim_ranges)) { 745 ranges[++rp].set(match_int, dest, table_index, cnt); 746 } 747 } 748 jint highest = lo_index+(len-1); 749 assert(ranges[rp].hi() == highest, ""); 750 if (highest != max_jint) { 751 uint cnt = 1; 752 if (profile != NULL) { 753 cnt = profile->default_count() / (lo_index != min_jint ? 2 : 1); 754 } 755 if (!ranges[rp].adjoinRange(highest+1, max_jint, default_dest, NullTableIndex, cnt, trim_ranges)) { 756 ranges[++rp].setRange(highest+1, max_jint, default_dest, NullTableIndex, cnt); 757 } 758 } 759 assert(rp < len+2, "not too many ranges"); 760 761 if (trim_ranges) { 762 merge_ranges(ranges, rp); 763 } 764 765 // Safepoint in case if backward branch observed 766 if( makes_backward_branch && UseLoopSafepoints ) 767 add_safepoint(); 768 769 jump_switch_ranges(lookup, &ranges[0], &ranges[rp]); 770 } 771 772 773 //------------------------------do_lookupswitch-------------------------------- 774 void Parse::do_lookupswitch() { 775 Node *lookup = pop(); // lookup value 776 // Get information about lookupswitch 777 int default_dest = iter().get_dest_table(0); 778 int len = iter().get_int_table(1); 779 780 if (len < 1) { // If this is a backward branch, add safepoint 781 maybe_add_safepoint(default_dest); 782 merge(default_dest); 783 return; 784 } 785 786 ciMethodData* methodData = method()->method_data(); 787 ciMultiBranchData* profile = NULL; 788 if (methodData->is_mature() && UseSwitchProfiling) { 789 ciProfileData* data = methodData->bci_to_data(bci()); 790 if (data != NULL && data->is_MultiBranchData()) { 791 profile = (ciMultiBranchData*)data; 792 } 793 } 794 bool trim_ranges = !method_data_update() && !C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if); 795 796 // generate decision tree, using trichotomy when possible 797 jint* table = NEW_RESOURCE_ARRAY(jint, len*3); 798 { 799 for (int j = 0; j < len; j++) { 800 table[3*j+0] = iter().get_int_table(2+2*j); 801 table[3*j+1] = iter().get_dest_table(2+2*j+1); 802 table[3*j+2] = profile == NULL ? 1 : profile->count_at(j); 803 } 804 qsort(table, len, 3*sizeof(table[0]), jint_cmp); 805 } 806 807 float defaults = 0; 808 jint prev = min_jint; 809 for (int j = 0; j < len; j++) { 810 jint match_int = table[3*j+0]; 811 if (match_int != prev) { 812 defaults += (float)match_int - prev; 813 } 814 prev = match_int+1; 815 } 816 if (prev-1 != max_jint) { 817 defaults += (float)max_jint - prev + 1; 818 } 819 float default_cnt = 1; 820 if (profile != NULL) { 821 default_cnt = profile->default_count()/defaults; 822 } 823 824 int rnum = len*2+1; 825 bool makes_backward_branch = false; 826 SwitchRange* ranges = NEW_RESOURCE_ARRAY(SwitchRange, rnum); 827 int rp = -1; 828 for (int j = 0; j < len; j++) { 829 jint match_int = table[3*j+0]; 830 int dest = table[3*j+1]; 831 int cnt = table[3*j+2]; 832 int next_lo = rp < 0 ? min_jint : ranges[rp].hi()+1; 833 int table_index = method_data_update() ? j : NullTableIndex; 834 makes_backward_branch |= (dest <= bci()); 835 float c = default_cnt * ((float)match_int - next_lo); 836 if (match_int != next_lo && (rp < 0 || !ranges[rp].adjoinRange(next_lo, match_int-1, default_dest, NullTableIndex, c, trim_ranges))) { 837 assert(default_dest != never_reached, "sentinel value for dead destinations"); 838 ranges[++rp].setRange(next_lo, match_int-1, default_dest, NullTableIndex, c); 839 } 840 if (rp < 0 || !ranges[rp].adjoin(match_int, dest, table_index, cnt, trim_ranges)) { 841 assert(dest != never_reached, "sentinel value for dead destinations"); 842 ranges[++rp].set(match_int, dest, table_index, cnt); 843 } 844 } 845 jint highest = table[3*(len-1)]; 846 assert(ranges[rp].hi() == highest, ""); 847 if (highest != max_jint && 848 !ranges[rp].adjoinRange(highest+1, max_jint, default_dest, NullTableIndex, default_cnt * ((float)max_jint - highest), trim_ranges)) { 849 ranges[++rp].setRange(highest+1, max_jint, default_dest, NullTableIndex, default_cnt * ((float)max_jint - highest)); 850 } 851 assert(rp < rnum, "not too many ranges"); 852 853 if (trim_ranges) { 854 merge_ranges(ranges, rp); 855 } 856 857 // Safepoint in case backward branch observed 858 if (makes_backward_branch && UseLoopSafepoints) 859 add_safepoint(); 860 861 jump_switch_ranges(lookup, &ranges[0], &ranges[rp]); 862 } 863 864 static float if_prob(float taken_cnt, float total_cnt) { 865 assert(taken_cnt <= total_cnt, ""); 866 if (total_cnt == 0) { 867 return PROB_FAIR; 868 } 869 float p = taken_cnt / total_cnt; 870 return MIN2(MAX2(p, PROB_MIN), PROB_MAX); 871 } 872 873 static float if_cnt(float cnt) { 874 if (cnt == 0) { 875 return COUNT_UNKNOWN; 876 } 877 return cnt; 878 } 879 880 static float sum_of_cnts(SwitchRange *lo, SwitchRange *hi) { 881 float total_cnt = 0; 882 for (SwitchRange* sr = lo; sr <= hi; sr++) { 883 total_cnt += sr->cnt(); 884 } 885 return total_cnt; 886 } 887 888 class SwitchRanges : public ResourceObj { 889 public: 890 SwitchRange* _lo; 891 SwitchRange* _hi; 892 SwitchRange* _mid; 893 float _cost; 894 895 enum { 896 Start, 897 LeftDone, 898 RightDone, 899 Done 900 } _state; 901 902 SwitchRanges(SwitchRange *lo, SwitchRange *hi) 903 : _lo(lo), _hi(hi), _mid(NULL), 904 _cost(0), _state(Start) { 905 } 906 907 SwitchRanges() 908 : _lo(NULL), _hi(NULL), _mid(NULL), 909 _cost(0), _state(Start) {} 910 }; 911 912 // Estimate cost of performing a binary search on lo..hi 913 static float compute_tree_cost(SwitchRange *lo, SwitchRange *hi, float total_cnt) { 914 GrowableArray<SwitchRanges> tree; 915 SwitchRanges root(lo, hi); 916 tree.push(root); 917 918 float cost = 0; 919 do { 920 SwitchRanges& r = *tree.adr_at(tree.length()-1); 921 if (r._hi != r._lo) { 922 if (r._mid == NULL) { 923 float r_cnt = sum_of_cnts(r._lo, r._hi); 924 925 if (r_cnt == 0) { 926 tree.pop(); 927 cost = 0; 928 continue; 929 } 930 931 SwitchRange* mid = NULL; 932 mid = r._lo; 933 for (float cnt = 0; ; ) { 934 assert(mid <= r._hi, "out of bounds"); 935 cnt += mid->cnt(); 936 if (cnt > r_cnt / 2) { 937 break; 938 } 939 mid++; 940 } 941 assert(mid <= r._hi, "out of bounds"); 942 r._mid = mid; 943 r._cost = r_cnt / total_cnt; 944 } 945 r._cost += cost; 946 if (r._state < SwitchRanges::LeftDone && r._mid > r._lo) { 947 cost = 0; 948 r._state = SwitchRanges::LeftDone; 949 tree.push(SwitchRanges(r._lo, r._mid-1)); 950 } else if (r._state < SwitchRanges::RightDone) { 951 cost = 0; 952 r._state = SwitchRanges::RightDone; 953 tree.push(SwitchRanges(r._mid == r._lo ? r._mid+1 : r._mid, r._hi)); 954 } else { 955 tree.pop(); 956 cost = r._cost; 957 } 958 } else { 959 tree.pop(); 960 cost = r._cost; 961 } 962 } while (tree.length() > 0); 963 964 965 return cost; 966 } 967 968 // It sometimes pays off to test most common ranges before the binary search 969 void Parse::linear_search_switch_ranges(Node* key_val, SwitchRange*& lo, SwitchRange*& hi) { 970 uint nr = hi - lo + 1; 971 float total_cnt = sum_of_cnts(lo, hi); 972 973 float min = compute_tree_cost(lo, hi, total_cnt); 974 float extra = 1; 975 float sub = 0; 976 977 SwitchRange* array1 = lo; 978 SwitchRange* array2 = NEW_RESOURCE_ARRAY(SwitchRange, nr); 979 980 SwitchRange* ranges = NULL; 981 982 while (nr >= 2) { 983 assert(lo == array1 || lo == array2, "one the 2 already allocated arrays"); 984 ranges = (lo == array1) ? array2 : array1; 985 986 // Find highest frequency range 987 SwitchRange* candidate = lo; 988 for (SwitchRange* sr = lo+1; sr <= hi; sr++) { 989 if (sr->cnt() > candidate->cnt()) { 990 candidate = sr; 991 } 992 } 993 SwitchRange most_freq = *candidate; 994 if (most_freq.cnt() == 0) { 995 break; 996 } 997 998 // Copy remaining ranges into another array 999 int shift = 0; 1000 for (uint i = 0; i < nr; i++) { 1001 SwitchRange* sr = &lo[i]; 1002 if (sr != candidate) { 1003 ranges[i-shift] = *sr; 1004 } else { 1005 shift++; 1006 if (i > 0 && i < nr-1) { 1007 SwitchRange prev = lo[i-1]; 1008 prev.setRange(prev.lo(), sr->hi(), prev.dest(), prev.table_index(), prev.cnt()); 1009 if (prev.adjoin(lo[i+1])) { 1010 shift++; 1011 i++; 1012 } 1013 ranges[i-shift] = prev; 1014 } 1015 } 1016 } 1017 nr -= shift; 1018 1019 // Evaluate cost of testing the most common range and performing a 1020 // binary search on the other ranges 1021 float cost = extra + compute_tree_cost(&ranges[0], &ranges[nr-1], total_cnt); 1022 if (cost >= min) { 1023 break; 1024 } 1025 // swap arrays 1026 lo = &ranges[0]; 1027 hi = &ranges[nr-1]; 1028 1029 // It pays off: emit the test for the most common range 1030 assert(most_freq.cnt() > 0, "must be taken"); 1031 Node* val = _gvn.transform(new SubINode(key_val, _gvn.intcon(most_freq.lo()))); 1032 Node* cmp = _gvn.transform(new CmpUNode(val, _gvn.intcon(most_freq.hi() - most_freq.lo()))); 1033 Node* tst = _gvn.transform(new BoolNode(cmp, BoolTest::le)); 1034 IfNode* iff = create_and_map_if(control(), tst, if_prob(most_freq.cnt(), total_cnt), if_cnt(most_freq.cnt())); 1035 jump_if_true_fork(iff, most_freq.dest(), most_freq.table_index(), false); 1036 1037 sub += most_freq.cnt() / total_cnt; 1038 extra += 1 - sub; 1039 min = cost; 1040 } 1041 } 1042 1043 //----------------------------create_jump_tables------------------------------- 1044 bool Parse::create_jump_tables(Node* key_val, SwitchRange* lo, SwitchRange* hi) { 1045 // Are jumptables enabled 1046 if (!UseJumpTables) return false; 1047 1048 // Are jumptables supported 1049 if (!Matcher::has_match_rule(Op_Jump)) return false; 1050 1051 // Don't make jump table if profiling 1052 if (method_data_update()) return false; 1053 1054 bool trim_ranges = !C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if); 1055 1056 // Decide if a guard is needed to lop off big ranges at either (or 1057 // both) end(s) of the input set. We'll call this the default target 1058 // even though we can't be sure that it is the true "default". 1059 1060 bool needs_guard = false; 1061 int default_dest; 1062 int64_t total_outlier_size = 0; 1063 int64_t hi_size = ((int64_t)hi->hi()) - ((int64_t)hi->lo()) + 1; 1064 int64_t lo_size = ((int64_t)lo->hi()) - ((int64_t)lo->lo()) + 1; 1065 1066 if (lo->dest() == hi->dest()) { 1067 total_outlier_size = hi_size + lo_size; 1068 default_dest = lo->dest(); 1069 } else if (lo_size > hi_size) { 1070 total_outlier_size = lo_size; 1071 default_dest = lo->dest(); 1072 } else { 1073 total_outlier_size = hi_size; 1074 default_dest = hi->dest(); 1075 } 1076 1077 float total = sum_of_cnts(lo, hi); 1078 float cost = compute_tree_cost(lo, hi, total); 1079 1080 // If a guard test will eliminate very sparse end ranges, then 1081 // it is worth the cost of an extra jump. 1082 float trimmed_cnt = 0; 1083 if (total_outlier_size > (MaxJumpTableSparseness * 4)) { 1084 needs_guard = true; 1085 if (default_dest == lo->dest()) { 1086 trimmed_cnt += lo->cnt(); 1087 lo++; 1088 } 1089 if (default_dest == hi->dest()) { 1090 trimmed_cnt += hi->cnt(); 1091 hi--; 1092 } 1093 } 1094 1095 // Find the total number of cases and ranges 1096 int64_t num_cases = ((int64_t)hi->hi()) - ((int64_t)lo->lo()) + 1; 1097 int num_range = hi - lo + 1; 1098 1099 // Don't create table if: too large, too small, or too sparse. 1100 if (num_cases > MaxJumpTableSize) 1101 return false; 1102 if (UseSwitchProfiling) { 1103 // MinJumpTableSize is set so with a well balanced binary tree, 1104 // when the number of ranges is MinJumpTableSize, it's cheaper to 1105 // go through a JumpNode that a tree of IfNodes. Average cost of a 1106 // tree of IfNodes with MinJumpTableSize is 1107 // log2f(MinJumpTableSize) comparisons. So if the cost computed 1108 // from profile data is less than log2f(MinJumpTableSize) then 1109 // going with the binary search is cheaper. 1110 if (cost < log2f(MinJumpTableSize)) { 1111 return false; 1112 } 1113 } else { 1114 if (num_cases < MinJumpTableSize) 1115 return false; 1116 } 1117 if (num_cases > (MaxJumpTableSparseness * num_range)) 1118 return false; 1119 1120 // Normalize table lookups to zero 1121 int lowval = lo->lo(); 1122 key_val = _gvn.transform( new SubINode(key_val, _gvn.intcon(lowval)) ); 1123 1124 // Generate a guard to protect against input keyvals that aren't 1125 // in the switch domain. 1126 if (needs_guard) { 1127 Node* size = _gvn.intcon(num_cases); 1128 Node* cmp = _gvn.transform(new CmpUNode(key_val, size)); 1129 Node* tst = _gvn.transform(new BoolNode(cmp, BoolTest::ge)); 1130 IfNode* iff = create_and_map_if(control(), tst, if_prob(trimmed_cnt, total), if_cnt(trimmed_cnt)); 1131 jump_if_true_fork(iff, default_dest, NullTableIndex, trim_ranges && trimmed_cnt == 0); 1132 1133 total -= trimmed_cnt; 1134 } 1135 1136 // Create an ideal node JumpTable that has projections 1137 // of all possible ranges for a switch statement 1138 // The key_val input must be converted to a pointer offset and scaled. 1139 // Compare Parse::array_addressing above. 1140 1141 // Clean the 32-bit int into a real 64-bit offset. 1142 // Otherwise, the jint value 0 might turn into an offset of 0x0800000000. 1143 const TypeInt* ikeytype = TypeInt::make(0, num_cases, Type::WidenMin); 1144 // Make I2L conversion control dependent to prevent it from 1145 // floating above the range check during loop optimizations. 1146 key_val = C->conv_I2X_index(&_gvn, key_val, ikeytype, control()); 1147 1148 // Shift the value by wordsize so we have an index into the table, rather 1149 // than a switch value 1150 Node *shiftWord = _gvn.MakeConX(wordSize); 1151 key_val = _gvn.transform( new MulXNode( key_val, shiftWord)); 1152 1153 // Create the JumpNode 1154 Arena* arena = C->comp_arena(); 1155 float* probs = (float*)arena->Amalloc(sizeof(float)*num_cases); 1156 int i = 0; 1157 if (total == 0) { 1158 for (SwitchRange* r = lo; r <= hi; r++) { 1159 for (int64_t j = r->lo(); j <= r->hi(); j++, i++) { 1160 probs[i] = 1.0F / num_cases; 1161 } 1162 } 1163 } else { 1164 for (SwitchRange* r = lo; r <= hi; r++) { 1165 float prob = r->cnt()/total; 1166 for (int64_t j = r->lo(); j <= r->hi(); j++, i++) { 1167 probs[i] = prob / (r->hi() - r->lo() + 1); 1168 } 1169 } 1170 } 1171 1172 ciMethodData* methodData = method()->method_data(); 1173 ciMultiBranchData* profile = NULL; 1174 if (methodData->is_mature()) { 1175 ciProfileData* data = methodData->bci_to_data(bci()); 1176 if (data != NULL && data->is_MultiBranchData()) { 1177 profile = (ciMultiBranchData*)data; 1178 } 1179 } 1180 1181 Node* jtn = _gvn.transform(new JumpNode(control(), key_val, num_cases, probs, profile == NULL ? COUNT_UNKNOWN : total)); 1182 1183 // These are the switch destinations hanging off the jumpnode 1184 i = 0; 1185 for (SwitchRange* r = lo; r <= hi; r++) { 1186 for (int64_t j = r->lo(); j <= r->hi(); j++, i++) { 1187 Node* input = _gvn.transform(new JumpProjNode(jtn, i, r->dest(), (int)(j - lowval))); 1188 { 1189 PreserveJVMState pjvms(this); 1190 set_control(input); 1191 jump_if_always_fork(r->dest(), r->table_index(), trim_ranges && r->cnt() == 0); 1192 } 1193 } 1194 } 1195 assert(i == num_cases, "miscount of cases"); 1196 stop_and_kill_map(); // no more uses for this JVMS 1197 return true; 1198 } 1199 1200 //----------------------------jump_switch_ranges------------------------------- 1201 void Parse::jump_switch_ranges(Node* key_val, SwitchRange *lo, SwitchRange *hi, int switch_depth) { 1202 Block* switch_block = block(); 1203 bool trim_ranges = !method_data_update() && !C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if); 1204 1205 if (switch_depth == 0) { 1206 // Do special processing for the top-level call. 1207 assert(lo->lo() == min_jint, "initial range must exhaust Type::INT"); 1208 assert(hi->hi() == max_jint, "initial range must exhaust Type::INT"); 1209 1210 // Decrement pred-numbers for the unique set of nodes. 1211 #ifdef ASSERT 1212 if (!trim_ranges) { 1213 // Ensure that the block's successors are a (duplicate-free) set. 1214 int successors_counted = 0; // block occurrences in [hi..lo] 1215 int unique_successors = switch_block->num_successors(); 1216 for (int i = 0; i < unique_successors; i++) { 1217 Block* target = switch_block->successor_at(i); 1218 1219 // Check that the set of successors is the same in both places. 1220 int successors_found = 0; 1221 for (SwitchRange* p = lo; p <= hi; p++) { 1222 if (p->dest() == target->start()) successors_found++; 1223 } 1224 assert(successors_found > 0, "successor must be known"); 1225 successors_counted += successors_found; 1226 } 1227 assert(successors_counted == (hi-lo)+1, "no unexpected successors"); 1228 } 1229 #endif 1230 1231 // Maybe prune the inputs, based on the type of key_val. 1232 jint min_val = min_jint; 1233 jint max_val = max_jint; 1234 const TypeInt* ti = key_val->bottom_type()->isa_int(); 1235 if (ti != NULL) { 1236 min_val = ti->_lo; 1237 max_val = ti->_hi; 1238 assert(min_val <= max_val, "invalid int type"); 1239 } 1240 while (lo->hi() < min_val) { 1241 lo++; 1242 } 1243 if (lo->lo() < min_val) { 1244 lo->setRange(min_val, lo->hi(), lo->dest(), lo->table_index(), lo->cnt()); 1245 } 1246 while (hi->lo() > max_val) { 1247 hi--; 1248 } 1249 if (hi->hi() > max_val) { 1250 hi->setRange(hi->lo(), max_val, hi->dest(), hi->table_index(), hi->cnt()); 1251 } 1252 1253 linear_search_switch_ranges(key_val, lo, hi); 1254 } 1255 1256 #ifndef PRODUCT 1257 if (switch_depth == 0) { 1258 _max_switch_depth = 0; 1259 _est_switch_depth = log2_intptr((hi-lo+1)-1)+1; 1260 } 1261 #endif 1262 1263 assert(lo <= hi, "must be a non-empty set of ranges"); 1264 if (lo == hi) { 1265 jump_if_always_fork(lo->dest(), lo->table_index(), trim_ranges && lo->cnt() == 0); 1266 } else { 1267 assert(lo->hi() == (lo+1)->lo()-1, "contiguous ranges"); 1268 assert(hi->lo() == (hi-1)->hi()+1, "contiguous ranges"); 1269 1270 if (create_jump_tables(key_val, lo, hi)) return; 1271 1272 SwitchRange* mid = NULL; 1273 float total_cnt = sum_of_cnts(lo, hi); 1274 1275 int nr = hi - lo + 1; 1276 if (UseSwitchProfiling) { 1277 // Don't keep the binary search tree balanced: pick up mid point 1278 // that split frequencies in half. 1279 float cnt = 0; 1280 for (SwitchRange* sr = lo; sr <= hi; sr++) { 1281 cnt += sr->cnt(); 1282 if (cnt >= total_cnt / 2) { 1283 mid = sr; 1284 break; 1285 } 1286 } 1287 } else { 1288 mid = lo + nr/2; 1289 1290 // if there is an easy choice, pivot at a singleton: 1291 if (nr > 3 && !mid->is_singleton() && (mid-1)->is_singleton()) mid--; 1292 1293 assert(lo < mid && mid <= hi, "good pivot choice"); 1294 assert(nr != 2 || mid == hi, "should pick higher of 2"); 1295 assert(nr != 3 || mid == hi-1, "should pick middle of 3"); 1296 } 1297 1298 1299 Node *test_val = _gvn.intcon(mid == lo ? mid->hi() : mid->lo()); 1300 1301 if (mid->is_singleton()) { 1302 IfNode *iff_ne = jump_if_fork_int(key_val, test_val, BoolTest::ne, 1-if_prob(mid->cnt(), total_cnt), if_cnt(mid->cnt())); 1303 jump_if_false_fork(iff_ne, mid->dest(), mid->table_index(), trim_ranges && mid->cnt() == 0); 1304 1305 // Special Case: If there are exactly three ranges, and the high 1306 // and low range each go to the same place, omit the "gt" test, 1307 // since it will not discriminate anything. 1308 bool eq_test_only = (hi == lo+2 && hi->dest() == lo->dest() && mid == hi-1) || mid == lo; 1309 1310 // if there is a higher range, test for it and process it: 1311 if (mid < hi && !eq_test_only) { 1312 // two comparisons of same values--should enable 1 test for 2 branches 1313 // Use BoolTest::le instead of BoolTest::gt 1314 float cnt = sum_of_cnts(lo, mid-1); 1315 IfNode *iff_le = jump_if_fork_int(key_val, test_val, BoolTest::le, if_prob(cnt, total_cnt), if_cnt(cnt)); 1316 Node *iftrue = _gvn.transform( new IfTrueNode(iff_le) ); 1317 Node *iffalse = _gvn.transform( new IfFalseNode(iff_le) ); 1318 { PreserveJVMState pjvms(this); 1319 set_control(iffalse); 1320 jump_switch_ranges(key_val, mid+1, hi, switch_depth+1); 1321 } 1322 set_control(iftrue); 1323 } 1324 1325 } else { 1326 // mid is a range, not a singleton, so treat mid..hi as a unit 1327 float cnt = sum_of_cnts(mid == lo ? mid+1 : mid, hi); 1328 IfNode *iff_ge = jump_if_fork_int(key_val, test_val, mid == lo ? BoolTest::gt : BoolTest::ge, if_prob(cnt, total_cnt), if_cnt(cnt)); 1329 1330 // if there is a higher range, test for it and process it: 1331 if (mid == hi) { 1332 jump_if_true_fork(iff_ge, mid->dest(), mid->table_index(), trim_ranges && cnt == 0); 1333 } else { 1334 Node *iftrue = _gvn.transform( new IfTrueNode(iff_ge) ); 1335 Node *iffalse = _gvn.transform( new IfFalseNode(iff_ge) ); 1336 { PreserveJVMState pjvms(this); 1337 set_control(iftrue); 1338 jump_switch_ranges(key_val, mid == lo ? mid+1 : mid, hi, switch_depth+1); 1339 } 1340 set_control(iffalse); 1341 } 1342 } 1343 1344 // in any case, process the lower range 1345 if (mid == lo) { 1346 if (mid->is_singleton()) { 1347 jump_switch_ranges(key_val, lo+1, hi, switch_depth+1); 1348 } else { 1349 jump_if_always_fork(lo->dest(), lo->table_index(), trim_ranges && lo->cnt() == 0); 1350 } 1351 } else { 1352 jump_switch_ranges(key_val, lo, mid-1, switch_depth+1); 1353 } 1354 } 1355 1356 // Decrease pred_count for each successor after all is done. 1357 if (switch_depth == 0) { 1358 int unique_successors = switch_block->num_successors(); 1359 for (int i = 0; i < unique_successors; i++) { 1360 Block* target = switch_block->successor_at(i); 1361 // Throw away the pre-allocated path for each unique successor. 1362 target->next_path_num(); 1363 } 1364 } 1365 1366 #ifndef PRODUCT 1367 _max_switch_depth = MAX2(switch_depth, _max_switch_depth); 1368 if (TraceOptoParse && Verbose && WizardMode && switch_depth == 0) { 1369 SwitchRange* r; 1370 int nsing = 0; 1371 for( r = lo; r <= hi; r++ ) { 1372 if( r->is_singleton() ) nsing++; 1373 } 1374 tty->print(">>> "); 1375 _method->print_short_name(); 1376 tty->print_cr(" switch decision tree"); 1377 tty->print_cr(" %d ranges (%d singletons), max_depth=%d, est_depth=%d", 1378 (int) (hi-lo+1), nsing, _max_switch_depth, _est_switch_depth); 1379 if (_max_switch_depth > _est_switch_depth) { 1380 tty->print_cr("******** BAD SWITCH DEPTH ********"); 1381 } 1382 tty->print(" "); 1383 for( r = lo; r <= hi; r++ ) { 1384 r->print(); 1385 } 1386 tty->cr(); 1387 } 1388 #endif 1389 } 1390 1391 void Parse::modf() { 1392 Node *f2 = pop(); 1393 Node *f1 = pop(); 1394 Node* c = make_runtime_call(RC_LEAF, OptoRuntime::modf_Type(), 1395 CAST_FROM_FN_PTR(address, SharedRuntime::frem), 1396 "frem", NULL, //no memory effects 1397 f1, f2); 1398 Node* res = _gvn.transform(new ProjNode(c, TypeFunc::Parms + 0)); 1399 1400 push(res); 1401 } 1402 1403 void Parse::modd() { 1404 Node *d2 = pop_pair(); 1405 Node *d1 = pop_pair(); 1406 Node* c = make_runtime_call(RC_LEAF, OptoRuntime::Math_DD_D_Type(), 1407 CAST_FROM_FN_PTR(address, SharedRuntime::drem), 1408 "drem", NULL, //no memory effects 1409 d1, top(), d2, top()); 1410 Node* res_d = _gvn.transform(new ProjNode(c, TypeFunc::Parms + 0)); 1411 1412 #ifdef ASSERT 1413 Node* res_top = _gvn.transform(new ProjNode(c, TypeFunc::Parms + 1)); 1414 assert(res_top == top(), "second value must be top"); 1415 #endif 1416 1417 push_pair(res_d); 1418 } 1419 1420 void Parse::l2f() { 1421 Node* f2 = pop(); 1422 Node* f1 = pop(); 1423 Node* c = make_runtime_call(RC_LEAF, OptoRuntime::l2f_Type(), 1424 CAST_FROM_FN_PTR(address, SharedRuntime::l2f), 1425 "l2f", NULL, //no memory effects 1426 f1, f2); 1427 Node* res = _gvn.transform(new ProjNode(c, TypeFunc::Parms + 0)); 1428 1429 push(res); 1430 } 1431 1432 void Parse::do_irem() { 1433 // Must keep both values on the expression-stack during null-check 1434 zero_check_int(peek()); 1435 // Compile-time detect of null-exception? 1436 if (stopped()) return; 1437 1438 Node* b = pop(); 1439 Node* a = pop(); 1440 1441 const Type *t = _gvn.type(b); 1442 if (t != Type::TOP) { 1443 const TypeInt *ti = t->is_int(); 1444 if (ti->is_con()) { 1445 int divisor = ti->get_con(); 1446 // check for positive power of 2 1447 if (divisor > 0 && 1448 (divisor & ~(divisor-1)) == divisor) { 1449 // yes ! 1450 Node *mask = _gvn.intcon((divisor - 1)); 1451 // Sigh, must handle negative dividends 1452 Node *zero = _gvn.intcon(0); 1453 IfNode *ifff = jump_if_fork_int(a, zero, BoolTest::lt, PROB_FAIR, COUNT_UNKNOWN); 1454 Node *iff = _gvn.transform( new IfFalseNode(ifff) ); 1455 Node *ift = _gvn.transform( new IfTrueNode (ifff) ); 1456 Node *reg = jump_if_join(ift, iff); 1457 Node *phi = PhiNode::make(reg, NULL, TypeInt::INT); 1458 // Negative path; negate/and/negate 1459 Node *neg = _gvn.transform( new SubINode(zero, a) ); 1460 Node *andn= _gvn.transform( new AndINode(neg, mask) ); 1461 Node *negn= _gvn.transform( new SubINode(zero, andn) ); 1462 phi->init_req(1, negn); 1463 // Fast positive case 1464 Node *andx = _gvn.transform( new AndINode(a, mask) ); 1465 phi->init_req(2, andx); 1466 // Push the merge 1467 push( _gvn.transform(phi) ); 1468 return; 1469 } 1470 } 1471 } 1472 // Default case 1473 push( _gvn.transform( new ModINode(control(),a,b) ) ); 1474 } 1475 1476 // Handle jsr and jsr_w bytecode 1477 void Parse::do_jsr() { 1478 assert(bc() == Bytecodes::_jsr || bc() == Bytecodes::_jsr_w, "wrong bytecode"); 1479 1480 // Store information about current state, tagged with new _jsr_bci 1481 int return_bci = iter().next_bci(); 1482 int jsr_bci = (bc() == Bytecodes::_jsr) ? iter().get_dest() : iter().get_far_dest(); 1483 1484 // Update method data 1485 profile_taken_branch(jsr_bci); 1486 1487 // The way we do things now, there is only one successor block 1488 // for the jsr, because the target code is cloned by ciTypeFlow. 1489 Block* target = successor_for_bci(jsr_bci); 1490 1491 // What got pushed? 1492 const Type* ret_addr = target->peek(); 1493 assert(ret_addr->singleton(), "must be a constant (cloned jsr body)"); 1494 1495 // Effect on jsr on stack 1496 push(_gvn.makecon(ret_addr)); 1497 1498 // Flow to the jsr. 1499 merge(jsr_bci); 1500 } 1501 1502 // Handle ret bytecode 1503 void Parse::do_ret() { 1504 // Find to whom we return. 1505 assert(block()->num_successors() == 1, "a ret can only go one place now"); 1506 Block* target = block()->successor_at(0); 1507 assert(!target->is_ready(), "our arrival must be expected"); 1508 profile_ret(target->flow()->start()); 1509 int pnum = target->next_path_num(); 1510 merge_common(target, pnum); 1511 } 1512 1513 static bool has_injected_profile(BoolTest::mask btest, Node* test, int& taken, int& not_taken) { 1514 if (btest != BoolTest::eq && btest != BoolTest::ne) { 1515 // Only ::eq and ::ne are supported for profile injection. 1516 return false; 1517 } 1518 if (test->is_Cmp() && 1519 test->in(1)->Opcode() == Op_ProfileBoolean) { 1520 ProfileBooleanNode* profile = (ProfileBooleanNode*)test->in(1); 1521 int false_cnt = profile->false_count(); 1522 int true_cnt = profile->true_count(); 1523 1524 // Counts matching depends on the actual test operation (::eq or ::ne). 1525 // No need to scale the counts because profile injection was designed 1526 // to feed exact counts into VM. 1527 taken = (btest == BoolTest::eq) ? false_cnt : true_cnt; 1528 not_taken = (btest == BoolTest::eq) ? true_cnt : false_cnt; 1529 1530 profile->consume(); 1531 return true; 1532 } 1533 return false; 1534 } 1535 //--------------------------dynamic_branch_prediction-------------------------- 1536 // Try to gather dynamic branch prediction behavior. Return a probability 1537 // of the branch being taken and set the "cnt" field. Returns a -1.0 1538 // if we need to use static prediction for some reason. 1539 float Parse::dynamic_branch_prediction(float &cnt, BoolTest::mask btest, Node* test) { 1540 ResourceMark rm; 1541 1542 cnt = COUNT_UNKNOWN; 1543 1544 int taken = 0; 1545 int not_taken = 0; 1546 1547 bool use_mdo = !has_injected_profile(btest, test, taken, not_taken); 1548 1549 if (use_mdo) { 1550 // Use MethodData information if it is available 1551 // FIXME: free the ProfileData structure 1552 ciMethodData* methodData = method()->method_data(); 1553 if (!methodData->is_mature()) return PROB_UNKNOWN; 1554 ciProfileData* data = methodData->bci_to_data(bci()); 1555 if (data == NULL) { 1556 return PROB_UNKNOWN; 1557 } 1558 if (!data->is_JumpData()) return PROB_UNKNOWN; 1559 1560 // get taken and not taken values 1561 taken = data->as_JumpData()->taken(); 1562 not_taken = 0; 1563 if (data->is_BranchData()) { 1564 not_taken = data->as_BranchData()->not_taken(); 1565 } 1566 1567 // scale the counts to be commensurate with invocation counts: 1568 taken = method()->scale_count(taken); 1569 not_taken = method()->scale_count(not_taken); 1570 } 1571 1572 // Give up if too few (or too many, in which case the sum will overflow) counts to be meaningful. 1573 // We also check that individual counters are positive first, otherwise the sum can become positive. 1574 if (taken < 0 || not_taken < 0 || taken + not_taken < 40) { 1575 if (C->log() != NULL) { 1576 C->log()->elem("branch target_bci='%d' taken='%d' not_taken='%d'", iter().get_dest(), taken, not_taken); 1577 } 1578 return PROB_UNKNOWN; 1579 } 1580 1581 // Compute frequency that we arrive here 1582 float sum = taken + not_taken; 1583 // Adjust, if this block is a cloned private block but the 1584 // Jump counts are shared. Taken the private counts for 1585 // just this path instead of the shared counts. 1586 if( block()->count() > 0 ) 1587 sum = block()->count(); 1588 cnt = sum / FreqCountInvocations; 1589 1590 // Pin probability to sane limits 1591 float prob; 1592 if( !taken ) 1593 prob = (0+PROB_MIN) / 2; 1594 else if( !not_taken ) 1595 prob = (1+PROB_MAX) / 2; 1596 else { // Compute probability of true path 1597 prob = (float)taken / (float)(taken + not_taken); 1598 if (prob > PROB_MAX) prob = PROB_MAX; 1599 if (prob < PROB_MIN) prob = PROB_MIN; 1600 } 1601 1602 assert((cnt > 0.0f) && (prob > 0.0f), 1603 "Bad frequency assignment in if"); 1604 1605 if (C->log() != NULL) { 1606 const char* prob_str = NULL; 1607 if (prob >= PROB_MAX) prob_str = (prob == PROB_MAX) ? "max" : "always"; 1608 if (prob <= PROB_MIN) prob_str = (prob == PROB_MIN) ? "min" : "never"; 1609 char prob_str_buf[30]; 1610 if (prob_str == NULL) { 1611 jio_snprintf(prob_str_buf, sizeof(prob_str_buf), "%20.2f", prob); 1612 prob_str = prob_str_buf; 1613 } 1614 C->log()->elem("branch target_bci='%d' taken='%d' not_taken='%d' cnt='%f' prob='%s'", 1615 iter().get_dest(), taken, not_taken, cnt, prob_str); 1616 } 1617 return prob; 1618 } 1619 1620 //-----------------------------branch_prediction------------------------------- 1621 float Parse::branch_prediction(float& cnt, 1622 BoolTest::mask btest, 1623 int target_bci, 1624 Node* test) { 1625 float prob = dynamic_branch_prediction(cnt, btest, test); 1626 // If prob is unknown, switch to static prediction 1627 if (prob != PROB_UNKNOWN) return prob; 1628 1629 prob = PROB_FAIR; // Set default value 1630 if (btest == BoolTest::eq) // Exactly equal test? 1631 prob = PROB_STATIC_INFREQUENT; // Assume its relatively infrequent 1632 else if (btest == BoolTest::ne) 1633 prob = PROB_STATIC_FREQUENT; // Assume its relatively frequent 1634 1635 // If this is a conditional test guarding a backwards branch, 1636 // assume its a loop-back edge. Make it a likely taken branch. 1637 if (target_bci < bci()) { 1638 if (is_osr_parse()) { // Could be a hot OSR'd loop; force deopt 1639 // Since it's an OSR, we probably have profile data, but since 1640 // branch_prediction returned PROB_UNKNOWN, the counts are too small. 1641 // Let's make a special check here for completely zero counts. 1642 ciMethodData* methodData = method()->method_data(); 1643 if (!methodData->is_empty()) { 1644 ciProfileData* data = methodData->bci_to_data(bci()); 1645 // Only stop for truly zero counts, which mean an unknown part 1646 // of the OSR-ed method, and we want to deopt to gather more stats. 1647 // If you have ANY counts, then this loop is simply 'cold' relative 1648 // to the OSR loop. 1649 if (data == NULL || 1650 (data->as_BranchData()->taken() + data->as_BranchData()->not_taken() == 0)) { 1651 // This is the only way to return PROB_UNKNOWN: 1652 return PROB_UNKNOWN; 1653 } 1654 } 1655 } 1656 prob = PROB_STATIC_FREQUENT; // Likely to take backwards branch 1657 } 1658 1659 assert(prob != PROB_UNKNOWN, "must have some guess at this point"); 1660 return prob; 1661 } 1662 1663 // The magic constants are chosen so as to match the output of 1664 // branch_prediction() when the profile reports a zero taken count. 1665 // It is important to distinguish zero counts unambiguously, because 1666 // some branches (e.g., _213_javac.Assembler.eliminate) validly produce 1667 // very small but nonzero probabilities, which if confused with zero 1668 // counts would keep the program recompiling indefinitely. 1669 bool Parse::seems_never_taken(float prob) const { 1670 return prob < PROB_MIN; 1671 } 1672 1673 // True if the comparison seems to be the kind that will not change its 1674 // statistics from true to false. See comments in adjust_map_after_if. 1675 // This question is only asked along paths which are already 1676 // classifed as untaken (by seems_never_taken), so really, 1677 // if a path is never taken, its controlling comparison is 1678 // already acting in a stable fashion. If the comparison 1679 // seems stable, we will put an expensive uncommon trap 1680 // on the untaken path. 1681 bool Parse::seems_stable_comparison() const { 1682 if (C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if)) { 1683 return false; 1684 } 1685 return true; 1686 } 1687 1688 //-------------------------------repush_if_args-------------------------------- 1689 // Push arguments of an "if" bytecode back onto the stack by adjusting _sp. 1690 inline int Parse::repush_if_args() { 1691 if (PrintOpto && WizardMode) { 1692 tty->print("defending against excessive implicit null exceptions on %s @%d in ", 1693 Bytecodes::name(iter().cur_bc()), iter().cur_bci()); 1694 method()->print_name(); tty->cr(); 1695 } 1696 int bc_depth = - Bytecodes::depth(iter().cur_bc()); 1697 assert(bc_depth == 1 || bc_depth == 2, "only two kinds of branches"); 1698 DEBUG_ONLY(sync_jvms()); // argument(n) requires a synced jvms 1699 assert(argument(0) != NULL, "must exist"); 1700 assert(bc_depth == 1 || argument(1) != NULL, "two must exist"); 1701 inc_sp(bc_depth); 1702 return bc_depth; 1703 } 1704 1705 //----------------------------------do_ifnull---------------------------------- 1706 void Parse::do_ifnull(BoolTest::mask btest, Node *c) { 1707 int target_bci = iter().get_dest(); 1708 1709 Block* branch_block = successor_for_bci(target_bci); 1710 Block* next_block = successor_for_bci(iter().next_bci()); 1711 1712 float cnt; 1713 float prob = branch_prediction(cnt, btest, target_bci, c); 1714 if (prob == PROB_UNKNOWN) { 1715 // (An earlier version of do_ifnull omitted this trap for OSR methods.) 1716 if (PrintOpto && Verbose) { 1717 tty->print_cr("Never-taken edge stops compilation at bci %d", bci()); 1718 } 1719 repush_if_args(); // to gather stats on loop 1720 // We need to mark this branch as taken so that if we recompile we will 1721 // see that it is possible. In the tiered system the interpreter doesn't 1722 // do profiling and by the time we get to the lower tier from the interpreter 1723 // the path may be cold again. Make sure it doesn't look untaken 1724 profile_taken_branch(target_bci, !ProfileInterpreter); 1725 uncommon_trap(Deoptimization::Reason_unreached, 1726 Deoptimization::Action_reinterpret, 1727 NULL, "cold"); 1728 if (C->eliminate_boxing()) { 1729 // Mark the successor blocks as parsed 1730 branch_block->next_path_num(); 1731 next_block->next_path_num(); 1732 } 1733 return; 1734 } 1735 1736 NOT_PRODUCT(explicit_null_checks_inserted++); 1737 1738 // Generate real control flow 1739 Node *tst = _gvn.transform( new BoolNode( c, btest ) ); 1740 1741 // Sanity check the probability value 1742 assert(prob > 0.0f,"Bad probability in Parser"); 1743 // Need xform to put node in hash table 1744 IfNode *iff = create_and_xform_if( control(), tst, prob, cnt ); 1745 assert(iff->_prob > 0.0f,"Optimizer made bad probability in parser"); 1746 // True branch 1747 { PreserveJVMState pjvms(this); 1748 Node* iftrue = _gvn.transform( new IfTrueNode (iff) ); 1749 set_control(iftrue); 1750 1751 if (stopped()) { // Path is dead? 1752 NOT_PRODUCT(explicit_null_checks_elided++); 1753 if (C->eliminate_boxing()) { 1754 // Mark the successor block as parsed 1755 branch_block->next_path_num(); 1756 } 1757 } else { // Path is live. 1758 // Update method data 1759 profile_taken_branch(target_bci); 1760 adjust_map_after_if(btest, c, prob, branch_block); 1761 if (!stopped()) { 1762 merge(target_bci); 1763 } 1764 } 1765 } 1766 1767 // False branch 1768 Node* iffalse = _gvn.transform( new IfFalseNode(iff) ); 1769 set_control(iffalse); 1770 1771 if (stopped()) { // Path is dead? 1772 NOT_PRODUCT(explicit_null_checks_elided++); 1773 if (C->eliminate_boxing()) { 1774 // Mark the successor block as parsed 1775 next_block->next_path_num(); 1776 } 1777 } else { // Path is live. 1778 // Update method data 1779 profile_not_taken_branch(); 1780 adjust_map_after_if(BoolTest(btest).negate(), c, 1.0-prob, next_block); 1781 } 1782 } 1783 1784 //------------------------------------do_if------------------------------------ 1785 void Parse::do_if(BoolTest::mask btest, Node* c, bool new_path, Node** ctrl_taken) { 1786 int target_bci = iter().get_dest(); 1787 1788 Block* branch_block = successor_for_bci(target_bci); 1789 Block* next_block = successor_for_bci(iter().next_bci()); 1790 1791 float cnt; 1792 float prob = branch_prediction(cnt, btest, target_bci, c); 1793 float untaken_prob = 1.0 - prob; 1794 1795 if (prob == PROB_UNKNOWN) { 1796 if (PrintOpto && Verbose) { 1797 tty->print_cr("Never-taken edge stops compilation at bci %d", bci()); 1798 } 1799 repush_if_args(); // to gather stats on loop 1800 // We need to mark this branch as taken so that if we recompile we will 1801 // see that it is possible. In the tiered system the interpreter doesn't 1802 // do profiling and by the time we get to the lower tier from the interpreter 1803 // the path may be cold again. Make sure it doesn't look untaken 1804 profile_taken_branch(target_bci, !ProfileInterpreter); 1805 uncommon_trap(Deoptimization::Reason_unreached, 1806 Deoptimization::Action_reinterpret, 1807 NULL, "cold"); 1808 if (C->eliminate_boxing()) { 1809 // Mark the successor blocks as parsed 1810 branch_block->next_path_num(); 1811 next_block->next_path_num(); 1812 } 1813 return; 1814 } 1815 1816 // Sanity check the probability value 1817 assert(0.0f < prob && prob < 1.0f,"Bad probability in Parser"); 1818 1819 bool taken_if_true = true; 1820 // Convert BoolTest to canonical form: 1821 if (!BoolTest(btest).is_canonical()) { 1822 btest = BoolTest(btest).negate(); 1823 taken_if_true = false; 1824 // prob is NOT updated here; it remains the probability of the taken 1825 // path (as opposed to the prob of the path guarded by an 'IfTrueNode'). 1826 } 1827 assert(btest != BoolTest::eq, "!= is the only canonical exact test"); 1828 1829 Node* tst0 = new BoolNode(c, btest); 1830 Node* tst = _gvn.transform(tst0); 1831 BoolTest::mask taken_btest = BoolTest::illegal; 1832 BoolTest::mask untaken_btest = BoolTest::illegal; 1833 1834 if (tst->is_Bool()) { 1835 // Refresh c from the transformed bool node, since it may be 1836 // simpler than the original c. Also re-canonicalize btest. 1837 // This wins when (Bool ne (Conv2B p) 0) => (Bool ne (CmpP p NULL)). 1838 // That can arise from statements like: if (x instanceof C) ... 1839 if (tst != tst0) { 1840 // Canonicalize one more time since transform can change it. 1841 btest = tst->as_Bool()->_test._test; 1842 if (!BoolTest(btest).is_canonical()) { 1843 // Reverse edges one more time... 1844 tst = _gvn.transform( tst->as_Bool()->negate(&_gvn) ); 1845 btest = tst->as_Bool()->_test._test; 1846 assert(BoolTest(btest).is_canonical(), "sanity"); 1847 taken_if_true = !taken_if_true; 1848 } 1849 c = tst->in(1); 1850 } 1851 BoolTest::mask neg_btest = BoolTest(btest).negate(); 1852 taken_btest = taken_if_true ? btest : neg_btest; 1853 untaken_btest = taken_if_true ? neg_btest : btest; 1854 } 1855 1856 // Generate real control flow 1857 float true_prob = (taken_if_true ? prob : untaken_prob); 1858 IfNode* iff = create_and_map_if(control(), tst, true_prob, cnt); 1859 assert(iff->_prob > 0.0f,"Optimizer made bad probability in parser"); 1860 Node* taken_branch = new IfTrueNode(iff); 1861 Node* untaken_branch = new IfFalseNode(iff); 1862 if (!taken_if_true) { // Finish conversion to canonical form 1863 Node* tmp = taken_branch; 1864 taken_branch = untaken_branch; 1865 untaken_branch = tmp; 1866 } 1867 1868 // Branch is taken: 1869 { PreserveJVMState pjvms(this); 1870 taken_branch = _gvn.transform(taken_branch); 1871 set_control(taken_branch); 1872 1873 if (stopped()) { 1874 if (C->eliminate_boxing() && !new_path) { 1875 // Mark the successor block as parsed (if we haven't created a new path) 1876 branch_block->next_path_num(); 1877 } 1878 } else { 1879 // Update method data 1880 profile_taken_branch(target_bci); 1881 adjust_map_after_if(taken_btest, c, prob, branch_block); 1882 if (!stopped()) { 1883 if (new_path) { 1884 // Merge by using a new path 1885 merge_new_path(target_bci); 1886 } else if (ctrl_taken != NULL) { 1887 // Don't merge but save taken branch to be wired by caller 1888 *ctrl_taken = control(); 1889 } else { 1890 merge(target_bci); 1891 } 1892 } 1893 } 1894 } 1895 1896 untaken_branch = _gvn.transform(untaken_branch); 1897 set_control(untaken_branch); 1898 1899 // Branch not taken. 1900 if (stopped() && ctrl_taken == NULL) { 1901 if (C->eliminate_boxing()) { 1902 // Mark the successor block as parsed (if caller does not re-wire control flow) 1903 next_block->next_path_num(); 1904 } 1905 } else { 1906 // Update method data 1907 profile_not_taken_branch(); 1908 adjust_map_after_if(untaken_btest, c, untaken_prob, next_block); 1909 } 1910 } 1911 1912 void Parse::do_acmp(BoolTest::mask btest, Node* a, Node* b) { 1913 ciMethod* subst_method = ciEnv::current()->ValueBootstrapMethods_klass()->find_method(ciSymbol::isSubstitutable_name(), ciSymbol::object_object_boolean_signature()); 1914 // If current method is ValueBootstrapMethods::isSubstitutable(), 1915 // compile the acmp as a regular pointer comparison otherwise we 1916 // could call ValueBootstrapMethods::isSubstitutable() back 1917 if (!EnableValhalla || (method() == subst_method)) { 1918 Node* cmp = CmpP(a, b); 1919 cmp = optimize_cmp_with_klass(cmp); 1920 do_if(btest, cmp); 1921 return; 1922 } 1923 1924 // Substitutability test 1925 if (a->is_ValueType()) { 1926 inc_sp(2); 1927 a = a->as_ValueType()->allocate(this, true)->get_oop(); 1928 dec_sp(2); 1929 } 1930 if (b->is_ValueType()) { 1931 inc_sp(2); 1932 b = b->as_ValueType()->allocate(this, true)->get_oop(); 1933 dec_sp(2); 1934 } 1935 1936 const TypeOopPtr* ta = _gvn.type(a)->isa_oopptr(); 1937 const TypeOopPtr* tb = _gvn.type(b)->isa_oopptr(); 1938 1939 if (ta == NULL || !ta->can_be_value_type_raw() || 1940 tb == NULL || !tb->can_be_value_type_raw()) { 1941 Node* cmp = CmpP(a, b); 1942 cmp = optimize_cmp_with_klass(cmp); 1943 do_if(btest, cmp); 1944 return; 1945 } 1946 1947 Node* cmp = CmpP(a, b); 1948 cmp = optimize_cmp_with_klass(cmp); 1949 Node* eq_region = NULL; 1950 if (btest == BoolTest::eq) { 1951 do_if(btest, cmp, true); 1952 if (stopped()) { 1953 return; 1954 } 1955 } else { 1956 assert(btest == BoolTest::ne, "only eq or ne"); 1957 Node* is_not_equal = NULL; 1958 eq_region = new RegionNode(3); 1959 { 1960 PreserveJVMState pjvms(this); 1961 do_if(btest, cmp, false, &is_not_equal); 1962 if (!stopped()) { 1963 eq_region->init_req(1, control()); 1964 } 1965 } 1966 if (is_not_equal == NULL || is_not_equal->is_top()) { 1967 record_for_igvn(eq_region); 1968 set_control(_gvn.transform(eq_region)); 1969 return; 1970 } 1971 set_control(is_not_equal); 1972 } 1973 // Pointers not equal, check for values 1974 Node* ne_region = new RegionNode(6); 1975 inc_sp(2); 1976 Node* null_ctl = top(); 1977 Node* not_null_a = null_check_oop(a, &null_ctl, !too_many_traps(Deoptimization::Reason_null_check), false, false); 1978 dec_sp(2); 1979 ne_region->init_req(1, null_ctl); 1980 if (stopped()) { 1981 record_for_igvn(ne_region); 1982 set_control(_gvn.transform(ne_region)); 1983 if (btest == BoolTest::ne) { 1984 { 1985 PreserveJVMState pjvms(this); 1986 int target_bci = iter().get_dest(); 1987 merge(target_bci); 1988 } 1989 record_for_igvn(eq_region); 1990 set_control(_gvn.transform(eq_region)); 1991 } 1992 return; 1993 } 1994 1995 Node* is_value = is_always_locked(not_null_a); 1996 Node* value_mask = _gvn.MakeConX(markWord::always_locked_pattern); 1997 Node* is_value_cmp = _gvn.transform(new CmpXNode(is_value, value_mask)); 1998 Node* is_value_bol = _gvn.transform(new BoolNode(is_value_cmp, BoolTest::ne)); 1999 IfNode* is_value_iff = create_and_map_if(control(), is_value_bol, PROB_FAIR, COUNT_UNKNOWN); 2000 Node* not_value = _gvn.transform(new IfTrueNode(is_value_iff)); 2001 set_control(_gvn.transform(new IfFalseNode(is_value_iff))); 2002 ne_region->init_req(2, not_value); 2003 2004 // One of the 2 pointers refers to a value, check if both are of 2005 // the same class 2006 inc_sp(2); 2007 null_ctl = top(); 2008 Node* not_null_b = null_check_oop(b, &null_ctl, !too_many_traps(Deoptimization::Reason_null_check), false, false); 2009 dec_sp(2); 2010 ne_region->init_req(3, null_ctl); 2011 if (stopped()) { 2012 record_for_igvn(ne_region); 2013 set_control(_gvn.transform(ne_region)); 2014 if (btest == BoolTest::ne) { 2015 { 2016 PreserveJVMState pjvms(this); 2017 int target_bci = iter().get_dest(); 2018 merge(target_bci); 2019 } 2020 record_for_igvn(eq_region); 2021 set_control(_gvn.transform(eq_region)); 2022 } 2023 return; 2024 } 2025 Node* kls_a = load_object_klass(not_null_a); 2026 Node* kls_b = load_object_klass(not_null_b); 2027 Node* kls_cmp = CmpP(kls_a, kls_b); 2028 Node* kls_bol = _gvn.transform(new BoolNode(kls_cmp, BoolTest::ne)); 2029 IfNode* kls_iff = create_and_map_if(control(), kls_bol, PROB_FAIR, COUNT_UNKNOWN); 2030 Node* kls_ne = _gvn.transform(new IfTrueNode(kls_iff)); 2031 set_control(_gvn.transform(new IfFalseNode(kls_iff))); 2032 ne_region->init_req(4, kls_ne); 2033 2034 if (stopped()) { 2035 record_for_igvn(ne_region); 2036 set_control(_gvn.transform(ne_region)); 2037 if (btest == BoolTest::ne) { 2038 { 2039 PreserveJVMState pjvms(this); 2040 int target_bci = iter().get_dest(); 2041 merge(target_bci); 2042 } 2043 record_for_igvn(eq_region); 2044 set_control(_gvn.transform(eq_region)); 2045 } 2046 return; 2047 } 2048 // Both are values of the same class, we need to perform a 2049 // substitutability test. Delegate to 2050 // ValueBootstrapMethods::isSubstitutable(). 2051 2052 Node* ne_io_phi = PhiNode::make(ne_region, i_o()); 2053 Node* mem = reset_memory(); 2054 Node* ne_mem_phi = PhiNode::make(ne_region, mem); 2055 2056 Node* eq_io_phi = NULL; 2057 Node* eq_mem_phi = NULL; 2058 if (eq_region != NULL) { 2059 eq_io_phi = PhiNode::make(eq_region, i_o()); 2060 eq_mem_phi = PhiNode::make(eq_region, mem); 2061 } 2062 2063 set_all_memory(mem); 2064 2065 kill_dead_locals(); 2066 CallStaticJavaNode *call = new CallStaticJavaNode(C, TypeFunc::make(subst_method), SharedRuntime::get_resolve_static_call_stub(), subst_method, bci()); 2067 call->set_override_symbolic_info(true); 2068 call->init_req(TypeFunc::Parms, not_null_a); 2069 call->init_req(TypeFunc::Parms+1, not_null_b); 2070 inc_sp(2); 2071 set_edges_for_java_call(call, false, false); 2072 Node* ret = set_results_for_java_call(call, false, true); 2073 dec_sp(2); 2074 2075 // Test the return value of ValueBootstrapMethods::isSubstitutable() 2076 Node* subst_cmp = _gvn.transform(new CmpINode(ret, intcon(1))); 2077 Node* ctl = C->top(); 2078 if (btest == BoolTest::eq) { 2079 PreserveJVMState pjvms(this); 2080 do_if(btest, subst_cmp); 2081 if (!stopped()) { 2082 ctl = control(); 2083 } 2084 } else { 2085 assert(btest == BoolTest::ne, "only eq or ne"); 2086 PreserveJVMState pjvms(this); 2087 do_if(btest, subst_cmp, false, &ctl); 2088 if (!stopped()) { 2089 eq_region->init_req(2, control()); 2090 eq_io_phi->init_req(2, i_o()); 2091 eq_mem_phi->init_req(2, reset_memory()); 2092 } 2093 } 2094 ne_region->init_req(5, ctl); 2095 ne_io_phi->init_req(5, i_o()); 2096 ne_mem_phi->init_req(5, reset_memory()); 2097 2098 record_for_igvn(ne_region); 2099 set_control(_gvn.transform(ne_region)); 2100 set_i_o(_gvn.transform(ne_io_phi)); 2101 set_all_memory(_gvn.transform(ne_mem_phi)); 2102 2103 if (btest == BoolTest::ne) { 2104 { 2105 PreserveJVMState pjvms(this); 2106 int target_bci = iter().get_dest(); 2107 merge(target_bci); 2108 } 2109 2110 record_for_igvn(eq_region); 2111 set_control(_gvn.transform(eq_region)); 2112 set_i_o(_gvn.transform(eq_io_phi)); 2113 set_all_memory(_gvn.transform(eq_mem_phi)); 2114 } 2115 } 2116 2117 bool Parse::path_is_suitable_for_uncommon_trap(float prob) const { 2118 // Don't want to speculate on uncommon traps when running with -Xcomp 2119 if (!UseInterpreter) { 2120 return false; 2121 } 2122 return (seems_never_taken(prob) && seems_stable_comparison()); 2123 } 2124 2125 void Parse::maybe_add_predicate_after_if(Block* path) { 2126 if (path->is_SEL_head() && path->preds_parsed() == 0) { 2127 // Add predicates at bci of if dominating the loop so traps can be 2128 // recorded on the if's profile data 2129 int bc_depth = repush_if_args(); 2130 add_predicate(); 2131 dec_sp(bc_depth); 2132 path->set_has_predicates(); 2133 } 2134 } 2135 2136 2137 //----------------------------adjust_map_after_if------------------------------ 2138 // Adjust the JVM state to reflect the result of taking this path. 2139 // Basically, it means inspecting the CmpNode controlling this 2140 // branch, seeing how it constrains a tested value, and then 2141 // deciding if it's worth our while to encode this constraint 2142 // as graph nodes in the current abstract interpretation map. 2143 void Parse::adjust_map_after_if(BoolTest::mask btest, Node* c, float prob, Block* path) { 2144 if (!c->is_Cmp()) { 2145 maybe_add_predicate_after_if(path); 2146 return; 2147 } 2148 2149 if (stopped() || btest == BoolTest::illegal) { 2150 return; // nothing to do 2151 } 2152 2153 bool is_fallthrough = (path == successor_for_bci(iter().next_bci())); 2154 2155 if (path_is_suitable_for_uncommon_trap(prob)) { 2156 repush_if_args(); 2157 uncommon_trap(Deoptimization::Reason_unstable_if, 2158 Deoptimization::Action_reinterpret, 2159 NULL, 2160 (is_fallthrough ? "taken always" : "taken never")); 2161 return; 2162 } 2163 2164 Node* val = c->in(1); 2165 Node* con = c->in(2); 2166 const Type* tcon = _gvn.type(con); 2167 const Type* tval = _gvn.type(val); 2168 bool have_con = tcon->singleton(); 2169 if (tval->singleton()) { 2170 if (!have_con) { 2171 // Swap, so constant is in con. 2172 con = val; 2173 tcon = tval; 2174 val = c->in(2); 2175 tval = _gvn.type(val); 2176 btest = BoolTest(btest).commute(); 2177 have_con = true; 2178 } else { 2179 // Do we have two constants? Then leave well enough alone. 2180 have_con = false; 2181 } 2182 } 2183 if (!have_con) { // remaining adjustments need a con 2184 maybe_add_predicate_after_if(path); 2185 return; 2186 } 2187 2188 sharpen_type_after_if(btest, con, tcon, val, tval); 2189 maybe_add_predicate_after_if(path); 2190 } 2191 2192 2193 static Node* extract_obj_from_klass_load(PhaseGVN* gvn, Node* n) { 2194 Node* ldk; 2195 if (n->is_DecodeNKlass()) { 2196 if (n->in(1)->Opcode() != Op_LoadNKlass) { 2197 return NULL; 2198 } else { 2199 ldk = n->in(1); 2200 } 2201 } else if (n->Opcode() != Op_LoadKlass) { 2202 return NULL; 2203 } else { 2204 ldk = n; 2205 } 2206 assert(ldk != NULL && ldk->is_Load(), "should have found a LoadKlass or LoadNKlass node"); 2207 2208 Node* adr = ldk->in(MemNode::Address); 2209 intptr_t off = 0; 2210 Node* obj = AddPNode::Ideal_base_and_offset(adr, gvn, off); 2211 if (obj == NULL || off != oopDesc::klass_offset_in_bytes()) // loading oopDesc::_klass? 2212 return NULL; 2213 const TypePtr* tp = gvn->type(obj)->is_ptr(); 2214 if (tp == NULL || !(tp->isa_instptr() || tp->isa_aryptr())) // is obj a Java object ptr? 2215 return NULL; 2216 2217 return obj; 2218 } 2219 2220 void Parse::sharpen_type_after_if(BoolTest::mask btest, 2221 Node* con, const Type* tcon, 2222 Node* val, const Type* tval) { 2223 // Look for opportunities to sharpen the type of a node 2224 // whose klass is compared with a constant klass. 2225 if (btest == BoolTest::eq && tcon->isa_klassptr()) { 2226 Node* obj = extract_obj_from_klass_load(&_gvn, val); 2227 const TypeOopPtr* con_type = tcon->isa_klassptr()->as_instance_type(); 2228 if (obj != NULL && (con_type->isa_instptr() || con_type->isa_aryptr())) { 2229 // Found: 2230 // Bool(CmpP(LoadKlass(obj._klass), ConP(Foo.klass)), [eq]) 2231 // or the narrowOop equivalent. 2232 const Type* obj_type = _gvn.type(obj); 2233 const TypeOopPtr* tboth = obj_type->join_speculative(con_type)->isa_oopptr(); 2234 if (tboth != NULL && tboth->klass_is_exact() && tboth != obj_type && 2235 tboth->higher_equal(obj_type)) { 2236 // obj has to be of the exact type Foo if the CmpP succeeds. 2237 int obj_in_map = map()->find_edge(obj); 2238 JVMState* jvms = this->jvms(); 2239 if (obj_in_map >= 0 && 2240 (jvms->is_loc(obj_in_map) || jvms->is_stk(obj_in_map))) { 2241 TypeNode* ccast = new CheckCastPPNode(control(), obj, tboth); 2242 const Type* tcc = ccast->as_Type()->type(); 2243 assert(tcc != obj_type && tcc->higher_equal(obj_type), "must improve"); 2244 // Delay transform() call to allow recovery of pre-cast value 2245 // at the control merge. 2246 _gvn.set_type_bottom(ccast); 2247 record_for_igvn(ccast); 2248 // Here's the payoff. 2249 replace_in_map(obj, ccast); 2250 } 2251 } 2252 } 2253 } 2254 2255 int val_in_map = map()->find_edge(val); 2256 if (val_in_map < 0) return; // replace_in_map would be useless 2257 { 2258 JVMState* jvms = this->jvms(); 2259 if (!(jvms->is_loc(val_in_map) || 2260 jvms->is_stk(val_in_map))) 2261 return; // again, it would be useless 2262 } 2263 2264 // Check for a comparison to a constant, and "know" that the compared 2265 // value is constrained on this path. 2266 assert(tcon->singleton(), ""); 2267 ConstraintCastNode* ccast = NULL; 2268 Node* cast = NULL; 2269 2270 switch (btest) { 2271 case BoolTest::eq: // Constant test? 2272 { 2273 const Type* tboth = tcon->join_speculative(tval); 2274 if (tboth == tval) break; // Nothing to gain. 2275 if (tcon->isa_int()) { 2276 ccast = new CastIINode(val, tboth); 2277 } else if (tcon == TypePtr::NULL_PTR) { 2278 // Cast to null, but keep the pointer identity temporarily live. 2279 ccast = new CastPPNode(val, tboth); 2280 } else { 2281 const TypeF* tf = tcon->isa_float_constant(); 2282 const TypeD* td = tcon->isa_double_constant(); 2283 // Exclude tests vs float/double 0 as these could be 2284 // either +0 or -0. Just because you are equal to +0 2285 // doesn't mean you ARE +0! 2286 // Note, following code also replaces Long and Oop values. 2287 if ((!tf || tf->_f != 0.0) && 2288 (!td || td->_d != 0.0)) 2289 cast = con; // Replace non-constant val by con. 2290 } 2291 } 2292 break; 2293 2294 case BoolTest::ne: 2295 if (tcon == TypePtr::NULL_PTR) { 2296 cast = cast_not_null(val, false); 2297 } 2298 break; 2299 2300 default: 2301 // (At this point we could record int range types with CastII.) 2302 break; 2303 } 2304 2305 if (ccast != NULL) { 2306 const Type* tcc = ccast->as_Type()->type(); 2307 assert(tcc != tval && tcc->higher_equal(tval), "must improve"); 2308 // Delay transform() call to allow recovery of pre-cast value 2309 // at the control merge. 2310 ccast->set_req(0, control()); 2311 _gvn.set_type_bottom(ccast); 2312 record_for_igvn(ccast); 2313 cast = ccast; 2314 } 2315 2316 if (cast != NULL) { // Here's the payoff. 2317 replace_in_map(val, cast); 2318 } 2319 } 2320 2321 /** 2322 * Use speculative type to optimize CmpP node: if comparison is 2323 * against the low level class, cast the object to the speculative 2324 * type if any. CmpP should then go away. 2325 * 2326 * @param c expected CmpP node 2327 * @return result of CmpP on object casted to speculative type 2328 * 2329 */ 2330 Node* Parse::optimize_cmp_with_klass(Node* c) { 2331 // If this is transformed by the _gvn to a comparison with the low 2332 // level klass then we may be able to use speculation 2333 if (c->Opcode() == Op_CmpP && 2334 (c->in(1)->Opcode() == Op_LoadKlass || c->in(1)->Opcode() == Op_DecodeNKlass) && 2335 c->in(2)->is_Con()) { 2336 Node* load_klass = NULL; 2337 Node* decode = NULL; 2338 if (c->in(1)->Opcode() == Op_DecodeNKlass) { 2339 decode = c->in(1); 2340 load_klass = c->in(1)->in(1); 2341 } else { 2342 load_klass = c->in(1); 2343 } 2344 if (load_klass->in(2)->is_AddP()) { 2345 Node* addp = load_klass->in(2); 2346 Node* obj = addp->in(AddPNode::Address); 2347 const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr(); 2348 if (obj_type->speculative_type_not_null() != NULL) { 2349 ciKlass* k = obj_type->speculative_type(); 2350 inc_sp(2); 2351 obj = maybe_cast_profiled_obj(obj, k); 2352 dec_sp(2); 2353 if (obj->is_ValueType()) { 2354 assert(obj->as_ValueType()->is_allocated(&_gvn), "must be allocated"); 2355 obj = obj->as_ValueType()->get_oop(); 2356 } 2357 // Make the CmpP use the casted obj 2358 addp = basic_plus_adr(obj, addp->in(AddPNode::Offset)); 2359 load_klass = load_klass->clone(); 2360 load_klass->set_req(2, addp); 2361 load_klass = _gvn.transform(load_klass); 2362 if (decode != NULL) { 2363 decode = decode->clone(); 2364 decode->set_req(1, load_klass); 2365 load_klass = _gvn.transform(decode); 2366 } 2367 c = c->clone(); 2368 c->set_req(1, load_klass); 2369 c = _gvn.transform(c); 2370 } 2371 } 2372 } 2373 return c; 2374 } 2375 2376 //------------------------------do_one_bytecode-------------------------------- 2377 // Parse this bytecode, and alter the Parsers JVM->Node mapping 2378 void Parse::do_one_bytecode() { 2379 Node *a, *b, *c, *d; // Handy temps 2380 BoolTest::mask btest; 2381 int i; 2382 2383 assert(!has_exceptions(), "bytecode entry state must be clear of throws"); 2384 2385 if (C->check_node_count(NodeLimitFudgeFactor * 5, 2386 "out of nodes parsing method")) { 2387 return; 2388 } 2389 2390 #ifdef ASSERT 2391 // for setting breakpoints 2392 if (TraceOptoParse) { 2393 tty->print(" @"); 2394 dump_bci(bci()); 2395 tty->cr(); 2396 } 2397 #endif 2398 2399 switch (bc()) { 2400 case Bytecodes::_nop: 2401 // do nothing 2402 break; 2403 case Bytecodes::_lconst_0: 2404 push_pair(longcon(0)); 2405 break; 2406 2407 case Bytecodes::_lconst_1: 2408 push_pair(longcon(1)); 2409 break; 2410 2411 case Bytecodes::_fconst_0: 2412 push(zerocon(T_FLOAT)); 2413 break; 2414 2415 case Bytecodes::_fconst_1: 2416 push(makecon(TypeF::ONE)); 2417 break; 2418 2419 case Bytecodes::_fconst_2: 2420 push(makecon(TypeF::make(2.0f))); 2421 break; 2422 2423 case Bytecodes::_dconst_0: 2424 push_pair(zerocon(T_DOUBLE)); 2425 break; 2426 2427 case Bytecodes::_dconst_1: 2428 push_pair(makecon(TypeD::ONE)); 2429 break; 2430 2431 case Bytecodes::_iconst_m1:push(intcon(-1)); break; 2432 case Bytecodes::_iconst_0: push(intcon( 0)); break; 2433 case Bytecodes::_iconst_1: push(intcon( 1)); break; 2434 case Bytecodes::_iconst_2: push(intcon( 2)); break; 2435 case Bytecodes::_iconst_3: push(intcon( 3)); break; 2436 case Bytecodes::_iconst_4: push(intcon( 4)); break; 2437 case Bytecodes::_iconst_5: push(intcon( 5)); break; 2438 case Bytecodes::_bipush: push(intcon(iter().get_constant_u1())); break; 2439 case Bytecodes::_sipush: push(intcon(iter().get_constant_u2())); break; 2440 case Bytecodes::_aconst_null: push(null()); break; 2441 case Bytecodes::_ldc: 2442 case Bytecodes::_ldc_w: 2443 case Bytecodes::_ldc2_w: 2444 // If the constant is unresolved, run this BC once in the interpreter. 2445 { 2446 ciConstant constant = iter().get_constant(); 2447 if (!constant.is_valid() || 2448 (constant.basic_type() == T_OBJECT && 2449 !constant.as_object()->is_loaded())) { 2450 int index = iter().get_constant_pool_index(); 2451 constantTag tag = iter().get_constant_pool_tag(index); 2452 uncommon_trap(Deoptimization::make_trap_request 2453 (Deoptimization::Reason_unloaded, 2454 Deoptimization::Action_reinterpret, 2455 index), 2456 NULL, tag.internal_name()); 2457 break; 2458 } 2459 assert(constant.basic_type() != T_OBJECT || constant.as_object()->is_instance(), 2460 "must be java_mirror of klass"); 2461 const Type* con_type = Type::make_from_constant(constant); 2462 if (con_type != NULL) { 2463 push_node(con_type->basic_type(), makecon(con_type)); 2464 } 2465 } 2466 2467 break; 2468 2469 case Bytecodes::_aload_0: 2470 push( local(0) ); 2471 break; 2472 case Bytecodes::_aload_1: 2473 push( local(1) ); 2474 break; 2475 case Bytecodes::_aload_2: 2476 push( local(2) ); 2477 break; 2478 case Bytecodes::_aload_3: 2479 push( local(3) ); 2480 break; 2481 case Bytecodes::_aload: 2482 push( local(iter().get_index()) ); 2483 break; 2484 2485 case Bytecodes::_fload_0: 2486 case Bytecodes::_iload_0: 2487 push( local(0) ); 2488 break; 2489 case Bytecodes::_fload_1: 2490 case Bytecodes::_iload_1: 2491 push( local(1) ); 2492 break; 2493 case Bytecodes::_fload_2: 2494 case Bytecodes::_iload_2: 2495 push( local(2) ); 2496 break; 2497 case Bytecodes::_fload_3: 2498 case Bytecodes::_iload_3: 2499 push( local(3) ); 2500 break; 2501 case Bytecodes::_fload: 2502 case Bytecodes::_iload: 2503 push( local(iter().get_index()) ); 2504 break; 2505 case Bytecodes::_lload_0: 2506 push_pair_local( 0 ); 2507 break; 2508 case Bytecodes::_lload_1: 2509 push_pair_local( 1 ); 2510 break; 2511 case Bytecodes::_lload_2: 2512 push_pair_local( 2 ); 2513 break; 2514 case Bytecodes::_lload_3: 2515 push_pair_local( 3 ); 2516 break; 2517 case Bytecodes::_lload: 2518 push_pair_local( iter().get_index() ); 2519 break; 2520 2521 case Bytecodes::_dload_0: 2522 push_pair_local(0); 2523 break; 2524 case Bytecodes::_dload_1: 2525 push_pair_local(1); 2526 break; 2527 case Bytecodes::_dload_2: 2528 push_pair_local(2); 2529 break; 2530 case Bytecodes::_dload_3: 2531 push_pair_local(3); 2532 break; 2533 case Bytecodes::_dload: 2534 push_pair_local(iter().get_index()); 2535 break; 2536 case Bytecodes::_fstore_0: 2537 case Bytecodes::_istore_0: 2538 case Bytecodes::_astore_0: 2539 set_local( 0, pop() ); 2540 break; 2541 case Bytecodes::_fstore_1: 2542 case Bytecodes::_istore_1: 2543 case Bytecodes::_astore_1: 2544 set_local( 1, pop() ); 2545 break; 2546 case Bytecodes::_fstore_2: 2547 case Bytecodes::_istore_2: 2548 case Bytecodes::_astore_2: 2549 set_local( 2, pop() ); 2550 break; 2551 case Bytecodes::_fstore_3: 2552 case Bytecodes::_istore_3: 2553 case Bytecodes::_astore_3: 2554 set_local( 3, pop() ); 2555 break; 2556 case Bytecodes::_fstore: 2557 case Bytecodes::_istore: 2558 case Bytecodes::_astore: 2559 set_local( iter().get_index(), pop() ); 2560 break; 2561 // long stores 2562 case Bytecodes::_lstore_0: 2563 set_pair_local( 0, pop_pair() ); 2564 break; 2565 case Bytecodes::_lstore_1: 2566 set_pair_local( 1, pop_pair() ); 2567 break; 2568 case Bytecodes::_lstore_2: 2569 set_pair_local( 2, pop_pair() ); 2570 break; 2571 case Bytecodes::_lstore_3: 2572 set_pair_local( 3, pop_pair() ); 2573 break; 2574 case Bytecodes::_lstore: 2575 set_pair_local( iter().get_index(), pop_pair() ); 2576 break; 2577 2578 // double stores 2579 case Bytecodes::_dstore_0: 2580 set_pair_local( 0, dstore_rounding(pop_pair()) ); 2581 break; 2582 case Bytecodes::_dstore_1: 2583 set_pair_local( 1, dstore_rounding(pop_pair()) ); 2584 break; 2585 case Bytecodes::_dstore_2: 2586 set_pair_local( 2, dstore_rounding(pop_pair()) ); 2587 break; 2588 case Bytecodes::_dstore_3: 2589 set_pair_local( 3, dstore_rounding(pop_pair()) ); 2590 break; 2591 case Bytecodes::_dstore: 2592 set_pair_local( iter().get_index(), dstore_rounding(pop_pair()) ); 2593 break; 2594 2595 case Bytecodes::_pop: dec_sp(1); break; 2596 case Bytecodes::_pop2: dec_sp(2); break; 2597 case Bytecodes::_swap: 2598 a = pop(); 2599 b = pop(); 2600 push(a); 2601 push(b); 2602 break; 2603 case Bytecodes::_dup: 2604 a = pop(); 2605 push(a); 2606 push(a); 2607 break; 2608 case Bytecodes::_dup_x1: 2609 a = pop(); 2610 b = pop(); 2611 push( a ); 2612 push( b ); 2613 push( a ); 2614 break; 2615 case Bytecodes::_dup_x2: 2616 a = pop(); 2617 b = pop(); 2618 c = pop(); 2619 push( a ); 2620 push( c ); 2621 push( b ); 2622 push( a ); 2623 break; 2624 case Bytecodes::_dup2: 2625 a = pop(); 2626 b = pop(); 2627 push( b ); 2628 push( a ); 2629 push( b ); 2630 push( a ); 2631 break; 2632 2633 case Bytecodes::_dup2_x1: 2634 // before: .. c, b, a 2635 // after: .. b, a, c, b, a 2636 // not tested 2637 a = pop(); 2638 b = pop(); 2639 c = pop(); 2640 push( b ); 2641 push( a ); 2642 push( c ); 2643 push( b ); 2644 push( a ); 2645 break; 2646 case Bytecodes::_dup2_x2: 2647 // before: .. d, c, b, a 2648 // after: .. b, a, d, c, b, a 2649 // not tested 2650 a = pop(); 2651 b = pop(); 2652 c = pop(); 2653 d = pop(); 2654 push( b ); 2655 push( a ); 2656 push( d ); 2657 push( c ); 2658 push( b ); 2659 push( a ); 2660 break; 2661 2662 case Bytecodes::_arraylength: { 2663 // Must do null-check with value on expression stack 2664 Node *ary = null_check(peek(), T_ARRAY); 2665 // Compile-time detect of null-exception? 2666 if (stopped()) return; 2667 a = pop(); 2668 push(load_array_length(a)); 2669 break; 2670 } 2671 2672 case Bytecodes::_baload: array_load(T_BYTE); break; 2673 case Bytecodes::_caload: array_load(T_CHAR); break; 2674 case Bytecodes::_iaload: array_load(T_INT); break; 2675 case Bytecodes::_saload: array_load(T_SHORT); break; 2676 case Bytecodes::_faload: array_load(T_FLOAT); break; 2677 case Bytecodes::_aaload: array_load(T_OBJECT); break; 2678 case Bytecodes::_laload: array_load(T_LONG); break; 2679 case Bytecodes::_daload: array_load(T_DOUBLE); break; 2680 case Bytecodes::_bastore: array_store(T_BYTE); break; 2681 case Bytecodes::_castore: array_store(T_CHAR); break; 2682 case Bytecodes::_iastore: array_store(T_INT); break; 2683 case Bytecodes::_sastore: array_store(T_SHORT); break; 2684 case Bytecodes::_fastore: array_store(T_FLOAT); break; 2685 case Bytecodes::_aastore: array_store(T_OBJECT); break; 2686 case Bytecodes::_lastore: array_store(T_LONG); break; 2687 case Bytecodes::_dastore: array_store(T_DOUBLE); break; 2688 2689 case Bytecodes::_getfield: 2690 do_getfield(); 2691 break; 2692 2693 case Bytecodes::_getstatic: 2694 do_getstatic(); 2695 break; 2696 2697 case Bytecodes::_putfield: 2698 do_putfield(); 2699 break; 2700 2701 case Bytecodes::_putstatic: 2702 do_putstatic(); 2703 break; 2704 2705 case Bytecodes::_irem: 2706 do_irem(); 2707 break; 2708 case Bytecodes::_idiv: 2709 // Must keep both values on the expression-stack during null-check 2710 zero_check_int(peek()); 2711 // Compile-time detect of null-exception? 2712 if (stopped()) return; 2713 b = pop(); 2714 a = pop(); 2715 push( _gvn.transform( new DivINode(control(),a,b) ) ); 2716 break; 2717 case Bytecodes::_imul: 2718 b = pop(); a = pop(); 2719 push( _gvn.transform( new MulINode(a,b) ) ); 2720 break; 2721 case Bytecodes::_iadd: 2722 b = pop(); a = pop(); 2723 push( _gvn.transform( new AddINode(a,b) ) ); 2724 break; 2725 case Bytecodes::_ineg: 2726 a = pop(); 2727 push( _gvn.transform( new SubINode(_gvn.intcon(0),a)) ); 2728 break; 2729 case Bytecodes::_isub: 2730 b = pop(); a = pop(); 2731 push( _gvn.transform( new SubINode(a,b) ) ); 2732 break; 2733 case Bytecodes::_iand: 2734 b = pop(); a = pop(); 2735 push( _gvn.transform( new AndINode(a,b) ) ); 2736 break; 2737 case Bytecodes::_ior: 2738 b = pop(); a = pop(); 2739 push( _gvn.transform( new OrINode(a,b) ) ); 2740 break; 2741 case Bytecodes::_ixor: 2742 b = pop(); a = pop(); 2743 push( _gvn.transform( new XorINode(a,b) ) ); 2744 break; 2745 case Bytecodes::_ishl: 2746 b = pop(); a = pop(); 2747 push( _gvn.transform( new LShiftINode(a,b) ) ); 2748 break; 2749 case Bytecodes::_ishr: 2750 b = pop(); a = pop(); 2751 push( _gvn.transform( new RShiftINode(a,b) ) ); 2752 break; 2753 case Bytecodes::_iushr: 2754 b = pop(); a = pop(); 2755 push( _gvn.transform( new URShiftINode(a,b) ) ); 2756 break; 2757 2758 case Bytecodes::_fneg: 2759 a = pop(); 2760 b = _gvn.transform(new NegFNode (a)); 2761 push(b); 2762 break; 2763 2764 case Bytecodes::_fsub: 2765 b = pop(); 2766 a = pop(); 2767 c = _gvn.transform( new SubFNode(a,b) ); 2768 d = precision_rounding(c); 2769 push( d ); 2770 break; 2771 2772 case Bytecodes::_fadd: 2773 b = pop(); 2774 a = pop(); 2775 c = _gvn.transform( new AddFNode(a,b) ); 2776 d = precision_rounding(c); 2777 push( d ); 2778 break; 2779 2780 case Bytecodes::_fmul: 2781 b = pop(); 2782 a = pop(); 2783 c = _gvn.transform( new MulFNode(a,b) ); 2784 d = precision_rounding(c); 2785 push( d ); 2786 break; 2787 2788 case Bytecodes::_fdiv: 2789 b = pop(); 2790 a = pop(); 2791 c = _gvn.transform( new DivFNode(0,a,b) ); 2792 d = precision_rounding(c); 2793 push( d ); 2794 break; 2795 2796 case Bytecodes::_frem: 2797 if (Matcher::has_match_rule(Op_ModF)) { 2798 // Generate a ModF node. 2799 b = pop(); 2800 a = pop(); 2801 c = _gvn.transform( new ModFNode(0,a,b) ); 2802 d = precision_rounding(c); 2803 push( d ); 2804 } 2805 else { 2806 // Generate a call. 2807 modf(); 2808 } 2809 break; 2810 2811 case Bytecodes::_fcmpl: 2812 b = pop(); 2813 a = pop(); 2814 c = _gvn.transform( new CmpF3Node( a, b)); 2815 push(c); 2816 break; 2817 case Bytecodes::_fcmpg: 2818 b = pop(); 2819 a = pop(); 2820 2821 // Same as fcmpl but need to flip the unordered case. Swap the inputs, 2822 // which negates the result sign except for unordered. Flip the unordered 2823 // as well by using CmpF3 which implements unordered-lesser instead of 2824 // unordered-greater semantics. Finally, commute the result bits. Result 2825 // is same as using a CmpF3Greater except we did it with CmpF3 alone. 2826 c = _gvn.transform( new CmpF3Node( b, a)); 2827 c = _gvn.transform( new SubINode(_gvn.intcon(0),c) ); 2828 push(c); 2829 break; 2830 2831 case Bytecodes::_f2i: 2832 a = pop(); 2833 push(_gvn.transform(new ConvF2INode(a))); 2834 break; 2835 2836 case Bytecodes::_d2i: 2837 a = pop_pair(); 2838 b = _gvn.transform(new ConvD2INode(a)); 2839 push( b ); 2840 break; 2841 2842 case Bytecodes::_f2d: 2843 a = pop(); 2844 b = _gvn.transform( new ConvF2DNode(a)); 2845 push_pair( b ); 2846 break; 2847 2848 case Bytecodes::_d2f: 2849 a = pop_pair(); 2850 b = _gvn.transform( new ConvD2FNode(a)); 2851 // This breaks _227_mtrt (speed & correctness) and _222_mpegaudio (speed) 2852 //b = _gvn.transform(new RoundFloatNode(0, b) ); 2853 push( b ); 2854 break; 2855 2856 case Bytecodes::_l2f: 2857 if (Matcher::convL2FSupported()) { 2858 a = pop_pair(); 2859 b = _gvn.transform( new ConvL2FNode(a)); 2860 // For i486.ad, FILD doesn't restrict precision to 24 or 53 bits. 2861 // Rather than storing the result into an FP register then pushing 2862 // out to memory to round, the machine instruction that implements 2863 // ConvL2D is responsible for rounding. 2864 // c = precision_rounding(b); 2865 c = _gvn.transform(b); 2866 push(c); 2867 } else { 2868 l2f(); 2869 } 2870 break; 2871 2872 case Bytecodes::_l2d: 2873 a = pop_pair(); 2874 b = _gvn.transform( new ConvL2DNode(a)); 2875 // For i486.ad, rounding is always necessary (see _l2f above). 2876 // c = dprecision_rounding(b); 2877 c = _gvn.transform(b); 2878 push_pair(c); 2879 break; 2880 2881 case Bytecodes::_f2l: 2882 a = pop(); 2883 b = _gvn.transform( new ConvF2LNode(a)); 2884 push_pair(b); 2885 break; 2886 2887 case Bytecodes::_d2l: 2888 a = pop_pair(); 2889 b = _gvn.transform( new ConvD2LNode(a)); 2890 push_pair(b); 2891 break; 2892 2893 case Bytecodes::_dsub: 2894 b = pop_pair(); 2895 a = pop_pair(); 2896 c = _gvn.transform( new SubDNode(a,b) ); 2897 d = dprecision_rounding(c); 2898 push_pair( d ); 2899 break; 2900 2901 case Bytecodes::_dadd: 2902 b = pop_pair(); 2903 a = pop_pair(); 2904 c = _gvn.transform( new AddDNode(a,b) ); 2905 d = dprecision_rounding(c); 2906 push_pair( d ); 2907 break; 2908 2909 case Bytecodes::_dmul: 2910 b = pop_pair(); 2911 a = pop_pair(); 2912 c = _gvn.transform( new MulDNode(a,b) ); 2913 d = dprecision_rounding(c); 2914 push_pair( d ); 2915 break; 2916 2917 case Bytecodes::_ddiv: 2918 b = pop_pair(); 2919 a = pop_pair(); 2920 c = _gvn.transform( new DivDNode(0,a,b) ); 2921 d = dprecision_rounding(c); 2922 push_pair( d ); 2923 break; 2924 2925 case Bytecodes::_dneg: 2926 a = pop_pair(); 2927 b = _gvn.transform(new NegDNode (a)); 2928 push_pair(b); 2929 break; 2930 2931 case Bytecodes::_drem: 2932 if (Matcher::has_match_rule(Op_ModD)) { 2933 // Generate a ModD node. 2934 b = pop_pair(); 2935 a = pop_pair(); 2936 // a % b 2937 2938 c = _gvn.transform( new ModDNode(0,a,b) ); 2939 d = dprecision_rounding(c); 2940 push_pair( d ); 2941 } 2942 else { 2943 // Generate a call. 2944 modd(); 2945 } 2946 break; 2947 2948 case Bytecodes::_dcmpl: 2949 b = pop_pair(); 2950 a = pop_pair(); 2951 c = _gvn.transform( new CmpD3Node( a, b)); 2952 push(c); 2953 break; 2954 2955 case Bytecodes::_dcmpg: 2956 b = pop_pair(); 2957 a = pop_pair(); 2958 // Same as dcmpl but need to flip the unordered case. 2959 // Commute the inputs, which negates the result sign except for unordered. 2960 // Flip the unordered as well by using CmpD3 which implements 2961 // unordered-lesser instead of unordered-greater semantics. 2962 // Finally, negate the result bits. Result is same as using a 2963 // CmpD3Greater except we did it with CmpD3 alone. 2964 c = _gvn.transform( new CmpD3Node( b, a)); 2965 c = _gvn.transform( new SubINode(_gvn.intcon(0),c) ); 2966 push(c); 2967 break; 2968 2969 2970 // Note for longs -> lo word is on TOS, hi word is on TOS - 1 2971 case Bytecodes::_land: 2972 b = pop_pair(); 2973 a = pop_pair(); 2974 c = _gvn.transform( new AndLNode(a,b) ); 2975 push_pair(c); 2976 break; 2977 case Bytecodes::_lor: 2978 b = pop_pair(); 2979 a = pop_pair(); 2980 c = _gvn.transform( new OrLNode(a,b) ); 2981 push_pair(c); 2982 break; 2983 case Bytecodes::_lxor: 2984 b = pop_pair(); 2985 a = pop_pair(); 2986 c = _gvn.transform( new XorLNode(a,b) ); 2987 push_pair(c); 2988 break; 2989 2990 case Bytecodes::_lshl: 2991 b = pop(); // the shift count 2992 a = pop_pair(); // value to be shifted 2993 c = _gvn.transform( new LShiftLNode(a,b) ); 2994 push_pair(c); 2995 break; 2996 case Bytecodes::_lshr: 2997 b = pop(); // the shift count 2998 a = pop_pair(); // value to be shifted 2999 c = _gvn.transform( new RShiftLNode(a,b) ); 3000 push_pair(c); 3001 break; 3002 case Bytecodes::_lushr: 3003 b = pop(); // the shift count 3004 a = pop_pair(); // value to be shifted 3005 c = _gvn.transform( new URShiftLNode(a,b) ); 3006 push_pair(c); 3007 break; 3008 case Bytecodes::_lmul: 3009 b = pop_pair(); 3010 a = pop_pair(); 3011 c = _gvn.transform( new MulLNode(a,b) ); 3012 push_pair(c); 3013 break; 3014 3015 case Bytecodes::_lrem: 3016 // Must keep both values on the expression-stack during null-check 3017 assert(peek(0) == top(), "long word order"); 3018 zero_check_long(peek(1)); 3019 // Compile-time detect of null-exception? 3020 if (stopped()) return; 3021 b = pop_pair(); 3022 a = pop_pair(); 3023 c = _gvn.transform( new ModLNode(control(),a,b) ); 3024 push_pair(c); 3025 break; 3026 3027 case Bytecodes::_ldiv: 3028 // Must keep both values on the expression-stack during null-check 3029 assert(peek(0) == top(), "long word order"); 3030 zero_check_long(peek(1)); 3031 // Compile-time detect of null-exception? 3032 if (stopped()) return; 3033 b = pop_pair(); 3034 a = pop_pair(); 3035 c = _gvn.transform( new DivLNode(control(),a,b) ); 3036 push_pair(c); 3037 break; 3038 3039 case Bytecodes::_ladd: 3040 b = pop_pair(); 3041 a = pop_pair(); 3042 c = _gvn.transform( new AddLNode(a,b) ); 3043 push_pair(c); 3044 break; 3045 case Bytecodes::_lsub: 3046 b = pop_pair(); 3047 a = pop_pair(); 3048 c = _gvn.transform( new SubLNode(a,b) ); 3049 push_pair(c); 3050 break; 3051 case Bytecodes::_lcmp: 3052 // Safepoints are now inserted _before_ branches. The long-compare 3053 // bytecode painfully produces a 3-way value (-1,0,+1) which requires a 3054 // slew of control flow. These are usually followed by a CmpI vs zero and 3055 // a branch; this pattern then optimizes to the obvious long-compare and 3056 // branch. However, if the branch is backwards there's a Safepoint 3057 // inserted. The inserted Safepoint captures the JVM state at the 3058 // pre-branch point, i.e. it captures the 3-way value. Thus if a 3059 // long-compare is used to control a loop the debug info will force 3060 // computation of the 3-way value, even though the generated code uses a 3061 // long-compare and branch. We try to rectify the situation by inserting 3062 // a SafePoint here and have it dominate and kill the safepoint added at a 3063 // following backwards branch. At this point the JVM state merely holds 2 3064 // longs but not the 3-way value. 3065 if( UseLoopSafepoints ) { 3066 switch( iter().next_bc() ) { 3067 case Bytecodes::_ifgt: 3068 case Bytecodes::_iflt: 3069 case Bytecodes::_ifge: 3070 case Bytecodes::_ifle: 3071 case Bytecodes::_ifne: 3072 case Bytecodes::_ifeq: 3073 // If this is a backwards branch in the bytecodes, add Safepoint 3074 maybe_add_safepoint(iter().next_get_dest()); 3075 default: 3076 break; 3077 } 3078 } 3079 b = pop_pair(); 3080 a = pop_pair(); 3081 c = _gvn.transform( new CmpL3Node( a, b )); 3082 push(c); 3083 break; 3084 3085 case Bytecodes::_lneg: 3086 a = pop_pair(); 3087 b = _gvn.transform( new SubLNode(longcon(0),a)); 3088 push_pair(b); 3089 break; 3090 case Bytecodes::_l2i: 3091 a = pop_pair(); 3092 push( _gvn.transform( new ConvL2INode(a))); 3093 break; 3094 case Bytecodes::_i2l: 3095 a = pop(); 3096 b = _gvn.transform( new ConvI2LNode(a)); 3097 push_pair(b); 3098 break; 3099 case Bytecodes::_i2b: 3100 // Sign extend 3101 a = pop(); 3102 a = _gvn.transform( new LShiftINode(a,_gvn.intcon(24)) ); 3103 a = _gvn.transform( new RShiftINode(a,_gvn.intcon(24)) ); 3104 push( a ); 3105 break; 3106 case Bytecodes::_i2s: 3107 a = pop(); 3108 a = _gvn.transform( new LShiftINode(a,_gvn.intcon(16)) ); 3109 a = _gvn.transform( new RShiftINode(a,_gvn.intcon(16)) ); 3110 push( a ); 3111 break; 3112 case Bytecodes::_i2c: 3113 a = pop(); 3114 push( _gvn.transform( new AndINode(a,_gvn.intcon(0xFFFF)) ) ); 3115 break; 3116 3117 case Bytecodes::_i2f: 3118 a = pop(); 3119 b = _gvn.transform( new ConvI2FNode(a) ) ; 3120 c = precision_rounding(b); 3121 push (b); 3122 break; 3123 3124 case Bytecodes::_i2d: 3125 a = pop(); 3126 b = _gvn.transform( new ConvI2DNode(a)); 3127 push_pair(b); 3128 break; 3129 3130 case Bytecodes::_iinc: // Increment local 3131 i = iter().get_index(); // Get local index 3132 set_local( i, _gvn.transform( new AddINode( _gvn.intcon(iter().get_iinc_con()), local(i) ) ) ); 3133 break; 3134 3135 // Exit points of synchronized methods must have an unlock node 3136 case Bytecodes::_return: 3137 return_current(NULL); 3138 break; 3139 3140 case Bytecodes::_ireturn: 3141 case Bytecodes::_areturn: 3142 case Bytecodes::_freturn: 3143 return_current(pop()); 3144 break; 3145 case Bytecodes::_lreturn: 3146 return_current(pop_pair()); 3147 break; 3148 case Bytecodes::_dreturn: 3149 return_current(pop_pair()); 3150 break; 3151 3152 case Bytecodes::_athrow: 3153 // null exception oop throws NULL pointer exception 3154 null_check(peek()); 3155 if (stopped()) return; 3156 // Hook the thrown exception directly to subsequent handlers. 3157 if (BailoutToInterpreterForThrows) { 3158 // Keep method interpreted from now on. 3159 uncommon_trap(Deoptimization::Reason_unhandled, 3160 Deoptimization::Action_make_not_compilable); 3161 return; 3162 } 3163 if (env()->jvmti_can_post_on_exceptions()) { 3164 // check if we must post exception events, take uncommon trap if so (with must_throw = false) 3165 uncommon_trap_if_should_post_on_exceptions(Deoptimization::Reason_unhandled, false); 3166 } 3167 // Here if either can_post_on_exceptions or should_post_on_exceptions is false 3168 add_exception_state(make_exception_state(peek())); 3169 break; 3170 3171 case Bytecodes::_goto: // fall through 3172 case Bytecodes::_goto_w: { 3173 int target_bci = (bc() == Bytecodes::_goto) ? iter().get_dest() : iter().get_far_dest(); 3174 3175 // If this is a backwards branch in the bytecodes, add Safepoint 3176 maybe_add_safepoint(target_bci); 3177 3178 // Update method data 3179 profile_taken_branch(target_bci); 3180 3181 // Merge the current control into the target basic block 3182 merge(target_bci); 3183 3184 // See if we can get some profile data and hand it off to the next block 3185 Block *target_block = block()->successor_for_bci(target_bci); 3186 if (target_block->pred_count() != 1) break; 3187 ciMethodData* methodData = method()->method_data(); 3188 if (!methodData->is_mature()) break; 3189 ciProfileData* data = methodData->bci_to_data(bci()); 3190 assert(data != NULL && data->is_JumpData(), "need JumpData for taken branch"); 3191 int taken = ((ciJumpData*)data)->taken(); 3192 taken = method()->scale_count(taken); 3193 target_block->set_count(taken); 3194 break; 3195 } 3196 3197 case Bytecodes::_ifnull: btest = BoolTest::eq; goto handle_if_null; 3198 case Bytecodes::_ifnonnull: btest = BoolTest::ne; goto handle_if_null; 3199 handle_if_null: 3200 // If this is a backwards branch in the bytecodes, add Safepoint 3201 maybe_add_safepoint(iter().get_dest()); 3202 a = null(); 3203 b = pop(); 3204 if (b->is_ValueType()) { 3205 // Return constant false because 'b' is always non-null 3206 c = _gvn.makecon(TypeInt::CC_GT); 3207 } else { 3208 if (!_gvn.type(b)->speculative_maybe_null() && 3209 !too_many_traps(Deoptimization::Reason_speculate_null_check)) { 3210 inc_sp(1); 3211 Node* null_ctl = top(); 3212 b = null_check_oop(b, &null_ctl, true, true, true); 3213 assert(null_ctl->is_top(), "no null control here"); 3214 dec_sp(1); 3215 } else if (_gvn.type(b)->speculative_always_null() && 3216 !too_many_traps(Deoptimization::Reason_speculate_null_assert)) { 3217 inc_sp(1); 3218 b = null_assert(b); 3219 dec_sp(1); 3220 } 3221 c = _gvn.transform( new CmpPNode(b, a) ); 3222 } 3223 do_ifnull(btest, c); 3224 break; 3225 3226 case Bytecodes::_if_acmpeq: btest = BoolTest::eq; goto handle_if_acmp; 3227 case Bytecodes::_if_acmpne: btest = BoolTest::ne; goto handle_if_acmp; 3228 handle_if_acmp: 3229 // If this is a backwards branch in the bytecodes, add Safepoint 3230 maybe_add_safepoint(iter().get_dest()); 3231 a = access_resolve(pop(), 0); 3232 b = access_resolve(pop(), 0); 3233 do_acmp(btest, a, b); 3234 break; 3235 3236 case Bytecodes::_ifeq: btest = BoolTest::eq; goto handle_ifxx; 3237 case Bytecodes::_ifne: btest = BoolTest::ne; goto handle_ifxx; 3238 case Bytecodes::_iflt: btest = BoolTest::lt; goto handle_ifxx; 3239 case Bytecodes::_ifle: btest = BoolTest::le; goto handle_ifxx; 3240 case Bytecodes::_ifgt: btest = BoolTest::gt; goto handle_ifxx; 3241 case Bytecodes::_ifge: btest = BoolTest::ge; goto handle_ifxx; 3242 handle_ifxx: 3243 // If this is a backwards branch in the bytecodes, add Safepoint 3244 maybe_add_safepoint(iter().get_dest()); 3245 a = _gvn.intcon(0); 3246 b = pop(); 3247 c = _gvn.transform( new CmpINode(b, a) ); 3248 do_if(btest, c); 3249 break; 3250 3251 case Bytecodes::_if_icmpeq: btest = BoolTest::eq; goto handle_if_icmp; 3252 case Bytecodes::_if_icmpne: btest = BoolTest::ne; goto handle_if_icmp; 3253 case Bytecodes::_if_icmplt: btest = BoolTest::lt; goto handle_if_icmp; 3254 case Bytecodes::_if_icmple: btest = BoolTest::le; goto handle_if_icmp; 3255 case Bytecodes::_if_icmpgt: btest = BoolTest::gt; goto handle_if_icmp; 3256 case Bytecodes::_if_icmpge: btest = BoolTest::ge; goto handle_if_icmp; 3257 handle_if_icmp: 3258 // If this is a backwards branch in the bytecodes, add Safepoint 3259 maybe_add_safepoint(iter().get_dest()); 3260 a = pop(); 3261 b = pop(); 3262 c = _gvn.transform( new CmpINode( b, a ) ); 3263 do_if(btest, c); 3264 break; 3265 3266 case Bytecodes::_tableswitch: 3267 do_tableswitch(); 3268 break; 3269 3270 case Bytecodes::_lookupswitch: 3271 do_lookupswitch(); 3272 break; 3273 3274 case Bytecodes::_invokestatic: 3275 case Bytecodes::_invokedynamic: 3276 case Bytecodes::_invokespecial: 3277 case Bytecodes::_invokevirtual: 3278 case Bytecodes::_invokeinterface: 3279 do_call(); 3280 break; 3281 case Bytecodes::_checkcast: 3282 do_checkcast(); 3283 break; 3284 case Bytecodes::_instanceof: 3285 do_instanceof(); 3286 break; 3287 case Bytecodes::_anewarray: 3288 do_newarray(); 3289 break; 3290 case Bytecodes::_newarray: 3291 do_newarray((BasicType)iter().get_index()); 3292 break; 3293 case Bytecodes::_multianewarray: 3294 do_multianewarray(); 3295 break; 3296 case Bytecodes::_new: 3297 do_new(); 3298 break; 3299 case Bytecodes::_defaultvalue: 3300 do_defaultvalue(); 3301 break; 3302 case Bytecodes::_withfield: 3303 do_withfield(); 3304 break; 3305 3306 case Bytecodes::_jsr: 3307 case Bytecodes::_jsr_w: 3308 do_jsr(); 3309 break; 3310 3311 case Bytecodes::_ret: 3312 do_ret(); 3313 break; 3314 3315 3316 case Bytecodes::_monitorenter: 3317 do_monitor_enter(); 3318 break; 3319 3320 case Bytecodes::_monitorexit: 3321 do_monitor_exit(); 3322 break; 3323 3324 case Bytecodes::_breakpoint: 3325 // Breakpoint set concurrently to compile 3326 // %%% use an uncommon trap? 3327 C->record_failure("breakpoint in method"); 3328 return; 3329 3330 default: 3331 #ifndef PRODUCT 3332 map()->dump(99); 3333 #endif 3334 tty->print("\nUnhandled bytecode %s\n", Bytecodes::name(bc()) ); 3335 ShouldNotReachHere(); 3336 } 3337 3338 #ifndef PRODUCT 3339 IdealGraphPrinter *printer = C->printer(); 3340 if (printer && printer->should_print(1)) { 3341 char buffer[256]; 3342 jio_snprintf(buffer, sizeof(buffer), "Bytecode %d: %s", bci(), Bytecodes::name(bc())); 3343 bool old = printer->traverse_outs(); 3344 printer->set_traverse_outs(true); 3345 printer->print_method(buffer, 4); 3346 printer->set_traverse_outs(old); 3347 } 3348 #endif 3349 }