/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "compiler/compileLog.hpp"
#include "ci/ciValueKlass.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "memory/resourceArea.hpp"
#include "opto/addnode.hpp"
#include "opto/castnode.hpp"
#include "opto/convertnode.hpp"
#include "opto/graphKit.hpp"
#include "opto/idealKit.hpp"
#include "opto/intrinsicnode.hpp"
#include "opto/locknode.hpp"
#include "opto/machnode.hpp"
#include "opto/opaquenode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/valuetypenode.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/sharedRuntime.hpp"

//----------------------------GraphKit-----------------------------------------
// Main utility constructor.
GraphKit::GraphKit(JVMState* jvms, PhaseGVN* gvn)
  : Phase(Phase::Parser),
    _env(C->env()),
    _gvn((gvn != NULL) ? *gvn : *C->initial_gvn())
{
  _exceptions = jvms->map()->next_exception();
  if (_exceptions != NULL)  jvms->map()->set_next_exception(NULL);
  set_jvms(jvms);
#ifdef ASSERT
  if (_gvn.is_IterGVN() != NULL) {
    assert(_gvn.is_IterGVN()->delay_transform(), "Transformation must be delayed if IterGVN is used");
    // Save the initial size of _for_igvn worklist for verification (see ~GraphKit)
    _worklist_size = _gvn.C->for_igvn()->size();
  }
#endif
}

// Private constructor for parser.
GraphKit::GraphKit()
  : Phase(Phase::Parser),
    _env(C->env()),
    _gvn(*C->initial_gvn())
{
  _exceptions = NULL;
  set_map(NULL);
  debug_only(_sp = -99);
  debug_only(set_bci(-99));
}



//---------------------------clean_stack---------------------------------------
// Clear away rubbish from the stack area of the JVM state.
// This destroys any arguments that may be waiting on the stack.
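// (Every stack slot at or above from_sp is simply overwritten with top().)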
void GraphKit::clean_stack(int from_sp) {
  SafePointNode* map = this->map();
  JVMState* jvms     = this->jvms();
  int stk_size       = jvms->stk_size();
  int stkoff         = jvms->stkoff();
  Node* top          = this->top();
  for (int i = from_sp; i < stk_size; i++) {
    if (map->in(stkoff + i) != top) {
      map->set_req(stkoff + i, top);
    }
  }
}


//--------------------------------sync_jvms-----------------------------------
// Make sure our current jvms agrees with our parse state.
JVMState* GraphKit::sync_jvms() const {
  JVMState* jvms = this->jvms();
  jvms->set_bci(bci());       // Record the new bci in the JVMState
  jvms->set_sp(sp());         // Record the new sp in the JVMState
  assert(jvms_in_sync(), "jvms is now in sync");
  return jvms;
}

//--------------------------------sync_jvms_for_reexecute---------------------
// Make sure our current jvms agrees with our parse state.  This version
// uses the reexecute_sp for reexecuting bytecodes.
JVMState* GraphKit::sync_jvms_for_reexecute() {
  JVMState* jvms = this->jvms();
  jvms->set_bci(bci());          // Record the new bci in the JVMState
  jvms->set_sp(reexecute_sp());  // Record the new sp in the JVMState
  return jvms;
}

#ifdef ASSERT
bool GraphKit::jvms_in_sync() const {
  Parse* parse = is_Parse();
  if (parse == NULL) {
    if (bci() != jvms()->bci())      return false;
    if (sp()  != (int)jvms()->sp())  return false;
    return true;
  }
  if (jvms()->method() != parse->method())  return false;
  if (jvms()->bci()    != parse->bci())     return false;
  int jvms_sp = jvms()->sp();
  if (jvms_sp != parse->sp())               return false;
  int jvms_depth = jvms()->depth();
  if (jvms_depth != parse->depth())         return false;
  return true;
}

// Local helper checks for special internal merge points
// used to accumulate and merge exception states.
// They are marked by the region's in(0) edge being the root node.
// Such merge points must never "escape" into the parser at large,
// until they have been handed to gvn.transform.
static bool is_hidden_merge(Node* reg) {
  if (reg == NULL)  return false;
  if (reg->is_Phi()) {
    reg = reg->in(0);
    if (reg == NULL)  return false;
  }
  return reg->is_Region() && reg->in(0) != NULL && reg->in(0)->is_Root();
}

void GraphKit::verify_map() const {
  if (map() == NULL)  return;  // null map is OK
  assert(map()->req() <= jvms()->endoff(), "no extra garbage on map");
  assert(!map()->has_exceptions(), "call add_exception_states_from 1st");
  assert(!is_hidden_merge(control()), "call use_exception_state, not set_map");
}

void GraphKit::verify_exception_state(SafePointNode* ex_map) {
  assert(ex_map->next_exception() == NULL, "not already part of a chain");
  assert(has_saved_ex_oop(ex_map), "every exception state has an ex_oop");
}
#endif

//---------------------------stop_and_kill_map---------------------------------
// Set _map to NULL, signalling a stop to further bytecode execution.
// First smash the current map's control to a constant, to mark it dead.
void GraphKit::stop_and_kill_map() {
  SafePointNode* dead_map = stop();
  if (dead_map != NULL) {
    dead_map->disconnect_inputs(NULL, C); // Mark the map as killed.
    assert(dead_map->is_killed(), "must be so marked");
  }
}


//--------------------------------stopped--------------------------------------
// Tell if _map is NULL, or control is top.
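// (A control of top means this code path has already been cut off as dead.)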
bool GraphKit::stopped() {
  if (map() == NULL)            return true;
  else if (control() == top())  return true;
  else                          return false;
}


//-----------------------------has_ex_handler----------------------------------
// Tell if this method or any caller method has exception handlers.
bool GraphKit::has_ex_handler() {
  for (JVMState* jvmsp = jvms(); jvmsp != NULL; jvmsp = jvmsp->caller()) {
    if (jvmsp->has_method() && jvmsp->method()->has_exception_handlers()) {
      return true;
    }
  }
  return false;
}

//------------------------------save_ex_oop------------------------------------
// Save an exception without blowing stack contents or other JVM state.
void GraphKit::set_saved_ex_oop(SafePointNode* ex_map, Node* ex_oop) {
  assert(!has_saved_ex_oop(ex_map), "clear ex-oop before setting again");
  ex_map->add_req(ex_oop);
  debug_only(verify_exception_state(ex_map));
}

inline static Node* common_saved_ex_oop(SafePointNode* ex_map, bool clear_it) {
  assert(GraphKit::has_saved_ex_oop(ex_map), "ex_oop must be there");
  Node* ex_oop = ex_map->in(ex_map->req()-1);
  if (clear_it)  ex_map->del_req(ex_map->req()-1);
  return ex_oop;
}

//-----------------------------saved_ex_oop------------------------------------
// Recover a saved exception from its map.
Node* GraphKit::saved_ex_oop(SafePointNode* ex_map) {
  return common_saved_ex_oop(ex_map, false);
}

//--------------------------clear_saved_ex_oop---------------------------------
// Erase a previously saved exception from its map.
Node* GraphKit::clear_saved_ex_oop(SafePointNode* ex_map) {
  return common_saved_ex_oop(ex_map, true);
}

#ifdef ASSERT
//---------------------------has_saved_ex_oop----------------------------------
// Tell whether a map has a saved exception oop appended to it.
bool GraphKit::has_saved_ex_oop(SafePointNode* ex_map) {
  return ex_map->req() == ex_map->jvms()->endoff()+1;
}
#endif

//-------------------------make_exception_state--------------------------------
// Turn the current JVM state into an exception state, appending the ex_oop.
SafePointNode* GraphKit::make_exception_state(Node* ex_oop) {
  sync_jvms();
  SafePointNode* ex_map = stop();  // do not manipulate this map any more
  set_saved_ex_oop(ex_map, ex_oop);
  return ex_map;
}


//--------------------------add_exception_state--------------------------------
// Add an exception to my list of exceptions.
void GraphKit::add_exception_state(SafePointNode* ex_map) {
  if (ex_map == NULL || ex_map->control() == top()) {
    return;
  }
#ifdef ASSERT
  verify_exception_state(ex_map);
  if (has_exceptions()) {
    assert(ex_map->jvms()->same_calls_as(_exceptions->jvms()), "all collected exceptions must come from the same place");
  }
#endif

  // If there is already an exception of exactly this type, merge with it.
  // In particular, null-checks and other low-level exceptions common up here.
  Node*       ex_oop  = saved_ex_oop(ex_map);
  const Type* ex_type = _gvn.type(ex_oop);
  if (ex_oop == top()) {
    // No action needed.
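    // (An ex_oop of top means the exception path is dead, so there is
    // nothing to merge or chain.)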
    return;
  }
  assert(ex_type->isa_instptr(), "exception must be an instance");
  for (SafePointNode* e2 = _exceptions; e2 != NULL; e2 = e2->next_exception()) {
    const Type* ex_type2 = _gvn.type(saved_ex_oop(e2));
    // We check sp also because call bytecodes can generate exceptions
    // both before and after arguments are popped!
    if (ex_type2 == ex_type
        && e2->_jvms->sp() == ex_map->_jvms->sp()) {
      combine_exception_states(ex_map, e2);
      return;
    }
  }

  // No pre-existing exception of the same type.  Chain it on the list.
  push_exception_state(ex_map);
}

//-----------------------add_exception_states_from-----------------------------
void GraphKit::add_exception_states_from(JVMState* jvms) {
  SafePointNode* ex_map = jvms->map()->next_exception();
  if (ex_map != NULL) {
    jvms->map()->set_next_exception(NULL);
    for (SafePointNode* next_map; ex_map != NULL; ex_map = next_map) {
      next_map = ex_map->next_exception();
      ex_map->set_next_exception(NULL);
      add_exception_state(ex_map);
    }
  }
}

//-----------------------transfer_exceptions_into_jvms-------------------------
JVMState* GraphKit::transfer_exceptions_into_jvms() {
  if (map() == NULL) {
    // We need a JVMS to carry the exceptions, but the map has gone away.
    // Create a scratch JVMS, cloned from any of the exception states...
    if (has_exceptions()) {
      _map = _exceptions;
      _map = clone_map();
      _map->set_next_exception(NULL);
      clear_saved_ex_oop(_map);
      debug_only(verify_map());
    } else {
      // ...or created from scratch
      JVMState* jvms = new (C) JVMState(_method, NULL);
      jvms->set_bci(_bci);
      jvms->set_sp(_sp);
      jvms->set_map(new SafePointNode(TypeFunc::Parms, jvms));
      set_jvms(jvms);
      for (uint i = 0; i < map()->req(); i++)  map()->init_req(i, top());
      set_all_memory(top());
      while (map()->req() < jvms->endoff())  map()->add_req(top());
    }
    // (This is a kludge, in case you didn't notice.)
    set_control(top());
  }
  JVMState* jvms = sync_jvms();
  assert(!jvms->map()->has_exceptions(), "no exceptions on this map yet");
  jvms->map()->set_next_exception(_exceptions);
  _exceptions = NULL;   // done with this set of exceptions
  return jvms;
}

static inline void add_n_reqs(Node* dstphi, Node* srcphi) {
  assert(is_hidden_merge(dstphi), "must be a special merge node");
  assert(is_hidden_merge(srcphi), "must be a special merge node");
  uint limit = srcphi->req();
  for (uint i = PhiNode::Input; i < limit; i++) {
    dstphi->add_req(srcphi->in(i));
  }
}
static inline void add_one_req(Node* dstphi, Node* src) {
  assert(is_hidden_merge(dstphi), "must be a special merge node");
  assert(!is_hidden_merge(src), "must not be a special merge node");
  dstphi->add_req(src);
}

//-----------------------combine_exception_states------------------------------
// This helper function combines exception states by building phis on a
// specially marked state-merging region.  These regions and phis are
// untransformed, and can build up gradually.  The region is marked by
// having a control input of the root node, rather than NULL.  Such
// regions do not appear except in this function, and in use_exception_state.
void GraphKit::combine_exception_states(SafePointNode* ex_map, SafePointNode* phi_map) {
  if (failing())  return;  // dying anyway...
  JVMState* ex_jvms = ex_map->_jvms;
  assert(ex_jvms->same_calls_as(phi_map->_jvms), "consistent call chains");
  assert(ex_jvms->stkoff() == phi_map->_jvms->stkoff(), "matching locals");
  assert(ex_jvms->sp() == phi_map->_jvms->sp(), "matching stack sizes");
  assert(ex_jvms->monoff() == phi_map->_jvms->monoff(), "matching JVMS");
  assert(ex_jvms->scloff() == phi_map->_jvms->scloff(), "matching scalar replaced objects");
  assert(ex_map->req() == phi_map->req(), "matching maps");
  uint tos = ex_jvms->stkoff() + ex_jvms->sp();
  Node*         hidden_merge_mark = root();
  Node*         region  = phi_map->control();
  MergeMemNode* phi_mem = phi_map->merged_memory();
  MergeMemNode* ex_mem  = ex_map->merged_memory();
  if (region->in(0) != hidden_merge_mark) {
    // The control input is not (yet) a specially-marked region in phi_map.
    // Make it so, and build some phis.
    region = new RegionNode(2);
    _gvn.set_type(region, Type::CONTROL);
    region->set_req(0, hidden_merge_mark);  // marks an internal ex-state
    region->init_req(1, phi_map->control());
    phi_map->set_control(region);
    Node* io_phi = PhiNode::make(region, phi_map->i_o(), Type::ABIO);
    record_for_igvn(io_phi);
    _gvn.set_type(io_phi, Type::ABIO);
    phi_map->set_i_o(io_phi);
    for (MergeMemStream mms(phi_mem); mms.next_non_empty(); ) {
      Node* m = mms.memory();
      Node* m_phi = PhiNode::make(region, m, Type::MEMORY, mms.adr_type(C));
      record_for_igvn(m_phi);
      _gvn.set_type(m_phi, Type::MEMORY);
      mms.set_memory(m_phi);
    }
  }

  // Either or both of phi_map and ex_map might already be converted into phis.
  Node* ex_control = ex_map->control();
  // if there is special marking on ex_map also, we add multiple edges from src
  bool add_multiple = (ex_control->in(0) == hidden_merge_mark);
  // how wide was the destination phi_map, originally?
  uint orig_width = region->req();

  if (add_multiple) {
    add_n_reqs(region, ex_control);
    add_n_reqs(phi_map->i_o(), ex_map->i_o());
  } else {
    // ex_map has no merges, so we just add single edges everywhere
    add_one_req(region, ex_control);
    add_one_req(phi_map->i_o(), ex_map->i_o());
  }
  for (MergeMemStream mms(phi_mem, ex_mem); mms.next_non_empty2(); ) {
    if (mms.is_empty()) {
      // get a copy of the base memory, and patch some inputs into it
      const TypePtr* adr_type = mms.adr_type(C);
      Node* phi = mms.force_memory()->as_Phi()->slice_memory(adr_type);
      assert(phi->as_Phi()->region() == mms.base_memory()->in(0), "");
      mms.set_memory(phi);
      // Prepare to append interesting stuff onto the newly sliced phi:
      while (phi->req() > orig_width)  phi->del_req(phi->req()-1);
    }
    // Append stuff from ex_map:
    if (add_multiple) {
      add_n_reqs(mms.memory(), mms.memory2());
    } else {
      add_one_req(mms.memory(), mms.memory2());
    }
  }
  uint limit = ex_map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip everything in the JVMS after tos.  (The ex_oop follows.)
    if (i == tos)  i = ex_jvms->monoff();
    Node* src = ex_map->in(i);
    Node* dst = phi_map->in(i);
    if (src != dst) {
      PhiNode* phi;
      if (dst->in(0) != region) {
        dst = phi = PhiNode::make(region, dst, _gvn.type(dst));
        record_for_igvn(phi);
        _gvn.set_type(phi, phi->type());
        phi_map->set_req(i, dst);
        // Prepare to append interesting stuff onto the new phi:
        while (dst->req() > orig_width)  dst->del_req(dst->req()-1);
      } else {
        assert(dst->is_Phi(), "nobody else uses a hidden region");
        phi = dst->as_Phi();
      }
      if (add_multiple && src->in(0) == ex_control) {
        // Both are phis.
        add_n_reqs(dst, src);
      } else {
        while (dst->req() < region->req())  add_one_req(dst, src);
      }
      const Type* srctype = _gvn.type(src);
      if (phi->type() != srctype) {
        const Type* dsttype = phi->type()->meet_speculative(srctype);
        if (phi->type() != dsttype) {
          phi->set_type(dsttype);
          _gvn.set_type(phi, dsttype);
        }
      }
    }
  }
  phi_map->merge_replaced_nodes_with(ex_map);
}

//--------------------------use_exception_state--------------------------------
Node* GraphKit::use_exception_state(SafePointNode* phi_map) {
  if (failing()) { stop(); return top(); }
  Node* region = phi_map->control();
  Node* hidden_merge_mark = root();
  assert(phi_map->jvms()->map() == phi_map, "sanity: 1-1 relation");
  Node* ex_oop = clear_saved_ex_oop(phi_map);
  if (region->in(0) == hidden_merge_mark) {
    // Special marking for internal ex-states.  Process the phis now.
    region->set_req(0, region);  // now it's an ordinary region
    set_jvms(phi_map->jvms());   // ...so now we can use it as a map
    // Note: Setting the jvms also sets the bci and sp.
    set_control(_gvn.transform(region));
    uint tos = jvms()->stkoff() + sp();
    for (uint i = 1; i < tos; i++) {
      Node* x = phi_map->in(i);
      if (x->in(0) == region) {
        assert(x->is_Phi(), "expected a special phi");
        phi_map->set_req(i, _gvn.transform(x));
      }
    }
    for (MergeMemStream mms(merged_memory()); mms.next_non_empty(); ) {
      Node* x = mms.memory();
      if (x->in(0) == region) {
        assert(x->is_Phi(), "nobody else uses a hidden region");
        mms.set_memory(_gvn.transform(x));
      }
    }
    if (ex_oop->in(0) == region) {
      assert(ex_oop->is_Phi(), "expected a special phi");
      ex_oop = _gvn.transform(ex_oop);
    }
  } else {
    set_jvms(phi_map->jvms());
  }

  assert(!is_hidden_merge(phi_map->control()), "hidden ex. states cleared");
  assert(!is_hidden_merge(phi_map->i_o()), "hidden ex. states cleared");
  return ex_oop;
}

//---------------------------------java_bc-------------------------------------
Bytecodes::Code GraphKit::java_bc() const {
  ciMethod* method = this->method();
  int       bci    = this->bci();
  if (method != NULL && bci != InvocationEntryBci)
    return method->java_code_at_bci(bci);
  else
    return Bytecodes::_illegal;
}

void GraphKit::uncommon_trap_if_should_post_on_exceptions(Deoptimization::DeoptReason reason,
                                                          bool must_throw) {
  // if the exception capability is set, then we will generate code
  // to check the JavaThread.should_post_on_exceptions flag to see
  // if we actually need to report exception events (for this
  // thread).  If we don't need to report exception events, we will
  // take the normal fast path provided by add_exception_events.  If
  // exception event reporting is enabled for this thread, we will
  // take the uncommon_trap in the BuildCutout below.

  // first must access the should_post_on_exceptions_flag in this thread's JavaThread
  Node* jthread = _gvn.transform(new ThreadLocalNode());
  Node* adr = basic_plus_adr(top(), jthread, in_bytes(JavaThread::should_post_on_exceptions_flag_offset()));
  Node* should_post_flag = make_load(control(), adr, TypeInt::INT, T_INT, Compile::AliasIdxRaw, MemNode::unordered);

  // Test the should_post_on_exceptions_flag vs. 0
  Node* chk = _gvn.transform( new CmpINode(should_post_flag, intcon(0)) );
  Node* tst = _gvn.transform( new BoolNode(chk, BoolTest::eq) );

  // Branch to slow_path if should_post_on_exceptions_flag was true
  { BuildCutout unless(this, tst, PROB_MAX);
    // Do not try anything fancy if we're notifying the VM on every throw.
    // Cf. case Bytecodes::_athrow in parse2.cpp.
    uncommon_trap(reason, Deoptimization::Action_none,
                  (ciKlass*)NULL, (char*)NULL, must_throw);
  }

}

//------------------------------builtin_throw----------------------------------
void GraphKit::builtin_throw(Deoptimization::DeoptReason reason, Node* arg) {
  bool must_throw = true;

  if (env()->jvmti_can_post_on_exceptions()) {
    // check if we must post exception events, take uncommon trap if so
    uncommon_trap_if_should_post_on_exceptions(reason, must_throw);
    // here if should_post_on_exceptions is false
    // continue on with the normal codegen
  }

  // If this particular condition has not yet happened at this
  // bytecode, then use the uncommon trap mechanism, and allow for
  // a future recompilation if several traps occur here.
  // If the throw is hot, try to use a more complicated inline mechanism
  // which keeps execution inside the compiled code.
  bool treat_throw_as_hot = false;
  ciMethodData* md = method()->method_data();

  if (ProfileTraps) {
    if (too_many_traps(reason)) {
      treat_throw_as_hot = true;
    }
    // (If there is no MDO at all, assume it is early in
    // execution, and that any deopts are part of the
    // startup transient, and don't need to be remembered.)

    // Also, if there is a local exception handler, treat all throws
    // as hot if there has been at least one in this method.
    if (C->trap_count(reason) != 0
        && method()->method_data()->trap_count(reason) != 0
        && has_ex_handler()) {
      treat_throw_as_hot = true;
    }
  }

  // If this throw happens frequently, an uncommon trap might cause
  // a performance pothole.  If there is a local exception handler,
  // and if this particular bytecode appears to be deoptimizing often,
  // let us handle the throw inline, with a preconstructed instance.
  // Note:   If the deopt count has blown up, the uncommon trap
  // runtime is going to flush this nmethod, no matter what.
  if (treat_throw_as_hot
      && (!StackTraceInThrowable || OmitStackTraceInFastThrow)) {
    // If the throw is local, we use a pre-existing instance and
    // punt on the backtrace.  This would lead to a missing backtrace
    // (a repeat of 4292742) if the backtrace object is ever asked
    // for its backtrace.
    // Fixing this remaining case of 4292742 requires some flavor of
    // escape analysis.  Leave that for the future.
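    // Each trap reason below maps to one of the VM's shared, preallocated
    // exception instances.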
    ciInstance* ex_obj = NULL;
    switch (reason) {
    case Deoptimization::Reason_null_check:
      ex_obj = env()->NullPointerException_instance();
      break;
    case Deoptimization::Reason_div0_check:
      ex_obj = env()->ArithmeticException_instance();
      break;
    case Deoptimization::Reason_range_check:
      ex_obj = env()->ArrayIndexOutOfBoundsException_instance();
      break;
    case Deoptimization::Reason_class_check:
      if (java_bc() == Bytecodes::_aastore) {
        ex_obj = env()->ArrayStoreException_instance();
      } else {
        ex_obj = env()->ClassCastException_instance();
      }
      break;
    default:
      break;
    }
    if (failing()) { stop(); return; }  // exception allocation might fail
    if (ex_obj != NULL) {
      // Cheat with a preallocated exception object.
      if (C->log() != NULL)
        C->log()->elem("hot_throw preallocated='1' reason='%s'",
                       Deoptimization::trap_reason_name(reason));
      const TypeInstPtr* ex_con  = TypeInstPtr::make(ex_obj);
      Node*              ex_node = _gvn.transform(ConNode::make(ex_con));

      // Clear the detail message of the preallocated exception object.
      // Weblogic sometimes mutates the detail message of exceptions
      // using reflection.
      int offset = java_lang_Throwable::get_detailMessage_offset();
      const TypePtr* adr_typ = ex_con->add_offset(offset);

      Node *adr = basic_plus_adr(ex_node, ex_node, offset);
      const TypeOopPtr* val_type = TypeOopPtr::make_from_klass(env()->String_klass());
      // Conservatively release stores of object references.
      Node *store = store_oop_to_object(control(), ex_node, adr, adr_typ, null(), val_type, T_OBJECT, MemNode::release);

      add_exception_state(make_exception_state(ex_node));
      return;
    }
  }

  // %%% Maybe add entry to OptoRuntime which directly throws the exc.?
  // It won't be much cheaper than bailing to the interp., since we'll
  // have to pass up all the debug-info, and the runtime will have to
  // create the stack trace.

  // Usual case:  Bail to interpreter.
  // Reserve the right to recompile if we haven't seen anything yet.

  ciMethod* m = Deoptimization::reason_is_speculate(reason) ? C->method() : NULL;
  Deoptimization::DeoptAction action = Deoptimization::Action_maybe_recompile;
  if (treat_throw_as_hot
      && (method()->method_data()->trap_recompiled_at(bci(), m)
          || C->too_many_traps(reason))) {
    // We cannot afford to take more traps here.  Suffer in the interpreter.
    if (C->log() != NULL)
      C->log()->elem("hot_throw preallocated='0' reason='%s' mcount='%d'",
                     Deoptimization::trap_reason_name(reason),
                     C->trap_count(reason));
    action = Deoptimization::Action_none;
  }

  // "must_throw" prunes the JVM state to include only the stack, if there
  // are no local exception handlers.  This should cut down on register
  // allocation time and code size, by drastically reducing the number
  // of in-edges on the call to the uncommon trap.

  uncommon_trap(reason, action, (ciKlass*)NULL, (char*)NULL, must_throw);
}


//----------------------------PreserveJVMState---------------------------------
PreserveJVMState::PreserveJVMState(GraphKit* kit, bool clone_map) {
  debug_only(kit->verify_map());
  _kit = kit;
  _map = kit->map();   // preserve the map
  _sp  = kit->sp();
  kit->set_map(clone_map ? kit->clone_map() : NULL);
#ifdef ASSERT
  _bci = kit->bci();
  Parse* parser = kit->is_Parse();
  int block = (parser == NULL || parser->block() == NULL) ? -1 : parser->block()->rpo();
  _block = block;
#endif
}
PreserveJVMState::~PreserveJVMState() {
  GraphKit* kit = _kit;
#ifdef ASSERT
  assert(kit->bci() == _bci, "bci must not shift");
  Parse* parser = kit->is_Parse();
  int block = (parser == NULL || parser->block() == NULL) ? -1 : parser->block()->rpo();
  assert(block == _block, "block must not shift");
#endif
  kit->set_map(_map);
  kit->set_sp(_sp);
}


//-----------------------------BuildCutout-------------------------------------
BuildCutout::BuildCutout(GraphKit* kit, Node* p, float prob, float cnt)
  : PreserveJVMState(kit)
{
  assert(p->is_Con() || p->is_Bool(), "test must be a bool");
  SafePointNode* outer_map = _map;   // preserved map is caller's
  SafePointNode* inner_map = kit->map();
  IfNode* iff = kit->create_and_map_if(outer_map->control(), p, prob, cnt);
  outer_map->set_control(kit->gvn().transform( new IfTrueNode(iff) ));
  inner_map->set_control(kit->gvn().transform( new IfFalseNode(iff) ));
}
BuildCutout::~BuildCutout() {
  GraphKit* kit = _kit;
  assert(kit->stopped(), "cutout code must stop, throw, return, etc.");
}

//---------------------------PreserveReexecuteState----------------------------
PreserveReexecuteState::PreserveReexecuteState(GraphKit* kit) {
  assert(!kit->stopped(), "must call stopped() before");
  _kit = kit;
  _sp  = kit->sp();
  _reexecute = kit->jvms()->_reexecute;
}
PreserveReexecuteState::~PreserveReexecuteState() {
  if (_kit->stopped()) return;
  _kit->jvms()->_reexecute = _reexecute;
  _kit->set_sp(_sp);
}

//------------------------------clone_map--------------------------------------
// Implementation of PreserveJVMState
//
// Only clone_map(...) here. If this function is only used in the
// PreserveJVMState class we may want to get rid of this extra
// function eventually and do it all there.

SafePointNode* GraphKit::clone_map() {
  if (map() == NULL)  return NULL;

  // Clone the memory edge first
  Node* mem = MergeMemNode::make(map()->memory());
  gvn().set_type_bottom(mem);

  SafePointNode *clonemap = (SafePointNode*)map()->clone();
  JVMState* jvms = this->jvms();
  JVMState* clonejvms = jvms->clone_shallow(C);
  clonemap->set_memory(mem);
  clonemap->set_jvms(clonejvms);
  clonejvms->set_map(clonemap);
  record_for_igvn(clonemap);
  gvn().set_type_bottom(clonemap);
  return clonemap;
}


//-----------------------------set_map_clone-----------------------------------
void GraphKit::set_map_clone(SafePointNode* m) {
  _map = m;
  _map = clone_map();
  _map->set_next_exception(NULL);
  debug_only(verify_map());
}


//----------------------------kill_dead_locals---------------------------------
// Detect any locals which are known to be dead, and force them to top.
void GraphKit::kill_dead_locals() {
  // Consult the liveness information for the locals.  If any
  // of them are unused, then they can be replaced by top().  This
  // should help register allocation time and cut down on the size
  // of the deoptimization information.

  // This call is made from many of the bytecode handling
  // subroutines called from the Big Switch in do_one_bytecode.
  // Every bytecode which might include a slow path is responsible
  // for killing its dead locals.  The more consistent we
  // are about killing deads, the fewer useless phis will be
  // constructed for them at various merge points.

  // bci can be -1 (InvocationEntryBci).  We return the entry
  // liveness for the method.

  if (method() == NULL || method()->code_size() == 0) {
    // We are building a graph for a call to a native method.
    // All locals are live.
    return;
  }

  ResourceMark rm;

  // Consult the liveness information for the locals.  If any
  // of them are unused, then they can be replaced by top().  This
  // should help register allocation time and cut down on the size
  // of the deoptimization information.
  MethodLivenessResult live_locals = method()->liveness_at_bci(bci());

  int len = (int)live_locals.size();
  assert(len <= jvms()->loc_size(), "too many live locals");
  for (int local = 0; local < len; local++) {
    if (!live_locals.at(local)) {
      set_local(local, top());
    }
  }
}

#ifdef ASSERT
//-------------------------dead_locals_are_killed------------------------------
// Return true if all dead locals are set to top in the map.
// Used to assert "clean" debug info at various points.
bool GraphKit::dead_locals_are_killed() {
  if (method() == NULL || method()->code_size() == 0) {
    // No locals need to be dead, so all is as it should be.
    return true;
  }

  // Make sure somebody called kill_dead_locals upstream.
  ResourceMark rm;
  for (JVMState* jvms = this->jvms(); jvms != NULL; jvms = jvms->caller()) {
    if (jvms->loc_size() == 0)  continue;  // no locals to consult
    SafePointNode* map = jvms->map();
    ciMethod* method = jvms->method();
    int bci = jvms->bci();
    if (jvms == this->jvms()) {
      bci = this->bci();  // it might not yet be synched
    }
    MethodLivenessResult live_locals = method->liveness_at_bci(bci);
    int len = (int)live_locals.size();
    if (!live_locals.is_valid() || len == 0)
      // This method is trivial, or is poisoned by a breakpoint.
      return true;
    assert(len == jvms->loc_size(), "live map consistent with locals map");
    for (int local = 0; local < len; local++) {
      if (!live_locals.at(local) && map->local(jvms, local) != top()) {
        if (PrintMiscellaneous && (Verbose || WizardMode)) {
          tty->print_cr("Zombie local %d: ", local);
          jvms->dump();
        }
        return false;
      }
    }
  }
  return true;
}

#endif //ASSERT

// Helper function for enforcing certain bytecodes to reexecute if
// deoptimization happens
static bool should_reexecute_implied_by_bytecode(JVMState *jvms, bool is_anewarray) {
  ciMethod* cur_method = jvms->method();
  int       cur_bci    = jvms->bci();
  if (cur_method != NULL && cur_bci != InvocationEntryBci) {
    Bytecodes::Code code = cur_method->java_code_at_bci(cur_bci);
    return Interpreter::bytecode_should_reexecute(code) ||
           (is_anewarray && (code == Bytecodes::_multianewarray));
    // Reexecute _multianewarray bytecode which was replaced with
    // sequence of [a]newarray. See Parse::do_multianewarray().
    //
    // Note: interpreter should not have it set since this optimization
    // is limited by dimensions and guarded by flag so in some cases
    // multianewarray() runtime calls will be generated and
    // the bytecode should not be reexecuted (stack will not be reset).
  } else {
    return false;
  }
}

// Helper function for adding JVMState and debug information to node
void GraphKit::add_safepoint_edges(SafePointNode* call, bool must_throw) {
  // Add the safepoint edges to the call (or other safepoint).

  // Make sure dead locals are set to top.  This
  // should help register allocation time and cut down on the size
  // of the deoptimization information.
  assert(dead_locals_are_killed(), "garbage in debug info before safepoint");

  // Walk the inline list to fill in the correct set of JVMState's
  // Also fill in the associated edges for each JVMState.

  // If the bytecode needs to be reexecuted we need to put
  // the arguments back on the stack.
  const bool should_reexecute = jvms()->should_reexecute();
  JVMState* youngest_jvms = should_reexecute ? sync_jvms_for_reexecute() : sync_jvms();

  // NOTE: set_bci (called from sync_jvms) might reset the reexecute bit to
  // undefined if the bci is different.  This is normal for Parse but it
  // should not happen for LibraryCallKit because only one bci is processed.
  assert(!is_LibraryCallKit() || (jvms()->should_reexecute() == should_reexecute),
         "in LibraryCallKit the reexecute bit should not change");

  // If we are guaranteed to throw, we can prune everything but the
  // input to the current bytecode.
  bool can_prune_locals = false;
  uint stack_slots_not_pruned = 0;
  int inputs = 0, depth = 0;
  if (must_throw) {
    assert(method() == youngest_jvms->method(), "sanity");
    if (compute_stack_effects(inputs, depth)) {
      can_prune_locals = true;
      stack_slots_not_pruned = inputs;
    }
  }

  if (env()->should_retain_local_variables()) {
    // At any safepoint, this method can get breakpointed, which would
    // then require an immediate deoptimization.
    can_prune_locals = false;  // do not prune locals
    stack_slots_not_pruned = 0;
  }

  // do not scribble on the input jvms
  JVMState* out_jvms = youngest_jvms->clone_deep(C);
  call->set_jvms(out_jvms);  // Start jvms list for call node

  // For a known set of bytecodes, the interpreter should reexecute them if
  // deoptimization happens. We set the reexecute state for them here
  if (out_jvms->is_reexecute_undefined() &&  // don't change if already specified
      should_reexecute_implied_by_bytecode(out_jvms, call->is_AllocateArray())) {
    out_jvms->set_should_reexecute(true);  // NOTE: youngest_jvms not changed
  }

  // Presize the call:
  DEBUG_ONLY(uint non_debug_edges = call->req());
  call->add_req_batch(top(), youngest_jvms->debug_depth());
  assert(call->req() == non_debug_edges + youngest_jvms->debug_depth(), "");

  // Set up edges so that the call looks like this:
  //  Call [state:] ctl io mem fptr retadr
  //       [parms:] parm0 ... parmN
  //       [root:]  loc0 ... locN stk0 ... stkSP mon0 obj0 ... monN objN
  //       [...mid:] loc0 ... locN stk0 ... stkSP mon0 obj0 ... monN objN [...]
  //       [young:] loc0 ... locN stk0 ... stkSP mon0 obj0 ... monN objN
  // Note that caller debug info precedes callee debug info.

  // Fill pointer walks backwards from "young:" to "root:" in the diagram above:
  uint debug_ptr = call->req();

  // Loop over the map input edges associated with jvms, add them
  // to the call node, & reset all offsets to match call node array.
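  // (The loop below starts at the youngest JVMState and follows caller()
  // links outward while debug_ptr retreats, so each caller's debug info
  // ends up ahead of its callee's, matching the diagram above.)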
  for (JVMState* in_jvms = youngest_jvms; in_jvms != NULL; ) {
    uint debug_end   = debug_ptr;
    uint debug_start = debug_ptr - in_jvms->debug_size();
    debug_ptr = debug_start;  // back up the ptr

    uint p = debug_start;  // walks forward in [debug_start, debug_end)
    uint j, k, l;
    SafePointNode* in_map = in_jvms->map();
    out_jvms->set_map(call);

    if (can_prune_locals) {
      assert(in_jvms->method() == out_jvms->method(), "sanity");
      // If the current throw can reach an exception handler in this JVMS,
      // then we must keep everything live that can reach that handler.
      // As a quick and dirty approximation, we look for any handlers at all.
      if (in_jvms->method()->has_exception_handlers()) {
        can_prune_locals = false;
      }
    }

    // Add the Locals
    k = in_jvms->locoff();
    l = in_jvms->loc_size();
    out_jvms->set_locoff(p);
    if (!can_prune_locals) {
      for (j = 0; j < l; j++)
        call->set_req(p++, in_map->in(k+j));
    } else {
      p += l;  // already set to top above by add_req_batch
    }

    // Add the Expression Stack
    k = in_jvms->stkoff();
    l = in_jvms->sp();
    out_jvms->set_stkoff(p);
    if (!can_prune_locals) {
      for (j = 0; j < l; j++)
        call->set_req(p++, in_map->in(k+j));
    } else if (can_prune_locals && stack_slots_not_pruned != 0) {
      // Divide stack into {S0,...,S1}, where S0 is set to top.
      uint s1 = stack_slots_not_pruned;
      stack_slots_not_pruned = 0;  // for next iteration
      if (s1 > l)  s1 = l;
      uint s0 = l - s1;
      p += s0;  // skip the tops preinstalled by add_req_batch
      for (j = s0; j < l; j++)
        call->set_req(p++, in_map->in(k+j));
    } else {
      p += l;  // already set to top above by add_req_batch
    }

    // Add the Monitors
    k = in_jvms->monoff();
    l = in_jvms->mon_size();
    out_jvms->set_monoff(p);
    for (j = 0; j < l; j++)
      call->set_req(p++, in_map->in(k+j));

    // Copy any scalar object fields.
    k = in_jvms->scloff();
    l = in_jvms->scl_size();
    out_jvms->set_scloff(p);
    for (j = 0; j < l; j++)
      call->set_req(p++, in_map->in(k+j));

    // Finish the new jvms.
    out_jvms->set_endoff(p);

    assert(out_jvms->endoff()     == debug_end,             "fill ptr must match");
    assert(out_jvms->depth()      == in_jvms->depth(),      "depth must match");
    assert(out_jvms->loc_size()   == in_jvms->loc_size(),   "size must match");
    assert(out_jvms->mon_size()   == in_jvms->mon_size(),   "size must match");
    assert(out_jvms->scl_size()   == in_jvms->scl_size(),   "size must match");
    assert(out_jvms->debug_size() == in_jvms->debug_size(), "size must match");

    // Update the two tail pointers in parallel.
    out_jvms = out_jvms->caller();
    in_jvms  = in_jvms->caller();
  }

  assert(debug_ptr == non_debug_edges, "debug info must fit exactly");

  // Test the correctness of JVMState::debug_xxx accessors:
  assert(call->jvms()->debug_start() == non_debug_edges, "");
  assert(call->jvms()->debug_end()   == call->req(), "");
  assert(call->jvms()->debug_depth() == call->req() - non_debug_edges, "");
}

bool GraphKit::compute_stack_effects(int& inputs, int& depth) {
  Bytecodes::Code code = java_bc();
  if (code == Bytecodes::_wide) {
    code = method()->java_code_at_bci(bci() + 1);
  }

  BasicType rtype = T_ILLEGAL;
  int       rsize = 0;

  if (code != Bytecodes::_illegal) {
    depth = Bytecodes::depth(code);        // checkcast=0, athrow=-1
    rtype = Bytecodes::result_type(code);  // checkcast=P, athrow=V
    if (rtype < T_CONFLICT)
      rsize = type2size[rtype];
  }

  switch (code) {
  case Bytecodes::_illegal:
    return false;

  case Bytecodes::_ldc:
  case Bytecodes::_ldc_w:
  case Bytecodes::_ldc2_w:
    inputs = 0;
    break;

  case Bytecodes::_dup:         inputs = 1;  break;
  case Bytecodes::_dup_x1:      inputs = 2;  break;
  case Bytecodes::_dup_x2:      inputs = 3;  break;
  case Bytecodes::_dup2:        inputs = 2;  break;
  case Bytecodes::_dup2_x1:     inputs = 3;  break;
  case Bytecodes::_dup2_x2:     inputs = 4;  break;
  case Bytecodes::_swap:        inputs = 2;  break;
  case Bytecodes::_arraylength: inputs = 1;  break;

  case Bytecodes::_getstatic:
  case Bytecodes::_putstatic:
  case Bytecodes::_getfield:
  case Bytecodes::_putfield:
    {
      bool ignored_will_link;
      ciField* field = method()->get_field_at_bci(bci(), ignored_will_link);
      int      size  = field->type()->size();
      bool is_get = (depth >= 0), is_static = (depth & 1);
      inputs = (is_static ? 0 : 1);
      if (is_get) {
        depth = size - inputs;
      } else {
        inputs += size;  // putxxx pops the value from the stack
        depth = - inputs;
      }
    }
    break;

  case Bytecodes::_invokevirtual:
  case Bytecodes::_invokespecial:
  case Bytecodes::_invokestatic:
  case Bytecodes::_invokedynamic:
  case Bytecodes::_invokeinterface:
    {
      bool ignored_will_link;
      ciSignature* declared_signature = NULL;
      ciMethod* ignored_callee = method()->get_method_at_bci(bci(), ignored_will_link, &declared_signature);
      assert(declared_signature != NULL, "cannot be null");
      inputs = declared_signature->arg_size_for_bc(code);
      int size = declared_signature->return_type()->size();
      depth = size - inputs;
    }
    break;

  case Bytecodes::_multianewarray:
    {
      ciBytecodeStream iter(method());
      iter.reset_to_bci(bci());
      iter.next();
      inputs = iter.get_dimensions();
      assert(rsize == 1, "");
      depth = rsize - inputs;
    }
    break;

  case Bytecodes::_withfield: {
    bool ignored_will_link;
    ciField* field = method()->get_field_at_bci(bci(), ignored_will_link);
    int      size  = field->type()->size();
    inputs = size+1;
    depth = rsize - inputs;
    break;
  }

  case Bytecodes::_ireturn:
  case Bytecodes::_lreturn:
  case Bytecodes::_freturn:
  case Bytecodes::_dreturn:
  case Bytecodes::_areturn:
    assert(rsize == -depth, "");
    inputs = rsize;
    break;

  case Bytecodes::_jsr:
  case Bytecodes::_jsr_w:
    inputs = 0;
    depth  = 1;  // S.B. depth=1, not zero
    break;

  default:
    // bytecode produces a typed result
    inputs = rsize - depth;
    assert(inputs >= 0, "");
    break;
  }

#ifdef ASSERT
  // spot check
  int outputs = depth + inputs;
  assert(outputs >= 0, "sanity");
  switch (code) {
  case Bytecodes::_checkcast: assert(inputs == 1 && outputs == 1, ""); break;
  case Bytecodes::_athrow:    assert(inputs == 1 && outputs == 0, ""); break;
  case Bytecodes::_aload_0:   assert(inputs == 0 && outputs == 1, ""); break;
  case Bytecodes::_return:    assert(inputs == 0 && outputs == 0, ""); break;
  case Bytecodes::_drem:      assert(inputs == 4 && outputs == 2, ""); break;
  default:                    break;
  }
#endif //ASSERT

  return true;
}



//------------------------------basic_plus_adr---------------------------------
Node* GraphKit::basic_plus_adr(Node* base, Node* ptr, Node* offset) {
  // short-circuit a common case
  if (offset == intcon(0))  return ptr;
  return _gvn.transform( new AddPNode(base, ptr, offset) );
}

Node* GraphKit::ConvI2L(Node* offset) {
  // short-circuit a common case
  jint offset_con = find_int_con(offset, Type::OffsetBot);
  if (offset_con != Type::OffsetBot) {
    return longcon((jlong) offset_con);
  }
  return _gvn.transform( new ConvI2LNode(offset));
}

Node* GraphKit::ConvI2UL(Node* offset) {
  juint offset_con = (juint) find_int_con(offset, Type::OffsetBot);
  if (offset_con != (juint) Type::OffsetBot) {
    return longcon((julong) offset_con);
  }
  Node* conv = _gvn.transform( new ConvI2LNode(offset));
  Node* mask = _gvn.transform(ConLNode::make((julong) max_juint));
  return _gvn.transform( new AndLNode(conv, mask) );
}

Node* GraphKit::ConvL2I(Node* offset) {
  // short-circuit a common case
  jlong offset_con = find_long_con(offset, (jlong)Type::OffsetBot);
  if (offset_con != (jlong)Type::OffsetBot) {
    return intcon((int) offset_con);
  }
  return _gvn.transform( new ConvL2INode(offset));
}

//-------------------------load_object_klass-----------------------------------
Node* GraphKit::load_object_klass(Node* obj) {
  // Special-case a fresh allocation to avoid building nodes:
  Node* akls = AllocateNode::Ideal_klass(obj, &_gvn);
  if (akls != NULL)  return akls;
  Node* k_adr = basic_plus_adr(obj, oopDesc::klass_offset_in_bytes());
  return _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), k_adr, TypeInstPtr::KLASS));
}

//-------------------------load_array_length-----------------------------------
Node* GraphKit::load_array_length(Node* array) {
  // Special-case a fresh allocation to avoid building nodes:
  AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(array, &_gvn);
  Node *alen;
  if (alloc == NULL) {
    Node *r_adr = basic_plus_adr(array, arrayOopDesc::length_offset_in_bytes());
    alen = _gvn.transform( new LoadRangeNode(0, immutable_memory(), r_adr, TypeInt::POS));
  } else {
    alen = alloc->Ideal_length();
    Node* ccast = alloc->make_ideal_length(_gvn.type(array)->is_oopptr(), &_gvn);
    if (ccast != alen) {
      alen = _gvn.transform(ccast);
    }
  }
  return alen;
}

//------------------------------do_null_check----------------------------------
// Helper function to do a NULL pointer check.  Returned value is
// the incoming address with NULL casted away.  You are allowed to use the
// not-null value only if you are control dependent on the test.
#ifndef PRODUCT
extern int explicit_null_checks_inserted,
           explicit_null_checks_elided;
#endif
Node* GraphKit::null_check_common(Node* value, BasicType type,
                                  // optional arguments for variations:
                                  bool assert_null,
                                  Node* *null_control,
                                  bool speculative) {
  assert(!assert_null || null_control == NULL, "not both at once");
  if (stopped())  return top();
  NOT_PRODUCT(explicit_null_checks_inserted++);

  // Construct NULL check
  Node *chk = NULL;
  switch(type) {
    case T_LONG      : chk = new CmpLNode(value, _gvn.zerocon(T_LONG)); break;
    case T_INT       : chk = new CmpINode(value, _gvn.intcon(0)); break;
    case T_VALUETYPE : // fall through
    case T_ARRAY     : // fall through
      type = T_OBJECT;  // simplify further tests
    case T_OBJECT : {
      const Type *t = _gvn.type( value );

      const TypeOopPtr* tp = t->isa_oopptr();
      if (tp != NULL && tp->klass() != NULL && !tp->klass()->is_loaded()
          // Only for do_null_check, not any of its siblings:
          && !assert_null && null_control == NULL) {
        // Usually, any field access or invocation on an unloaded oop type
        // will simply fail to link, since the statically linked class is
        // likely also to be unloaded.  However, in -Xcomp mode, sometimes
        // the static class is loaded but the sharper oop type is not.
        // Rather than checking for this obscure case in lots of places,
        // we simply observe that a null check on an unloaded class
        // will always be followed by a nonsense operation, so we
        // can just issue the uncommon trap here.
        // Our access to the unloaded class will only be correct
        // after it has been loaded and initialized, which requires
        // a trip through the interpreter.
#ifndef PRODUCT
        if (WizardMode) { tty->print("Null check of unloaded "); tp->klass()->print(); tty->cr(); }
#endif
        uncommon_trap(Deoptimization::Reason_unloaded,
                      Deoptimization::Action_reinterpret,
                      tp->klass(), "!loaded");
        return top();
      }

      if (assert_null) {
        // See if the type is contained in NULL_PTR.
        // If so, then the value is already null.
        if (t->higher_equal(TypePtr::NULL_PTR)) {
          NOT_PRODUCT(explicit_null_checks_elided++);
          return value;  // Elided null assert quickly!
        }
      } else {
        // See if mixing in the NULL pointer changes type.
        // If so, then the NULL pointer was not allowed in the original
        // type.  In other words, "value" was not-null.
        if (t->meet(TypePtr::NULL_PTR) != t->remove_speculative()) {
          // same as: if (!TypePtr::NULL_PTR->higher_equal(t)) ...
          NOT_PRODUCT(explicit_null_checks_elided++);
          return value;  // Elided null check quickly!
        }
      }
      chk = new CmpPNode( value, null() );
      break;
    }

    default:
      fatal("unexpected type: %s", type2name(type));
  }
  assert(chk != NULL, "sanity check");
  chk = _gvn.transform(chk);

  BoolTest::mask btest = assert_null ? BoolTest::eq : BoolTest::ne;
  BoolNode *btst = new BoolNode( chk, btest);
  Node   *tst = _gvn.transform( btst );

  //-----------
  // if peephole optimizations occurred, a prior test existed.
  // If a prior test existed, maybe it dominates as we can avoid this test.
  if (tst != btst && type == T_OBJECT) {
    // At this point we want to scan up the CFG to see if we can
    // find an identical test (and so avoid this test altogether).
    Node *cfg = control();
    int depth = 0;
    while( depth < 16 ) {  // Limit search depth for speed
      if( cfg->Opcode() == Op_IfTrue &&
          cfg->in(0)->in(1) == tst ) {
        // Found prior test.  Use "cast_not_null" to construct an identical
        // CastPP (which hence hashes to the same node) as already exists
        // for the prior test.  Return that casted value.
        if (assert_null) {
          replace_in_map(value, null());
          return null();  // do not issue the redundant test
        }
        Node *oldcontrol = control();
        set_control(cfg);
        Node *res = cast_not_null(value);
        set_control(oldcontrol);
        NOT_PRODUCT(explicit_null_checks_elided++);
        return res;
      }
      cfg = IfNode::up_one_dom(cfg, /*linear_only=*/ true);
      if (cfg == NULL)  break;  // Quit at region nodes
      depth++;
    }
  }

  //-----------
  // Branch to failure if null
  float ok_prob = PROB_MAX;  // a priori estimate:  nulls never happen
  Deoptimization::DeoptReason reason;
  if (assert_null) {
    reason = Deoptimization::reason_null_assert(speculative);
  } else if (type == T_OBJECT) {
    reason = Deoptimization::reason_null_check(speculative);
  } else {
    reason = Deoptimization::Reason_div0_check;
  }
  // %%% Since Reason_unhandled is not recorded on a per-bytecode basis,
  // ciMethodData::has_trap_at will return a conservative -1 if any
  // must-be-null assertion has failed.  This could cause performance
  // problems for a method after its first do_null_assert failure.
  // Consider using 'Reason_class_check' instead?

  // To cause an implicit null check, we set the not-null probability
  // to the maximum (PROB_MAX).  For an explicit check the probability
  // is set to a smaller value.
  if (null_control != NULL || too_many_traps(reason)) {
    // probability is less likely
    ok_prob = PROB_LIKELY_MAG(3);
  } else if (!assert_null &&
             (ImplicitNullCheckThreshold > 0) &&
             method() != NULL &&
             (method()->method_data()->trap_count(reason)
              >= (uint)ImplicitNullCheckThreshold)) {
    ok_prob = PROB_LIKELY_MAG(3);
  }

  if (null_control != NULL) {
    IfNode* iff = create_and_map_if(control(), tst, ok_prob, COUNT_UNKNOWN);
    Node* null_true = _gvn.transform( new IfFalseNode(iff));
    set_control(      _gvn.transform( new IfTrueNode(iff)));
#ifndef PRODUCT
    if (null_true == top()) {
      explicit_null_checks_elided++;
    }
#endif
    (*null_control) = null_true;
  } else {
    BuildCutout unless(this, tst, ok_prob);
    // Check for optimizer eliding test at parse time
    if (stopped()) {
      // Failure not possible; do not bother making uncommon trap.
      NOT_PRODUCT(explicit_null_checks_elided++);
    } else if (assert_null) {
      uncommon_trap(reason,
                    Deoptimization::Action_make_not_entrant,
                    NULL, "assert_null");
    } else {
      replace_in_map(value, zerocon(type));
      builtin_throw(reason);
    }
  }

  // Must throw exception, fall-thru not possible?
  if (stopped()) {
    return top();  // No result
  }

  if (assert_null) {
    // Cast obj to null on this path.
    replace_in_map(value, zerocon(type));
    return zerocon(type);
  }

  // Cast obj to not-null on this path, if there is no null_control.
  // (If there is a null_control, a non-null value may come back to haunt us.)
  if (type == T_OBJECT) {
    Node* cast = cast_not_null(value, false);
    if (null_control == NULL || (*null_control) == top())
      replace_in_map(value, cast);
    value = cast;
  }

  return value;
}


//------------------------------cast_not_null----------------------------------
// Cast obj to not-null on this path
Node* GraphKit::cast_not_null(Node* obj, bool do_replace_in_map) {
  const Type *t = _gvn.type(obj);
  const Type *t_not_null = t->join_speculative(TypePtr::NOTNULL);
  // Object is already not-null?
  if( t == t_not_null )  return obj;

  Node *cast = new CastPPNode(obj,t_not_null);
  cast->init_req(0, control());
  cast = _gvn.transform( cast );

  // Scan for instances of 'obj' in the current JVM mapping.
  // These instances are known to be not-null after the test.
  if (do_replace_in_map)
    replace_in_map(obj, cast);

  return cast;  // Return casted value
}

// Sometimes in intrinsics, we implicitly know an object is not null
// (there's no actual null check) so we can cast it to not null. In
// the course of optimizations, the input to the cast can become null.
// In that case that data path will die and we need the control path
// to become dead as well to keep the graph consistent. So we have to
// add a check for null for which one branch can't be taken. It uses
// an Opaque4 node that will cause the check to be removed after loop
// opts so the test goes away and the compiled code doesn't execute a
// useless check.
Node* GraphKit::must_be_not_null(Node* value, bool do_replace_in_map) {
  Node* chk = _gvn.transform(new CmpPNode(value, null()));
  Node *tst = _gvn.transform(new BoolNode(chk, BoolTest::ne));
  Node* opaq = _gvn.transform(new Opaque4Node(C, tst, intcon(1)));
  IfNode *iff = new IfNode(control(), opaq, PROB_MAX, COUNT_UNKNOWN);
  _gvn.set_type(iff, iff->Value(&_gvn));
  Node *if_f = _gvn.transform(new IfFalseNode(iff));
  Node *frame = _gvn.transform(new ParmNode(C->start(), TypeFunc::FramePtr));
  Node *halt = _gvn.transform(new HaltNode(if_f, frame));
  C->root()->add_req(halt);
  Node *if_t = _gvn.transform(new IfTrueNode(iff));
  set_control(if_t);
  return cast_not_null(value, do_replace_in_map);
}


//--------------------------replace_in_map-------------------------------------
void GraphKit::replace_in_map(Node* old, Node* neww) {
  if (old == neww) {
    return;
  }

  map()->replace_edge(old, neww);

  // Note: This operation potentially replaces any edge
  // on the map.  This includes locals, stack, and monitors
  // of the current (innermost) JVM state.
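  // Replaced nodes are also recorded (below) so that later merges can
  // replay them, but only if the new node's type is at least as precise
  // as the old one's.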

  // don't let inconsistent types from profiling escape this
  // method

  const Type* told = _gvn.type(old);
  const Type* tnew = _gvn.type(neww);

  if (!tnew->higher_equal(told)) {
    return;
  }

  map()->record_replaced_node(old, neww);
}


//=============================================================================
//--------------------------------memory--------------------------------------
Node* GraphKit::memory(uint alias_idx) {
  MergeMemNode* mem = merged_memory();
  Node* p = mem->memory_at(alias_idx);
  _gvn.set_type(p, Type::MEMORY);  // must be mapped
  return p;
}

//-----------------------------reset_memory------------------------------------
Node* GraphKit::reset_memory() {
  Node* mem = map()->memory();
  // do not use this node for any more parsing!
  debug_only( map()->set_memory((Node*)NULL) );
  return _gvn.transform( mem );
}

//------------------------------set_all_memory---------------------------------
void GraphKit::set_all_memory(Node* newmem) {
  Node* mergemem = MergeMemNode::make(newmem);
  gvn().set_type_bottom(mergemem);
  map()->set_memory(mergemem);
}

//------------------------------set_all_memory_call----------------------------
void GraphKit::set_all_memory_call(Node* call, bool separate_io_proj) {
  Node* newmem = _gvn.transform( new ProjNode(call, TypeFunc::Memory, separate_io_proj) );
  set_all_memory(newmem);
}

//=============================================================================
//
// parser factory methods for MemNodes
//
// These are layered on top of the factory methods in LoadNode and StoreNode,
// and integrate with the parser's memory state and _gvn engine.
//

// factory methods in "int adr_idx"
Node* GraphKit::make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
                          int adr_idx,
                          MemNode::MemOrd mo,
                          LoadNode::ControlDependency control_dependency,
                          bool require_atomic_access,
                          bool unaligned,
                          bool mismatched) {
  assert(adr_idx != Compile::AliasIdxTop, "use other make_load factory" );
  const TypePtr* adr_type = NULL;  // debug-mode-only argument
  debug_only(adr_type = C->get_adr_type(adr_idx));
  Node* mem = memory(adr_idx);
  Node* ld;
  if (require_atomic_access && bt == T_LONG) {
    ld = LoadLNode::make_atomic(ctl, mem, adr, adr_type, t, mo, control_dependency, unaligned, mismatched);
  } else if (require_atomic_access && bt == T_DOUBLE) {
    ld = LoadDNode::make_atomic(ctl, mem, adr, adr_type, t, mo, control_dependency, unaligned, mismatched);
  } else {
    ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo, control_dependency, unaligned, mismatched);
  }
  ld = _gvn.transform(ld);
  if (bt == T_VALUETYPE) {
    // Loading a non-flattened value type from memory requires a null check.
    ld = ValueTypeNode::make_from_oop(this, ld, t->make_ptr()->is_valuetypeptr()->value_klass(), true /* null check */);
  } else if (((bt == T_OBJECT) && C->do_escape_analysis()) || C->eliminate_boxing()) {
    // Improve graph before escape analysis and boxing elimination.
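    // (record_for_igvn queues the load on the IGVN worklist so it has a
    // chance to be simplified before those phases run.)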
1531 record_for_igvn(ld); 1532 } 1533 return ld; 1534 } 1535 1536 Node* GraphKit::store_to_memory(Node* ctl, Node* adr, Node *val, BasicType bt, 1537 int adr_idx, 1538 MemNode::MemOrd mo, 1539 bool require_atomic_access, 1540 bool unaligned, 1541 bool mismatched) { 1542 assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" ); 1543 const TypePtr* adr_type = NULL; 1544 debug_only(adr_type = C->get_adr_type(adr_idx)); 1545 Node *mem = memory(adr_idx); 1546 Node* st; 1547 if (require_atomic_access && bt == T_LONG) { 1548 st = StoreLNode::make_atomic(ctl, mem, adr, adr_type, val, mo); 1549 } else if (require_atomic_access && bt == T_DOUBLE) { 1550 st = StoreDNode::make_atomic(ctl, mem, adr, adr_type, val, mo); 1551 } else { 1552 st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt, mo); 1553 } 1554 if (unaligned) { 1555 st->as_Store()->set_unaligned_access(); 1556 } 1557 if (mismatched) { 1558 st->as_Store()->set_mismatched_access(); 1559 } 1560 st = _gvn.transform(st); 1561 set_memory(st, adr_idx); 1562 // Back-to-back stores can only remove intermediate store with DU info 1563 // so push on worklist for optimizer. 1564 if (mem->req() > MemNode::Address && adr == mem->in(MemNode::Address)) 1565 record_for_igvn(st); 1566 1567 return st; 1568 } 1569 1570 1571 void GraphKit::pre_barrier(bool do_load, 1572 Node* ctl, 1573 Node* obj, 1574 Node* adr, 1575 uint adr_idx, 1576 Node* val, 1577 const TypeOopPtr* val_type, 1578 Node* pre_val, 1579 BasicType bt) { 1580 1581 BarrierSet* bs = Universe::heap()->barrier_set(); 1582 set_control(ctl); 1583 switch (bs->kind()) { 1584 case BarrierSet::G1SATBCTLogging: 1585 g1_write_barrier_pre(do_load, obj, adr, adr_idx, val, val_type, pre_val, bt); 1586 break; 1587 1588 case BarrierSet::CardTableForRS: 1589 case BarrierSet::CardTableExtension: 1590 case BarrierSet::ModRef: 1591 break; 1592 1593 default : 1594 ShouldNotReachHere(); 1595 1596 } 1597 } 1598 1599 bool GraphKit::can_move_pre_barrier() const { 1600 BarrierSet* bs = Universe::heap()->barrier_set(); 1601 switch (bs->kind()) { 1602 case BarrierSet::G1SATBCTLogging: 1603 return true; // Can move it if no safepoint 1604 1605 case BarrierSet::CardTableForRS: 1606 case BarrierSet::CardTableExtension: 1607 case BarrierSet::ModRef: 1608 return true; // There is no pre-barrier 1609 1610 default : 1611 ShouldNotReachHere(); 1612 } 1613 return false; 1614 } 1615 1616 void GraphKit::post_barrier(Node* ctl, 1617 Node* store, 1618 Node* obj, 1619 Node* adr, 1620 uint adr_idx, 1621 Node* val, 1622 BasicType bt, 1623 bool use_precise) { 1624 BarrierSet* bs = Universe::heap()->barrier_set(); 1625 set_control(ctl); 1626 switch (bs->kind()) { 1627 case BarrierSet::G1SATBCTLogging: 1628 g1_write_barrier_post(store, obj, adr, adr_idx, val, bt, use_precise); 1629 break; 1630 1631 case BarrierSet::CardTableForRS: 1632 case BarrierSet::CardTableExtension: 1633 write_barrier_post(store, obj, adr, adr_idx, val, use_precise); 1634 break; 1635 1636 case BarrierSet::ModRef: 1637 break; 1638 1639 default : 1640 ShouldNotReachHere(); 1641 1642 } 1643 } 1644 1645 Node* GraphKit::store_oop(Node* ctl, 1646 Node* obj, 1647 Node* adr, 1648 const TypePtr* adr_type, 1649 Node* val, 1650 const TypeOopPtr* val_type, 1651 BasicType bt, 1652 bool use_precise, 1653 MemNode::MemOrd mo, 1654 bool mismatched) { 1655 // Transformation of a value which could be NULL pointer (CastPP #NULL) 1656 // could be delayed during Parse (for example, in adjust_map_after_if()). 
1657   // Execute transformation here to avoid barrier generation in such case.
1658   if (_gvn.type(val) == TypePtr::NULL_PTR)
1659     val = _gvn.makecon(TypePtr::NULL_PTR);
1660
1661   set_control(ctl);
1662   if (stopped()) return top(); // Dead path?
1663
1664   assert(bt == T_OBJECT || bt == T_VALUETYPE, "sanity");
1665   assert(val != NULL, "not dead path");
1666   uint adr_idx = C->get_alias_index(adr_type);
1667   assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );
1668
1669   if (val->is_ValueType()) {
1670     // Allocate value type and get oop
1671     val = val->as_ValueType()->allocate(this)->get_oop();
1672   }
1673
1674   pre_barrier(true /* do_load */,
1675               control(), obj, adr, adr_idx, val, val_type,
1676               NULL /* pre_val */,
1677               bt);
1678
1679   Node* store = store_to_memory(control(), adr, val, bt, adr_idx, mo, mismatched);
1680   post_barrier(control(), store, obj, adr, adr_idx, val, bt, use_precise);
1681   return store;
1682 }
1683
1684 // Could be an array or object we don't know at compile time (unsafe ref.)
1685 Node* GraphKit::store_oop_to_unknown(Node* ctl,
1686                              Node* obj,   // containing obj
1687                              Node* adr,   // actual address to store val at
1688                              const TypePtr* adr_type,
1689                              Node* val,
1690                              BasicType bt,
1691                              MemNode::MemOrd mo,
1692                              bool mismatched) {
1693   Compile::AliasType* at = C->alias_type(adr_type);
1694   const TypeOopPtr* val_type = NULL;
1695   if (adr_type->isa_instptr()) {
1696     if (at->field() != NULL) {
1697       // known field.  This code is a copy of the do_put_xxx logic.
1698       ciField* field = at->field();
1699       if (!field->type()->is_loaded()) {
1700         val_type = TypeInstPtr::BOTTOM;
1701       } else {
1702         val_type = TypeOopPtr::make_from_klass(field->type()->as_klass());
1703       }
1704     }
1705   } else if (adr_type->isa_aryptr()) {
1706     val_type = adr_type->is_aryptr()->elem()->make_oopptr();
1707   }
1708   if (val_type == NULL) {
1709     val_type = TypeInstPtr::BOTTOM;
1710   }
1711   return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, true, mo, mismatched);
1712 }
1713
1714
1715 //-------------------------array_element_address-------------------------
1716 Node* GraphKit::array_element_address(Node* ary, Node* idx, BasicType elembt,
1717                                       const TypeInt* sizetype, Node* ctrl) {
1718   uint shift = exact_log2(type2aelembytes(elembt));
1719   ciKlass* arytype_klass = _gvn.type(ary)->is_aryptr()->klass();
1720   if (arytype_klass->is_value_array_klass()) {
1721     ciValueArrayKlass* vak = arytype_klass->as_value_array_klass();
1722     shift = vak->log2_element_size();
1723   }
1724   uint header = arrayOopDesc::base_offset_in_bytes(elembt);
1725
1726   // short-circuit a common case (saves lots of confusing waste motion)
1727   jint idx_con = find_int_con(idx, -1);
1728   if (idx_con >= 0) {
1729     intptr_t offset = header + ((intptr_t)idx_con << shift);
1730     return basic_plus_adr(ary, offset);
1731   }
1732
1733   // must be correct type for alignment purposes
1734   Node* base  = basic_plus_adr(ary, header);
1735   idx = Compile::conv_I2X_index(&_gvn, idx, sizetype, ctrl);
1736   Node* scale = _gvn.transform( new LShiftXNode(idx, intcon(shift)) );
1737   return basic_plus_adr(ary, base, scale);
1738 }
1739
1740 //-------------------------load_array_element-------------------------
1741 Node* GraphKit::load_array_element(Node* ctl, Node* ary, Node* idx, const TypeAryPtr* arytype) {
1742   const Type* elemtype = arytype->elem();
1743   BasicType elembt = elemtype->array_element_basic_type();
1744   assert(elembt != T_VALUETYPE, "value types are not supported by this method");
1745   Node* adr = array_element_address(ary, idx,
elembt, arytype->size()); 1746 if (elembt == T_NARROWOOP) { 1747 elembt = T_OBJECT; // To satisfy switch in LoadNode::make() 1748 } 1749 Node* ld = make_load(ctl, adr, elemtype, elembt, arytype, MemNode::unordered); 1750 return ld; 1751 } 1752 1753 //-------------------------set_arguments_for_java_call------------------------- 1754 // Arguments (pre-popped from the stack) are taken from the JVMS. 1755 void GraphKit::set_arguments_for_java_call(CallJavaNode* call) { 1756 // Add the call arguments: 1757 const TypeTuple* domain = call->tf()->domain_sig(); 1758 uint nargs = domain->cnt(); 1759 for (uint i = TypeFunc::Parms, idx = TypeFunc::Parms; i < nargs; i++) { 1760 Node* arg = argument(i-TypeFunc::Parms); 1761 if (ValueTypePassFieldsAsArgs) { 1762 if (arg->is_ValueType()) { 1763 ValueTypeNode* vt = arg->as_ValueType(); 1764 if (!domain->field_at(i)->is_valuetypeptr()->is__Value()) { 1765 // We don't pass value type arguments by reference but instead 1766 // pass each field of the value type 1767 idx += vt->pass_fields(call, idx, *this); 1768 // If a value type argument is passed as fields, attach the Method* to the call site 1769 // to be able to access the extended signature later via attached_method_before_pc(). 1770 // For example, see CompiledMethod::preserve_callee_argument_oops(). 1771 call->set_override_symbolic_info(true); 1772 } else { 1773 arg = arg->as_ValueType()->allocate(this)->get_oop(); 1774 call->init_req(idx, arg); 1775 idx++; 1776 } 1777 } else { 1778 call->init_req(idx, arg); 1779 idx++; 1780 } 1781 } else { 1782 if (arg->is_ValueType()) { 1783 // Pass value type argument via oop to callee 1784 arg = arg->as_ValueType()->allocate(this)->get_oop(); 1785 } 1786 call->init_req(i, arg); 1787 } 1788 } 1789 } 1790 1791 //---------------------------set_edges_for_java_call--------------------------- 1792 // Connect a newly created call into the current JVMS. 1793 // A return value node (if any) is returned from set_edges_for_java_call. 1794 void GraphKit::set_edges_for_java_call(CallJavaNode* call, bool must_throw, bool separate_io_proj) { 1795 1796 // Add the predefined inputs: 1797 call->init_req( TypeFunc::Control, control() ); 1798 call->init_req( TypeFunc::I_O , i_o() ); 1799 call->init_req( TypeFunc::Memory , reset_memory() ); 1800 call->init_req( TypeFunc::FramePtr, frameptr() ); 1801 call->init_req( TypeFunc::ReturnAdr, top() ); 1802 1803 add_safepoint_edges(call, must_throw); 1804 1805 Node* xcall = _gvn.transform(call); 1806 1807 if (xcall == top()) { 1808 set_control(top()); 1809 return; 1810 } 1811 assert(xcall == call, "call identity is stable"); 1812 1813 // Re-use the current map to produce the result. 1814 1815 set_control(_gvn.transform(new ProjNode(call, TypeFunc::Control))); 1816 set_i_o( _gvn.transform(new ProjNode(call, TypeFunc::I_O , separate_io_proj))); 1817 set_all_memory_call(xcall, separate_io_proj); 1818 1819 //return xcall; // no need, caller already has it 1820 } 1821 1822 Node* GraphKit::set_results_for_java_call(CallJavaNode* call, bool separate_io_proj) { 1823 if (stopped()) return top(); // maybe the call folded up? 1824 1825 // Note: Since any out-of-line call can produce an exception, 1826 // we always insert an I_O projection from the call into the result. 1827 1828 make_slow_call_ex(call, env()->Throwable_klass(), separate_io_proj); 1829 1830 if (separate_io_proj) { 1831 // The caller requested separate projections be used by the fall 1832 // through and exceptional paths, so replace the projections for 1833 // the fall through path. 
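    // (Illustrative note) make_slow_call_ex() above already attached the
    // exception path to one set of projections; when separate_io_proj is
    // true we mint a fresh I/O and memory projection pair here so that the
    // normal fall-through path does not share projections with the
    // exceptional path.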
1834 set_i_o(_gvn.transform( new ProjNode(call, TypeFunc::I_O) )); 1835 set_all_memory(_gvn.transform( new ProjNode(call, TypeFunc::Memory) )); 1836 } 1837 1838 // Capture the return value, if any. 1839 Node* ret; 1840 if (call->method() == NULL || 1841 call->method()->return_type()->basic_type() == T_VOID) { 1842 ret = top(); 1843 } else { 1844 if (!call->tf()->returns_value_type_as_fields()) { 1845 ret = _gvn.transform(new ProjNode(call, TypeFunc::Parms)); 1846 } else { 1847 // Return of multiple values (value type fields): we create a 1848 // ValueType node, each field is a projection from the call. 1849 const TypeTuple* range_sig = call->tf()->range_sig(); 1850 const Type* t = range_sig->field_at(TypeFunc::Parms); 1851 assert(t->isa_valuetypeptr(), "only value types for multiple return values"); 1852 ciValueKlass* vk = t->is_valuetypeptr()->value_klass(); 1853 Node* ctl = control(); 1854 ret = ValueTypeNode::make_from_multi(_gvn, ctl, merged_memory(), call, vk, TypeFunc::Parms+1, false); 1855 set_control(ctl); 1856 } 1857 } 1858 1859 return ret; 1860 } 1861 1862 //--------------------set_predefined_input_for_runtime_call-------------------- 1863 // Reading and setting the memory state is way conservative here. 1864 // The real problem is that I am not doing real Type analysis on memory, 1865 // so I cannot distinguish card mark stores from other stores. Across a GC 1866 // point the Store Barrier and the card mark memory has to agree. I cannot 1867 // have a card mark store and its barrier split across the GC point from 1868 // either above or below. Here I get that to happen by reading ALL of memory. 1869 // A better answer would be to separate out card marks from other memory. 1870 // For now, return the input memory state, so that it can be reused 1871 // after the call, if this call has restricted memory effects. 1872 Node* GraphKit::set_predefined_input_for_runtime_call(SafePointNode* call) { 1873 // Set fixed predefined input arguments 1874 Node* memory = reset_memory(); 1875 call->init_req( TypeFunc::Control, control() ); 1876 call->init_req( TypeFunc::I_O, top() ); // does no i/o 1877 call->init_req( TypeFunc::Memory, memory ); // may gc ptrs 1878 call->init_req( TypeFunc::FramePtr, frameptr() ); 1879 call->init_req( TypeFunc::ReturnAdr, top() ); 1880 return memory; 1881 } 1882 1883 //-------------------set_predefined_output_for_runtime_call-------------------- 1884 // Set control and memory (not i_o) from the call. 1885 // If keep_mem is not NULL, use it for the output state, 1886 // except for the RawPtr output of the call, if hook_mem is TypeRawPtr::BOTTOM. 1887 // If hook_mem is NULL, this call produces no memory effects at all. 1888 // If hook_mem is a Java-visible memory slice (such as arraycopy operands), 1889 // then only that memory slice is taken from the call. 1890 // In the last case, we must put an appropriate memory barrier before 1891 // the call, so as to create the correct anti-dependencies on loads 1892 // preceding the call. 1893 void GraphKit::set_predefined_output_for_runtime_call(Node* call, 1894 Node* keep_mem, 1895 const TypePtr* hook_mem) { 1896 // no i/o 1897 set_control(_gvn.transform( new ProjNode(call,TypeFunc::Control) )); 1898 if (keep_mem) { 1899 // First clone the existing memory state 1900 set_all_memory(keep_mem); 1901 if (hook_mem != NULL) { 1902 // Make memory for the call 1903 Node* mem = _gvn.transform( new ProjNode(call, TypeFunc::Memory) ); 1904 // Set the RawPtr memory state only. 
This covers all the heap top/GC stuff
1905     // We also use hook_mem to extract specific effects from arraycopy stubs.
1906       set_memory(mem, hook_mem);
1907     }
1908     // ...else the call has NO memory effects.
1909
1910     // Make sure the call advertises its memory effects precisely.
1911     // This lets us build accurate anti-dependences in gcm.cpp.
1912     assert(C->alias_type(call->adr_type()) == C->alias_type(hook_mem),
1913            "call node must be constructed correctly");
1914   } else {
1915     assert(hook_mem == NULL, "");
1916     // This is not a "slow path" call; all memory comes from the call.
1917     set_all_memory_call(call);
1918   }
1919 }
1920
1921
1922 // Replace the call with the current state of the kit.
1923 void GraphKit::replace_call(CallNode* call, Node* result, bool do_replaced_nodes) {
1924   JVMState* ejvms = NULL;
1925   if (has_exceptions()) {
1926     ejvms = transfer_exceptions_into_jvms();
1927   }
1928
1929   ReplacedNodes replaced_nodes = map()->replaced_nodes();
1930   ReplacedNodes replaced_nodes_exception;
1931   Node* ex_ctl = top();
1932
1933   SafePointNode* final_state = stop();
1934
1935   // Find all the needed outputs of this call
1936   CallProjections* callprojs = call->extract_projections(true);
1937
1938   Node* init_mem = call->in(TypeFunc::Memory);
1939   Node* final_mem = final_state->in(TypeFunc::Memory);
1940   Node* final_ctl = final_state->in(TypeFunc::Control);
1941   Node* final_io = final_state->in(TypeFunc::I_O);
1942
1943   // Replace all the old call edges with the edges from the inlining result
1944   if (callprojs->fallthrough_catchproj != NULL) {
1945     C->gvn_replace_by(callprojs->fallthrough_catchproj, final_ctl);
1946   }
1947   if (callprojs->fallthrough_memproj != NULL) {
1948     if (final_mem->is_MergeMem()) {
1949       // The parser's exit MergeMem was not transformed but may be optimized
1950       final_mem = _gvn.transform(final_mem);
1951     }
1952     C->gvn_replace_by(callprojs->fallthrough_memproj,   final_mem);
1953   }
1954   if (callprojs->fallthrough_ioproj != NULL) {
1955     C->gvn_replace_by(callprojs->fallthrough_ioproj,    final_io);
1956   }
1957
1958   // Replace the result with the new result if it exists and is used
1959   if (callprojs->resproj[0] != NULL && result != NULL) {
1960     assert(callprojs->nb_resproj == 1, "unexpected number of results");
1961     C->gvn_replace_by(callprojs->resproj[0], result);
1962   }
1963
1964   if (ejvms == NULL) {
1965     // No exception edges, so simply kill off those paths
1966     if (callprojs->catchall_catchproj != NULL) {
1967       C->gvn_replace_by(callprojs->catchall_catchproj, C->top());
1968     }
1969     if (callprojs->catchall_memproj != NULL) {
1970       C->gvn_replace_by(callprojs->catchall_memproj,   C->top());
1971     }
1972     if (callprojs->catchall_ioproj != NULL) {
1973       C->gvn_replace_by(callprojs->catchall_ioproj,    C->top());
1974     }
1975     // Replace the old exception object with top
1976     if (callprojs->exobj != NULL) {
1977       C->gvn_replace_by(callprojs->exobj, C->top());
1978     }
1979   } else {
1980     GraphKit ekit(ejvms);
1981
1982     // Load my combined exception state into the kit, with all phis transformed:
1983     SafePointNode* ex_map = ekit.combine_and_pop_all_exception_states();
1984     replaced_nodes_exception = ex_map->replaced_nodes();
1985
1986     Node* ex_oop = ekit.use_exception_state(ex_map);
1987
1988     if (callprojs->catchall_catchproj != NULL) {
1989       C->gvn_replace_by(callprojs->catchall_catchproj, ekit.control());
1990       ex_ctl = ekit.control();
1991     }
1992     if (callprojs->catchall_memproj != NULL) {
1993       C->gvn_replace_by(callprojs->catchall_memproj,   ekit.reset_memory());
1994     }
1995     if
(callprojs->catchall_ioproj != NULL) { 1996 C->gvn_replace_by(callprojs->catchall_ioproj, ekit.i_o()); 1997 } 1998 1999 // Replace the old exception object with the newly created one 2000 if (callprojs->exobj != NULL) { 2001 C->gvn_replace_by(callprojs->exobj, ex_oop); 2002 } 2003 } 2004 2005 // Disconnect the call from the graph 2006 call->disconnect_inputs(NULL, C); 2007 C->gvn_replace_by(call, C->top()); 2008 2009 // Clean up any MergeMems that feed other MergeMems since the 2010 // optimizer doesn't like that. 2011 if (final_mem->is_MergeMem()) { 2012 Node_List wl; 2013 for (SimpleDUIterator i(final_mem); i.has_next(); i.next()) { 2014 Node* m = i.get(); 2015 if (m->is_MergeMem() && !wl.contains(m)) { 2016 wl.push(m); 2017 } 2018 } 2019 while (wl.size() > 0) { 2020 _gvn.transform(wl.pop()); 2021 } 2022 } 2023 2024 if (callprojs->fallthrough_catchproj != NULL && !final_ctl->is_top() && do_replaced_nodes) { 2025 replaced_nodes.apply(C, final_ctl); 2026 } 2027 if (!ex_ctl->is_top() && do_replaced_nodes) { 2028 replaced_nodes_exception.apply(C, ex_ctl); 2029 } 2030 } 2031 2032 2033 //------------------------------increment_counter------------------------------ 2034 // for statistics: increment a VM counter by 1 2035 2036 void GraphKit::increment_counter(address counter_addr) { 2037 Node* adr1 = makecon(TypeRawPtr::make(counter_addr)); 2038 increment_counter(adr1); 2039 } 2040 2041 void GraphKit::increment_counter(Node* counter_addr) { 2042 int adr_type = Compile::AliasIdxRaw; 2043 Node* ctrl = control(); 2044 Node* cnt = make_load(ctrl, counter_addr, TypeInt::INT, T_INT, adr_type, MemNode::unordered); 2045 Node* incr = _gvn.transform(new AddINode(cnt, _gvn.intcon(1))); 2046 store_to_memory(ctrl, counter_addr, incr, T_INT, adr_type, MemNode::unordered); 2047 } 2048 2049 2050 //------------------------------uncommon_trap---------------------------------- 2051 // Bail out to the interpreter in mid-method. Implemented by calling the 2052 // uncommon_trap blob. This helper function inserts a runtime call with the 2053 // right debug info. 2054 void GraphKit::uncommon_trap(int trap_request, 2055 ciKlass* klass, const char* comment, 2056 bool must_throw, 2057 bool keep_exact_action) { 2058 if (failing()) stop(); 2059 if (stopped()) return; // trap reachable? 2060 2061 // Note: If ProfileTraps is true, and if a deopt. actually 2062 // occurs here, the runtime will make sure an MDO exists. There is 2063 // no need to call method()->ensure_method_data() at this point. 2064 2065 // Set the stack pointer to the right value for reexecution: 2066 set_sp(reexecute_sp()); 2067 2068 #ifdef ASSERT 2069 if (!must_throw) { 2070 // Make sure the stack has at least enough depth to execute 2071 // the current bytecode. 
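    // For example (illustrative): if this trap were requested while
    // parsing an 'iadd' bytecode, compute_stack_effects() would report
    // inputs == 2, and the assert below demands sp() >= 2 so that the
    // interpreter can re-pop both operands when it reexecutes the bytecode.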
2072 int inputs, ignored_depth; 2073 if (compute_stack_effects(inputs, ignored_depth)) { 2074 assert(sp() >= inputs, "must have enough JVMS stack to execute %s: sp=%d, inputs=%d", 2075 Bytecodes::name(java_bc()), sp(), inputs); 2076 } 2077 } 2078 #endif 2079 2080 Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(trap_request); 2081 Deoptimization::DeoptAction action = Deoptimization::trap_request_action(trap_request); 2082 2083 switch (action) { 2084 case Deoptimization::Action_maybe_recompile: 2085 case Deoptimization::Action_reinterpret: 2086 // Temporary fix for 6529811 to allow virtual calls to be sure they 2087 // get the chance to go from mono->bi->mega 2088 if (!keep_exact_action && 2089 Deoptimization::trap_request_index(trap_request) < 0 && 2090 too_many_recompiles(reason)) { 2091 // This BCI is causing too many recompilations. 2092 if (C->log() != NULL) { 2093 C->log()->elem("observe that='trap_action_change' reason='%s' from='%s' to='none'", 2094 Deoptimization::trap_reason_name(reason), 2095 Deoptimization::trap_action_name(action)); 2096 } 2097 action = Deoptimization::Action_none; 2098 trap_request = Deoptimization::make_trap_request(reason, action); 2099 } else { 2100 C->set_trap_can_recompile(true); 2101 } 2102 break; 2103 case Deoptimization::Action_make_not_entrant: 2104 C->set_trap_can_recompile(true); 2105 break; 2106 case Deoptimization::Action_none: 2107 case Deoptimization::Action_make_not_compilable: 2108 break; 2109 default: 2110 #ifdef ASSERT 2111 fatal("unknown action %d: %s", action, Deoptimization::trap_action_name(action)); 2112 #endif 2113 break; 2114 } 2115 2116 if (TraceOptoParse) { 2117 char buf[100]; 2118 tty->print_cr("Uncommon trap %s at bci:%d", 2119 Deoptimization::format_trap_request(buf, sizeof(buf), 2120 trap_request), bci()); 2121 } 2122 2123 CompileLog* log = C->log(); 2124 if (log != NULL) { 2125 int kid = (klass == NULL)? -1: log->identify(klass); 2126 log->begin_elem("uncommon_trap bci='%d'", bci()); 2127 char buf[100]; 2128 log->print(" %s", Deoptimization::format_trap_request(buf, sizeof(buf), 2129 trap_request)); 2130 if (kid >= 0) log->print(" klass='%d'", kid); 2131 if (comment != NULL) log->print(" comment='%s'", comment); 2132 log->end_elem(); 2133 } 2134 2135 // Make sure any guarding test views this path as very unlikely 2136 Node *i0 = control()->in(0); 2137 if (i0 != NULL && i0->is_If()) { // Found a guarding if test? 2138 IfNode *iff = i0->as_If(); 2139 float f = iff->_prob; // Get prob 2140 if (control()->Opcode() == Op_IfTrue) { 2141 if (f > PROB_UNLIKELY_MAG(4)) 2142 iff->_prob = PROB_MIN; 2143 } else { 2144 if (f < PROB_LIKELY_MAG(4)) 2145 iff->_prob = PROB_MAX; 2146 } 2147 } 2148 2149 // Clear out dead values from the debug info. 2150 kill_dead_locals(); 2151 2152 // Now insert the uncommon trap subroutine call 2153 address call_addr = SharedRuntime::uncommon_trap_blob()->entry_point(); 2154 const TypePtr* no_memory_effects = NULL; 2155 // Pass the index of the class to be loaded 2156 Node* call = make_runtime_call(RC_NO_LEAF | RC_UNCOMMON | 2157 (must_throw ? 
RC_MUST_THROW : 0),
2158                                  OptoRuntime::uncommon_trap_Type(),
2159                                  call_addr, "uncommon_trap", no_memory_effects,
2160                                  intcon(trap_request));
2161   assert(call->as_CallStaticJava()->uncommon_trap_request() == trap_request,
2162          "must extract request correctly from the graph");
2163   assert(trap_request != 0, "zero value reserved by uncommon_trap_request");
2164
2165   call->set_req(TypeFunc::ReturnAdr, returnadr());
2166   // The debug info is the only real input to this call.
2167
2168   // Halt-and-catch fire here.  The above call should never return!
2169   HaltNode* halt = new HaltNode(control(), frameptr());
2170   _gvn.set_type_bottom(halt);
2171   root()->add_req(halt);
2172
2173   stop_and_kill_map();
2174 }
2175
2176
2177 //--------------------------just_allocated_object------------------------------
2178 // Report the object that was just allocated.
2179 // It must be the case that there are no intervening safepoints.
2180 // We use this to determine if an object is so "fresh" that
2181 // it does not require card marks.
2182 Node* GraphKit::just_allocated_object(Node* current_control) {
2183   if (C->recent_alloc_ctl() == current_control)
2184     return C->recent_alloc_obj();
2185   return NULL;
2186 }
2187
2188
2189 void GraphKit::round_double_arguments(ciMethod* dest_method) {
2190   // (Note:  TypeFunc::make has a cache that makes this fast.)
2191   const TypeFunc* tf    = TypeFunc::make(dest_method);
2192   int             nargs = tf->domain_sig()->cnt() - TypeFunc::Parms;
2193   for (int j = 0; j < nargs; j++) {
2194     const Type *targ = tf->domain_sig()->field_at(j + TypeFunc::Parms);
2195     if( targ->basic_type() == T_DOUBLE ) {
2196       // If any parameters are doubles, they must be rounded before
2197       // the call; dstore_rounding does gvn.transform
2198       Node *arg = argument(j);
2199       arg = dstore_rounding(arg);
2200       set_argument(j, arg);
2201     }
2202   }
2203 }
2204
2205 /**
2206  * Record profiling data exact_kls for Node n with the type system so
2207  * that it can propagate it (speculation)
2208  *
2209  * @param n          node that the type applies to
2210  * @param exact_kls  type from profiling
2211  * @param ptr_kind   did profiling see null?
2212  *
2213  * @return           node with improved type
2214  */
2215 Node* GraphKit::record_profile_for_speculation(Node* n, ciKlass* exact_kls, ProfilePtrKind ptr_kind) {
2216   const Type* current_type = _gvn.type(n);
2217   assert(UseTypeSpeculation, "type speculation must be on");
2218
2219   const TypePtr* speculative = current_type->speculative();
2220
2221   // Should the klass from the profile be recorded in the speculative type?
2222   if (current_type->would_improve_type(exact_kls, jvms()->depth())) {
2223     const TypeKlassPtr* tklass = TypeKlassPtr::make(exact_kls);
2224     const TypeOopPtr* xtype = tklass->as_instance_type();
2225     assert(xtype->klass_is_exact(), "Should be exact");
2226     // Any reason to believe n is not null (from this profiling or a previous one)?
2227     assert(ptr_kind != ProfileAlwaysNull, "impossible here");
2228     const TypePtr* ptr = (ptr_kind == ProfileMaybeNull && current_type->speculative_maybe_null()) ? TypePtr::BOTTOM : TypePtr::NOTNULL;
2229     // record the new speculative type's depth
2230     speculative = xtype->cast_to_ptr_type(ptr->ptr())->is_ptr();
2231     speculative = speculative->with_inline_depth(jvms()->depth());
2232   } else if (current_type->would_improve_ptr(ptr_kind)) {
2233     // Profiling reports that null was never seen, so we can change the
2234     // speculative type to a non-null ptr.
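    // Roughly (illustrative summary of the branch below): the profiled
    // ptr_kind is folded into the speculative type as follows:
    //   ProfileAlwaysNull -> speculative type becomes TypePtr::NULL_PTR
    //   ProfileNeverNull  -> existing speculative type narrowed to NOTNULL,
    //                        or plain TypePtr::NOTNULL if there was none
    // (ProfileMaybeNull never counts as an improvement, so it is not seen here.)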
2235 if (ptr_kind == ProfileAlwaysNull) { 2236 speculative = TypePtr::NULL_PTR; 2237 } else { 2238 assert(ptr_kind == ProfileNeverNull, "nothing else is an improvement"); 2239 const TypePtr* ptr = TypePtr::NOTNULL; 2240 if (speculative != NULL) { 2241 speculative = speculative->cast_to_ptr_type(ptr->ptr())->is_ptr(); 2242 } else { 2243 speculative = ptr; 2244 } 2245 } 2246 } 2247 2248 if (speculative != current_type->speculative()) { 2249 // Build a type with a speculative type (what we think we know 2250 // about the type but will need a guard when we use it) 2251 const TypeOopPtr* spec_type = TypeOopPtr::make(TypePtr::BotPTR, Type::Offset::bottom, TypeOopPtr::InstanceBot, speculative); 2252 // We're changing the type, we need a new CheckCast node to carry 2253 // the new type. The new type depends on the control: what 2254 // profiling tells us is only valid from here as far as we can 2255 // tell. 2256 Node* cast = new CheckCastPPNode(control(), n, current_type->remove_speculative()->join_speculative(spec_type)); 2257 cast = _gvn.transform(cast); 2258 replace_in_map(n, cast); 2259 n = cast; 2260 } 2261 2262 return n; 2263 } 2264 2265 /** 2266 * Record profiling data from receiver profiling at an invoke with the 2267 * type system so that it can propagate it (speculation) 2268 * 2269 * @param n receiver node 2270 * 2271 * @return node with improved type 2272 */ 2273 Node* GraphKit::record_profiled_receiver_for_speculation(Node* n) { 2274 if (!UseTypeSpeculation) { 2275 return n; 2276 } 2277 ciKlass* exact_kls = profile_has_unique_klass(); 2278 ProfilePtrKind ptr_kind = ProfileMaybeNull; 2279 if ((java_bc() == Bytecodes::_checkcast || 2280 java_bc() == Bytecodes::_instanceof || 2281 java_bc() == Bytecodes::_aastore) && 2282 method()->method_data()->is_mature()) { 2283 ciProfileData* data = method()->method_data()->bci_to_data(bci()); 2284 if (data != NULL) { 2285 if (!data->as_BitData()->null_seen()) { 2286 ptr_kind = ProfileNeverNull; 2287 } else { 2288 assert(data->is_ReceiverTypeData(), "bad profile data type"); 2289 ciReceiverTypeData* call = (ciReceiverTypeData*)data->as_ReceiverTypeData(); 2290 uint i = 0; 2291 for (; i < call->row_limit(); i++) { 2292 ciKlass* receiver = call->receiver(i); 2293 if (receiver != NULL) { 2294 break; 2295 } 2296 } 2297 ptr_kind = (i == call->row_limit()) ? ProfileAlwaysNull : ProfileMaybeNull; 2298 } 2299 } 2300 } 2301 return record_profile_for_speculation(n, exact_kls, ptr_kind); 2302 } 2303 2304 /** 2305 * Record profiling data from argument profiling at an invoke with the 2306 * type system so that it can propagate it (speculation) 2307 * 2308 * @param dest_method target method for the call 2309 * @param bc what invoke bytecode is this? 2310 */ 2311 void GraphKit::record_profiled_arguments_for_speculation(ciMethod* dest_method, Bytecodes::Code bc) { 2312 if (!UseTypeSpeculation) { 2313 return; 2314 } 2315 const TypeFunc* tf = TypeFunc::make(dest_method); 2316 int nargs = tf->domain_sig()->cnt() - TypeFunc::Parms; 2317 int skip = Bytecodes::has_receiver(bc) ? 
1 : 0;
2318   for (int j = skip, i = 0; j < nargs && i < TypeProfileArgsLimit; j++) {
2319     const Type *targ = tf->domain_sig()->field_at(j + TypeFunc::Parms);
2320     if (targ->isa_oopptr() && !targ->isa_valuetypeptr()) {
2321       ProfilePtrKind ptr_kind = ProfileMaybeNull;
2322       ciKlass* better_type = NULL;
2323       if (method()->argument_profiled_type(bci(), i, better_type, ptr_kind)) {
2324         record_profile_for_speculation(argument(j), better_type, ptr_kind);
2325       }
2326       i++;
2327     }
2328   }
2329 }
2330
2331 /**
2332  * Record profiling data from parameter profiling at an invoke with
2333  * the type system so that it can propagate it (speculation)
2334  */
2335 void GraphKit::record_profiled_parameters_for_speculation() {
2336   if (!UseTypeSpeculation) {
2337     return;
2338   }
2339   for (int i = 0, j = 0; i < method()->arg_size() ; i++) {
2340     if (_gvn.type(local(i))->isa_oopptr()) {
2341       ProfilePtrKind ptr_kind = ProfileMaybeNull;
2342       ciKlass* better_type = NULL;
2343       if (method()->parameter_profiled_type(j, better_type, ptr_kind)) {
2344         record_profile_for_speculation(local(i), better_type, ptr_kind);
2345       }
2346       j++;
2347     }
2348   }
2349 }
2350
2351 /**
2352  * Record profiling data from return value profiling at an invoke with
2353  * the type system so that it can propagate it (speculation)
2354  */
2355 void GraphKit::record_profiled_return_for_speculation() {
2356   if (!UseTypeSpeculation) {
2357     return;
2358   }
2359   ProfilePtrKind ptr_kind = ProfileMaybeNull;
2360   ciKlass* better_type = NULL;
2361   if (method()->return_profiled_type(bci(), better_type, ptr_kind)) {
2362     // If profiling reports a single type for the return value,
2363     // feed it to the type system so it can propagate it as a
2364     // speculative type
2365     record_profile_for_speculation(stack(sp()-1), better_type, ptr_kind);
2366   }
2367 }
2368
2369 void GraphKit::round_double_result(ciMethod* dest_method) {
2370   // A non-strict method may return a double value which has an extended
2371   // exponent, but this must not be visible in a caller which is 'strict'.
2372   // If a strict caller invokes a non-strict callee, round a double result.
2373
2374   BasicType result_type = dest_method->return_type()->basic_type();
2375   assert( method() != NULL, "must have caller context");
2376   if( result_type == T_DOUBLE && method()->is_strict() && !dest_method->is_strict() ) {
2377     // Destination method's return value is on top of stack
2378     // dstore_rounding() does gvn.transform
2379     Node *result = pop_pair();
2380     result = dstore_rounding(result);
2381     push_pair(result);
2382   }
2383 }
2384
2385 // rounding for strict float precision conformance
2386 Node* GraphKit::precision_rounding(Node* n) {
2387   return UseStrictFP && _method->flags().is_strict()
2388     && UseSSE == 0 && Matcher::strict_fp_requires_explicit_rounding
2389     ? _gvn.transform( new RoundFloatNode(0, n) )
2390     : n;
2391 }
2392
2393 // rounding for strict double precision conformance
2394 Node* GraphKit::dprecision_rounding(Node *n) {
2395   return UseStrictFP && _method->flags().is_strict()
2396     && UseSSE <= 1 && Matcher::strict_fp_requires_explicit_rounding
2397     ? _gvn.transform( new RoundDoubleNode(0, n) )
2398     : n;
2399 }
2400
2401 // rounding for non-strict double stores
2402 Node* GraphKit::dstore_rounding(Node* n) {
2403   return Matcher::strict_fp_requires_explicit_rounding
2404     && UseSSE <= 1
2405     ? _gvn.transform( new RoundDoubleNode(0, n) )
2406     : n;
2407 }
2408
2409 //=============================================================================
2410 // Generate a fast path/slow path idiom.
Graph looks like: 2411 // [foo] indicates that 'foo' is a parameter 2412 // 2413 // [in] NULL 2414 // \ / 2415 // CmpP 2416 // Bool ne 2417 // If 2418 // / \ 2419 // True False-<2> 2420 // / | 2421 // / cast_not_null 2422 // Load | | ^ 2423 // [fast_test] | | 2424 // gvn to opt_test | | 2425 // / \ | <1> 2426 // True False | 2427 // | \\ | 2428 // [slow_call] \[fast_result] 2429 // Ctl Val \ \ 2430 // | \ \ 2431 // Catch <1> \ \ 2432 // / \ ^ \ \ 2433 // Ex No_Ex | \ \ 2434 // | \ \ | \ <2> \ 2435 // ... \ [slow_res] | | \ [null_result] 2436 // \ \--+--+--- | | 2437 // \ | / \ | / 2438 // --------Region Phi 2439 // 2440 //============================================================================= 2441 // Code is structured as a series of driver functions all called 'do_XXX' that 2442 // call a set of helper functions. Helper functions first, then drivers. 2443 2444 //------------------------------null_check_oop--------------------------------- 2445 // Null check oop. Set null-path control into Region in slot 3. 2446 // Make a cast-not-nullness use the other not-null control. Return cast. 2447 Node* GraphKit::null_check_oop(Node* value, Node* *null_control, 2448 bool never_see_null, 2449 bool safe_for_replace, 2450 bool speculative) { 2451 // Initial NULL check taken path 2452 (*null_control) = top(); 2453 Node* cast = null_check_common(value, T_OBJECT, false, null_control, speculative); 2454 2455 // Generate uncommon_trap: 2456 if (never_see_null && (*null_control) != top()) { 2457 // If we see an unexpected null at a check-cast we record it and force a 2458 // recompile; the offending check-cast will be compiled to handle NULLs. 2459 // If we see more than one offending BCI, then all checkcasts in the 2460 // method will be compiled to handle NULLs. 2461 PreserveJVMState pjvms(this); 2462 set_control(*null_control); 2463 replace_in_map(value, null()); 2464 Deoptimization::DeoptReason reason = Deoptimization::reason_null_check(speculative); 2465 uncommon_trap(reason, 2466 Deoptimization::Action_make_not_entrant); 2467 (*null_control) = top(); // NULL path is dead 2468 } 2469 if ((*null_control) == top() && safe_for_replace) { 2470 replace_in_map(value, cast); 2471 } 2472 2473 // Cast away null-ness on the result 2474 return cast; 2475 } 2476 2477 //------------------------------opt_iff---------------------------------------- 2478 // Optimize the fast-check IfNode. Set the fast-path region slot 2. 2479 // Return slow-path control. 2480 Node* GraphKit::opt_iff(Node* region, Node* iff) { 2481 IfNode *opt_iff = _gvn.transform(iff)->as_If(); 2482 2483 // Fast path taken; set region slot 2 2484 Node *fast_taken = _gvn.transform( new IfFalseNode(opt_iff) ); 2485 region->init_req(2,fast_taken); // Capture fast-control 2486 2487 // Fast path not-taken, i.e. slow path 2488 Node *slow_taken = _gvn.transform( new IfTrueNode(opt_iff) ); 2489 return slow_taken; 2490 } 2491 2492 //-----------------------------make_runtime_call------------------------------- 2493 Node* GraphKit::make_runtime_call(int flags, 2494 const TypeFunc* call_type, address call_addr, 2495 const char* call_name, 2496 const TypePtr* adr_type, 2497 // The following parms are all optional. 2498 // The first NULL ends the list. 
2499 Node* parm0, Node* parm1, 2500 Node* parm2, Node* parm3, 2501 Node* parm4, Node* parm5, 2502 Node* parm6, Node* parm7) { 2503 // Slow-path call 2504 bool is_leaf = !(flags & RC_NO_LEAF); 2505 bool has_io = (!is_leaf && !(flags & RC_NO_IO)); 2506 if (call_name == NULL) { 2507 assert(!is_leaf, "must supply name for leaf"); 2508 call_name = OptoRuntime::stub_name(call_addr); 2509 } 2510 CallNode* call; 2511 if (!is_leaf) { 2512 call = new CallStaticJavaNode(call_type, call_addr, call_name, 2513 bci(), adr_type); 2514 } else if (flags & RC_NO_FP) { 2515 call = new CallLeafNoFPNode(call_type, call_addr, call_name, adr_type); 2516 } else { 2517 call = new CallLeafNode(call_type, call_addr, call_name, adr_type); 2518 } 2519 2520 // The following is similar to set_edges_for_java_call, 2521 // except that the memory effects of the call are restricted to AliasIdxRaw. 2522 2523 // Slow path call has no side-effects, uses few values 2524 bool wide_in = !(flags & RC_NARROW_MEM); 2525 bool wide_out = (C->get_alias_index(adr_type) == Compile::AliasIdxBot); 2526 2527 Node* prev_mem = NULL; 2528 if (wide_in) { 2529 prev_mem = set_predefined_input_for_runtime_call(call); 2530 } else { 2531 assert(!wide_out, "narrow in => narrow out"); 2532 Node* narrow_mem = memory(adr_type); 2533 prev_mem = reset_memory(); 2534 map()->set_memory(narrow_mem); 2535 set_predefined_input_for_runtime_call(call); 2536 } 2537 2538 // Hook each parm in order. Stop looking at the first NULL. 2539 if (parm0 != NULL) { call->init_req(TypeFunc::Parms+0, parm0); 2540 if (parm1 != NULL) { call->init_req(TypeFunc::Parms+1, parm1); 2541 if (parm2 != NULL) { call->init_req(TypeFunc::Parms+2, parm2); 2542 if (parm3 != NULL) { call->init_req(TypeFunc::Parms+3, parm3); 2543 if (parm4 != NULL) { call->init_req(TypeFunc::Parms+4, parm4); 2544 if (parm5 != NULL) { call->init_req(TypeFunc::Parms+5, parm5); 2545 if (parm6 != NULL) { call->init_req(TypeFunc::Parms+6, parm6); 2546 if (parm7 != NULL) { call->init_req(TypeFunc::Parms+7, parm7); 2547 /* close each nested if ===> */ } } } } } } } } 2548 assert(call->in(call->req()-1) != NULL, "must initialize all parms"); 2549 2550 if (!is_leaf) { 2551 // Non-leaves can block and take safepoints: 2552 add_safepoint_edges(call, ((flags & RC_MUST_THROW) != 0)); 2553 } 2554 // Non-leaves can throw exceptions: 2555 if (has_io) { 2556 call->set_req(TypeFunc::I_O, i_o()); 2557 } 2558 2559 if (flags & RC_UNCOMMON) { 2560 // Set the count to a tiny probability. Cf. Estimate_Block_Frequency. 2561 // (An "if" probability corresponds roughly to an unconditional count. 2562 // Sort of.) 2563 call->set_cnt(PROB_UNLIKELY_MAG(4)); 2564 } 2565 2566 Node* c = _gvn.transform(call); 2567 assert(c == call, "cannot disappear"); 2568 2569 if (wide_out) { 2570 // Slow path call has full side-effects. 2571 set_predefined_output_for_runtime_call(call); 2572 } else { 2573 // Slow path call has few side-effects, and/or sets few values. 2574 set_predefined_output_for_runtime_call(call, prev_mem, adr_type); 2575 } 2576 2577 if (has_io) { 2578 set_i_o(_gvn.transform(new ProjNode(call, TypeFunc::I_O))); 2579 } 2580 return call; 2581 2582 } 2583 2584 //------------------------------merge_memory----------------------------------- 2585 // Merge memory from one path into the current memory state. 
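// A rough picture (illustrative): for every alias slice on which the
// incoming state 'new_mem' differs from the current state, merge_memory()
// makes sure there is a memory Phi on 'region' and routes both versions
// into it:
//
//      current slice   new_mem slice
//              \          /
//           Phi(region, ...)      <- becomes the slice's memory in the map
//
// Slices that are identical on both paths are left untouched.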
2586 void GraphKit::merge_memory(Node* new_mem, Node* region, int new_path) { 2587 for (MergeMemStream mms(merged_memory(), new_mem->as_MergeMem()); mms.next_non_empty2(); ) { 2588 Node* old_slice = mms.force_memory(); 2589 Node* new_slice = mms.memory2(); 2590 if (old_slice != new_slice) { 2591 PhiNode* phi; 2592 if (old_slice->is_Phi() && old_slice->as_Phi()->region() == region) { 2593 if (mms.is_empty()) { 2594 // clone base memory Phi's inputs for this memory slice 2595 assert(old_slice == mms.base_memory(), "sanity"); 2596 phi = PhiNode::make(region, NULL, Type::MEMORY, mms.adr_type(C)); 2597 _gvn.set_type(phi, Type::MEMORY); 2598 for (uint i = 1; i < phi->req(); i++) { 2599 phi->init_req(i, old_slice->in(i)); 2600 } 2601 } else { 2602 phi = old_slice->as_Phi(); // Phi was generated already 2603 } 2604 } else { 2605 phi = PhiNode::make(region, old_slice, Type::MEMORY, mms.adr_type(C)); 2606 _gvn.set_type(phi, Type::MEMORY); 2607 } 2608 phi->set_req(new_path, new_slice); 2609 mms.set_memory(phi); 2610 } 2611 } 2612 } 2613 2614 //------------------------------make_slow_call_ex------------------------------ 2615 // Make the exception handler hookups for the slow call 2616 void GraphKit::make_slow_call_ex(Node* call, ciInstanceKlass* ex_klass, bool separate_io_proj, bool deoptimize) { 2617 if (stopped()) return; 2618 2619 // Make a catch node with just two handlers: fall-through and catch-all 2620 Node* i_o = _gvn.transform( new ProjNode(call, TypeFunc::I_O, separate_io_proj) ); 2621 Node* catc = _gvn.transform( new CatchNode(control(), i_o, 2) ); 2622 Node* norm = _gvn.transform( new CatchProjNode(catc, CatchProjNode::fall_through_index, CatchProjNode::no_handler_bci) ); 2623 Node* excp = _gvn.transform( new CatchProjNode(catc, CatchProjNode::catch_all_index, CatchProjNode::no_handler_bci) ); 2624 2625 { PreserveJVMState pjvms(this); 2626 set_control(excp); 2627 set_i_o(i_o); 2628 2629 if (excp != top()) { 2630 if (deoptimize) { 2631 // Deoptimize if an exception is caught. Don't construct exception state in this case. 2632 uncommon_trap(Deoptimization::Reason_unhandled, 2633 Deoptimization::Action_none); 2634 } else { 2635 // Create an exception state also. 2636 // Use an exact type if the caller has specified a specific exception. 2637 const Type* ex_type = TypeOopPtr::make_from_klass_unique(ex_klass)->cast_to_ptr_type(TypePtr::NotNull); 2638 Node* ex_oop = new CreateExNode(ex_type, control(), i_o); 2639 add_exception_state(make_exception_state(_gvn.transform(ex_oop))); 2640 } 2641 } 2642 } 2643 2644 // Get the no-exception control from the CatchNode. 2645 set_control(norm); 2646 } 2647 2648 static IfNode* gen_subtype_check_compare(Node* ctrl, Node* in1, Node* in2, BoolTest::mask test, float p, PhaseGVN* gvn, BasicType bt) { 2649 Node* cmp = NULL; 2650 switch(bt) { 2651 case T_INT: cmp = new CmpINode(in1, in2); break; 2652 case T_ADDRESS: cmp = new CmpPNode(in1, in2); break; 2653 default: fatal("unexpected comparison type %s", type2name(bt)); 2654 } 2655 gvn->transform(cmp); 2656 Node* bol = gvn->transform(new BoolNode(cmp, test)); 2657 IfNode* iff = new IfNode(ctrl, bol, p, COUNT_UNKNOWN); 2658 gvn->transform(iff); 2659 if (!bol->is_Con()) gvn->record_for_igvn(iff); 2660 return iff; 2661 } 2662 2663 2664 //-------------------------------gen_subtype_check----------------------------- 2665 // Generate a subtyping check. Takes as input the subtype and supertype. 2666 // Returns 2 values: sets the default control() to the true path and returns 2667 // the false path. 
Only reads invariant memory; sets no (visible) memory. 2668 // The PartialSubtypeCheckNode sets the hidden 1-word cache in the encoding 2669 // but that's not exposed to the optimizer. This call also doesn't take in an 2670 // Object; if you wish to check an Object you need to load the Object's class 2671 // prior to coming here. 2672 Node* Phase::gen_subtype_check(Node* subklass, Node* superklass, Node** ctrl, MergeMemNode* mem, PhaseGVN* gvn) { 2673 Compile* C = gvn->C; 2674 2675 if ((*ctrl)->is_top()) { 2676 return C->top(); 2677 } 2678 2679 // Fast check for identical types, perhaps identical constants. 2680 // The types can even be identical non-constants, in cases 2681 // involving Array.newInstance, Object.clone, etc. 2682 if (subklass == superklass) 2683 return C->top(); // false path is dead; no test needed. 2684 2685 if (gvn->type(superklass)->singleton()) { 2686 ciKlass* superk = gvn->type(superklass)->is_klassptr()->klass(); 2687 ciKlass* subk = gvn->type(subklass)->is_klassptr()->klass(); 2688 2689 // In the common case of an exact superklass, try to fold up the 2690 // test before generating code. You may ask, why not just generate 2691 // the code and then let it fold up? The answer is that the generated 2692 // code will necessarily include null checks, which do not always 2693 // completely fold away. If they are also needless, then they turn 2694 // into a performance loss. Example: 2695 // Foo[] fa = blah(); Foo x = fa[0]; fa[1] = x; 2696 // Here, the type of 'fa' is often exact, so the store check 2697 // of fa[1]=x will fold up, without testing the nullness of x. 2698 switch (C->static_subtype_check(superk, subk)) { 2699 case Compile::SSC_always_false: 2700 { 2701 Node* always_fail = *ctrl; 2702 *ctrl = gvn->C->top(); 2703 return always_fail; 2704 } 2705 case Compile::SSC_always_true: 2706 return C->top(); 2707 case Compile::SSC_easy_test: 2708 { 2709 // Just do a direct pointer compare and be done. 2710 IfNode* iff = gen_subtype_check_compare(*ctrl, subklass, superklass, BoolTest::eq, PROB_STATIC_FREQUENT, gvn, T_ADDRESS); 2711 *ctrl = gvn->transform(new IfTrueNode(iff)); 2712 return gvn->transform(new IfFalseNode(iff)); 2713 } 2714 case Compile::SSC_full_test: 2715 break; 2716 default: 2717 ShouldNotReachHere(); 2718 } 2719 } 2720 2721 // %%% Possible further optimization: Even if the superklass is not exact, 2722 // if the subklass is the unique subtype of the superklass, the check 2723 // will always succeed. We could leave a dependency behind to ensure this. 2724 2725 // First load the super-klass's check-offset 2726 Node *p1 = gvn->transform(new AddPNode(superklass, superklass, gvn->MakeConX(in_bytes(Klass::super_check_offset_offset())))); 2727 Node* m = mem->memory_at(C->get_alias_index(gvn->type(p1)->is_ptr())); 2728 Node *chk_off = gvn->transform(new LoadINode(NULL, m, p1, gvn->type(p1)->is_ptr(), TypeInt::INT, MemNode::unordered)); 2729 int cacheoff_con = in_bytes(Klass::secondary_super_cache_offset()); 2730 bool might_be_cache = (gvn->find_int_con(chk_off, cacheoff_con) == cacheoff_con); 2731 2732 // Load from the sub-klass's super-class display list, or a 1-word cache of 2733 // the secondary superclass list, or a failing value with a sentinel offset 2734 // if the super-klass is an interface or exceptionally deep in the Java 2735 // hierarchy and we have to scan the secondary superclass list the hard way. 2736 // Worst-case type is a little odd: NULL is allowed as a result (usually 2737 // klass loads can never produce a NULL). 
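  // Put differently (illustrative): the word loaded below is roughly
  // subklass[chk_off], where
  //  - for most superklasses, chk_off points at a fixed slot in the primary
  //    supertype display, so one load plus one compare decides the test; and
  //  - when chk_off equals the secondary_super_cache offset (the
  //    'might_be_cache' case), the loaded word is only a 1-element cache of
  //    the secondary-super list, so a compare miss must still fall through
  //    to the slow linear scan further down.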
2738 Node *chk_off_X = chk_off; 2739 #ifdef _LP64 2740 chk_off_X = gvn->transform(new ConvI2LNode(chk_off_X)); 2741 #endif 2742 Node *p2 = gvn->transform(new AddPNode(subklass,subklass,chk_off_X)); 2743 // For some types like interfaces the following loadKlass is from a 1-word 2744 // cache which is mutable so can't use immutable memory. Other 2745 // types load from the super-class display table which is immutable. 2746 m = mem->memory_at(C->get_alias_index(gvn->type(p2)->is_ptr())); 2747 Node *kmem = might_be_cache ? m : C->immutable_memory(); 2748 Node *nkls = gvn->transform(LoadKlassNode::make(*gvn, NULL, kmem, p2, gvn->type(p2)->is_ptr(), TypeKlassPtr::BOTTOM)); 2749 2750 // Compile speed common case: ARE a subtype and we canNOT fail 2751 if( superklass == nkls ) 2752 return C->top(); // false path is dead; no test needed. 2753 2754 // See if we get an immediate positive hit. Happens roughly 83% of the 2755 // time. Test to see if the value loaded just previously from the subklass 2756 // is exactly the superklass. 2757 IfNode *iff1 = gen_subtype_check_compare(*ctrl, superklass, nkls, BoolTest::eq, PROB_LIKELY(0.83f), gvn, T_ADDRESS); 2758 Node *iftrue1 = gvn->transform( new IfTrueNode (iff1)); 2759 *ctrl = gvn->transform(new IfFalseNode(iff1)); 2760 2761 // Compile speed common case: Check for being deterministic right now. If 2762 // chk_off is a constant and not equal to cacheoff then we are NOT a 2763 // subklass. In this case we need exactly the 1 test above and we can 2764 // return those results immediately. 2765 if (!might_be_cache) { 2766 Node* not_subtype_ctrl = *ctrl; 2767 *ctrl = iftrue1; // We need exactly the 1 test above 2768 return not_subtype_ctrl; 2769 } 2770 2771 // Gather the various success & failures here 2772 RegionNode *r_ok_subtype = new RegionNode(4); 2773 gvn->record_for_igvn(r_ok_subtype); 2774 RegionNode *r_not_subtype = new RegionNode(3); 2775 gvn->record_for_igvn(r_not_subtype); 2776 2777 r_ok_subtype->init_req(1, iftrue1); 2778 2779 // Check for immediate negative hit. Happens roughly 11% of the time (which 2780 // is roughly 63% of the remaining cases). Test to see if the loaded 2781 // check-offset points into the subklass display list or the 1-element 2782 // cache. If it points to the display (and NOT the cache) and the display 2783 // missed then it's not a subtype. 2784 Node *cacheoff = gvn->intcon(cacheoff_con); 2785 IfNode *iff2 = gen_subtype_check_compare(*ctrl, chk_off, cacheoff, BoolTest::ne, PROB_LIKELY(0.63f), gvn, T_INT); 2786 r_not_subtype->init_req(1, gvn->transform(new IfTrueNode (iff2))); 2787 *ctrl = gvn->transform(new IfFalseNode(iff2)); 2788 2789 // Check for self. Very rare to get here, but it is taken 1/3 the time. 2790 // No performance impact (too rare) but allows sharing of secondary arrays 2791 // which has some footprint reduction. 2792 IfNode *iff3 = gen_subtype_check_compare(*ctrl, subklass, superklass, BoolTest::eq, PROB_LIKELY(0.36f), gvn, T_ADDRESS); 2793 r_ok_subtype->init_req(2, gvn->transform(new IfTrueNode(iff3))); 2794 *ctrl = gvn->transform(new IfFalseNode(iff3)); 2795 2796 // -- Roads not taken here: -- 2797 // We could also have chosen to perform the self-check at the beginning 2798 // of this code sequence, as the assembler does. This would not pay off 2799 // the same way, since the optimizer, unlike the assembler, can perform 2800 // static type analysis to fold away many successful self-checks. 
2801   // Non-foldable self checks work better here in second position, because
2802   // the initial primary superclass check subsumes a self-check for most
2803   // types.  An exception would be a secondary type like array-of-interface,
2804   // which does not appear in its own primary supertype display.
2805   // Finally, we could have chosen to move the self-check into the
2806   // PartialSubtypeCheckNode, and from there out-of-line in a platform
2807   // dependent manner.  But it is worthwhile to have the check here,
2808   // where it can perhaps be optimized.  The cost in code space is
2809   // small (register compare, branch).
2810
2811   // Now do a linear scan of the secondary super-klass array.  Again, no real
2812   // performance impact (too rare) but it's gotta be done.
2813   // Since the code is rarely used, there is no penalty for moving it
2814   // out of line, and it can only improve I-cache density.
2815   // The decision to inline or out-of-line this final check is platform
2816   // dependent, and is found in the AD file definition of PartialSubtypeCheck.
2817   Node* psc = gvn->transform(
2818     new PartialSubtypeCheckNode(*ctrl, subklass, superklass));
2819
2820   IfNode *iff4 = gen_subtype_check_compare(*ctrl, psc, gvn->zerocon(T_OBJECT), BoolTest::ne, PROB_FAIR, gvn, T_ADDRESS);
2821   r_not_subtype->init_req(2, gvn->transform(new IfTrueNode (iff4)));
2822   r_ok_subtype ->init_req(3, gvn->transform(new IfFalseNode(iff4)));
2823
2824   // Return false path; set default control to true path.
2825   *ctrl = gvn->transform(r_ok_subtype);
2826   return gvn->transform(r_not_subtype);
2827 }
2828
2829 // Profile-driven exact type check:
2830 Node* GraphKit::type_check_receiver(Node* receiver, ciKlass* klass,
2831                                     float prob,
2832                                     Node* *casted_receiver) {
2833   const TypeKlassPtr* tklass = TypeKlassPtr::make(klass);
2834   Node* recv_klass = load_object_klass(receiver);
2835   Node* fail = type_check(recv_klass, tklass, prob);
2836   const TypeOopPtr* recv_xtype = tklass->as_instance_type();
2837   assert(recv_xtype->klass_is_exact(), "");
2838
2839   // Subsume downstream occurrences of receiver with a cast to
2840   // recv_xtype, since now we know what the type will be.
2841   Node* cast = new CheckCastPPNode(control(), receiver, recv_xtype);
2842   (*casted_receiver) = _gvn.transform(cast);
2843   // (User must make the replace_in_map call.)
2844
2845   return fail;
2846 }
2847
2848 Node* GraphKit::type_check(Node* recv_klass, const TypeKlassPtr* tklass,
2849                            float prob) {
2850   //const TypeKlassPtr* tklass = TypeKlassPtr::make(klass);
2851   Node* want_klass = makecon(tklass);
2852   Node* cmp = _gvn.transform( new CmpPNode(recv_klass, want_klass));
2853   Node* bol = _gvn.transform( new BoolNode(cmp, BoolTest::eq) );
2854   IfNode* iff = create_and_xform_if(control(), bol, prob, COUNT_UNKNOWN);
2855   set_control( _gvn.transform( new IfTrueNode (iff)));
2856   Node* fail = _gvn.transform( new IfFalseNode(iff));
2857   return fail;
2858 }
2859
2860
2861 //------------------------------seems_never_null-------------------------------
2862 // Use null_seen information if it is available from the profile.
2863 // If we see an unexpected null at a type check we record it and force a
2864 // recompile; the offending check will be recompiled to handle NULLs.
2865 // If we see several offending BCIs, then all checks in the
2866 // method will be recompiled.
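// For instance (illustrative): at a checkcast whose MDO BitData has
// null_seen == false, seems_never_null() answers true, and the caller may
// replace the explicit null path with an uncommon trap; if a null does
// arrive at runtime, the trap records it and the method is recompiled
// with the full null check restored.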
2867 bool GraphKit::seems_never_null(Node* obj, ciProfileData* data, bool& speculating) { 2868 speculating = !_gvn.type(obj)->speculative_maybe_null(); 2869 Deoptimization::DeoptReason reason = Deoptimization::reason_null_check(speculating); 2870 if (UncommonNullCast // Cutout for this technique 2871 && obj != null() // And not the -Xcomp stupid case? 2872 && !too_many_traps(reason) 2873 ) { 2874 if (speculating) { 2875 return true; 2876 } 2877 if (data == NULL) 2878 // Edge case: no mature data. Be optimistic here. 2879 return true; 2880 // If the profile has not seen a null, assume it won't happen. 2881 assert(java_bc() == Bytecodes::_checkcast || 2882 java_bc() == Bytecodes::_instanceof || 2883 java_bc() == Bytecodes::_aastore, "MDO must collect null_seen bit here"); 2884 return !data->as_BitData()->null_seen(); 2885 } 2886 speculating = false; 2887 return false; 2888 } 2889 2890 //------------------------maybe_cast_profiled_receiver------------------------- 2891 // If the profile has seen exactly one type, narrow to exactly that type. 2892 // Subsequent type checks will always fold up. 2893 Node* GraphKit::maybe_cast_profiled_receiver(Node* not_null_obj, 2894 ciKlass* require_klass, 2895 ciKlass* spec_klass, 2896 bool safe_for_replace) { 2897 if (!UseTypeProfile || !TypeProfileCasts) return NULL; 2898 2899 Deoptimization::DeoptReason reason = Deoptimization::reason_class_check(spec_klass != NULL); 2900 2901 // Make sure we haven't already deoptimized from this tactic. 2902 if (too_many_traps(reason) || too_many_recompiles(reason)) 2903 return NULL; 2904 2905 // (No, this isn't a call, but it's enough like a virtual call 2906 // to use the same ciMethod accessor to get the profile info...) 2907 // If we have a speculative type use it instead of profiling (which 2908 // may not help us) 2909 ciKlass* exact_kls = spec_klass == NULL ? profile_has_unique_klass() : spec_klass; 2910 if (exact_kls != NULL) {// no cast failures here 2911 if (require_klass == NULL || 2912 C->static_subtype_check(require_klass, exact_kls) == Compile::SSC_always_true) { 2913 // If we narrow the type to match what the type profile sees or 2914 // the speculative type, we can then remove the rest of the 2915 // cast. 2916 // This is a win, even if the exact_kls is very specific, 2917 // because downstream operations, such as method calls, 2918 // will often benefit from the sharper type. 2919 Node* exact_obj = not_null_obj; // will get updated in place... 2920 Node* slow_ctl = type_check_receiver(exact_obj, exact_kls, 1.0, 2921 &exact_obj); 2922 { PreserveJVMState pjvms(this); 2923 set_control(slow_ctl); 2924 uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile); 2925 } 2926 if (safe_for_replace) { 2927 replace_in_map(not_null_obj, exact_obj); 2928 } 2929 return exact_obj; 2930 } 2931 // assert(ssc == Compile::SSC_always_true)... except maybe the profile lied to us. 
2932 } 2933 2934 return NULL; 2935 } 2936 2937 /** 2938 * Cast obj to type and emit guard unless we had too many traps here 2939 * already 2940 * 2941 * @param obj node being casted 2942 * @param type type to cast the node to 2943 * @param not_null true if we know node cannot be null 2944 */ 2945 Node* GraphKit::maybe_cast_profiled_obj(Node* obj, 2946 ciKlass* type, 2947 bool not_null) { 2948 if (stopped()) { 2949 return obj; 2950 } 2951 2952 // type == NULL if profiling tells us this object is always null 2953 if (type != NULL) { 2954 Deoptimization::DeoptReason class_reason = Deoptimization::Reason_speculate_class_check; 2955 Deoptimization::DeoptReason null_reason = Deoptimization::Reason_speculate_null_check; 2956 2957 if (!too_many_traps(null_reason) && !too_many_recompiles(null_reason) && 2958 !too_many_traps(class_reason) && 2959 !too_many_recompiles(class_reason)) { 2960 Node* not_null_obj = NULL; 2961 // not_null is true if we know the object is not null and 2962 // there's no need for a null check 2963 if (!not_null) { 2964 Node* null_ctl = top(); 2965 not_null_obj = null_check_oop(obj, &null_ctl, true, true, true); 2966 assert(null_ctl->is_top(), "no null control here"); 2967 } else { 2968 not_null_obj = obj; 2969 } 2970 2971 Node* exact_obj = not_null_obj; 2972 ciKlass* exact_kls = type; 2973 Node* slow_ctl = type_check_receiver(exact_obj, exact_kls, 1.0, 2974 &exact_obj); 2975 { 2976 PreserveJVMState pjvms(this); 2977 set_control(slow_ctl); 2978 uncommon_trap_exact(class_reason, Deoptimization::Action_maybe_recompile); 2979 } 2980 replace_in_map(not_null_obj, exact_obj); 2981 obj = exact_obj; 2982 } 2983 } else { 2984 if (!too_many_traps(Deoptimization::Reason_null_assert) && 2985 !too_many_recompiles(Deoptimization::Reason_null_assert)) { 2986 Node* exact_obj = null_assert(obj); 2987 replace_in_map(obj, exact_obj); 2988 obj = exact_obj; 2989 } 2990 } 2991 return obj; 2992 } 2993 2994 //-------------------------------gen_instanceof-------------------------------- 2995 // Generate an instance-of idiom. Used by both the instance-of bytecode 2996 // and the reflective instance-of call. 2997 Node* GraphKit::gen_instanceof(Node* obj, Node* superklass, bool safe_for_replace) { 2998 kill_dead_locals(); // Benefit all the uncommon traps 2999 assert( !stopped(), "dead parse path should be checked in callers" ); 3000 assert(!TypePtr::NULL_PTR->higher_equal(_gvn.type(superklass)->is_klassptr()), 3001 "must check for not-null not-dead klass in callers"); 3002 3003 // Make the merge point 3004 enum { _obj_path = 1, _fail_path, _null_path, PATH_LIMIT }; 3005 RegionNode* region = new RegionNode(PATH_LIMIT); 3006 Node* phi = new PhiNode(region, TypeInt::BOOL); 3007 C->set_has_split_ifs(true); // Has chance for split-if optimization 3008 3009 ciProfileData* data = NULL; 3010 if (java_bc() == Bytecodes::_instanceof) { // Only for the bytecode 3011 data = method()->method_data()->bci_to_data(bci()); 3012 } 3013 bool speculative_not_null = false; 3014 bool never_see_null = (ProfileDynamicTypes // aggressive use of profile 3015 && seems_never_null(obj, data, speculative_not_null)); 3016 3017 // Null check; get casted pointer; set region slot 3 3018 Node* null_ctl = top(); 3019 Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace, speculative_not_null); 3020 3021 // If not_null_obj is dead, only null-path is taken 3022 if (stopped()) { // Doing instance-of on a NULL? 
3023     set_control(null_ctl);
3024     return intcon(0);
3025   }
3026   region->init_req(_null_path, null_ctl);
3027   phi   ->init_req(_null_path, intcon(0)); // Set null path value
3028   if (null_ctl == top()) {
3029     // Do this eagerly, so that pattern matches like is_diamond_phi
3030     // will work even during parsing.
3031     assert(_null_path == PATH_LIMIT-1, "delete last");
3032     region->del_req(_null_path);
3033     phi   ->del_req(_null_path);
3034   }
3035
3036   // Do we know the type check always succeeds?
3037   bool known_statically = false;
3038   if (_gvn.type(superklass)->singleton()) {
3039     ciKlass* superk = _gvn.type(superklass)->is_klassptr()->klass();
3040     ciKlass* subk = _gvn.type(obj)->is_oopptr()->klass();
3041     if (subk != NULL && subk->is_loaded()) {
3042       int static_res = C->static_subtype_check(superk, subk);
3043       known_statically = (static_res == Compile::SSC_always_true || static_res == Compile::SSC_always_false);
3044     }
3045   }
3046
3047   if (!known_statically) {
3048     const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
3049     // We may not have profiling here or it may not help us. If we
3050     // have a speculative type use it to perform an exact cast.
3051     ciKlass* spec_obj_type = obj_type->speculative_type();
3052     if (spec_obj_type != NULL || (ProfileDynamicTypes && data != NULL)) {
3053       Node* cast_obj = maybe_cast_profiled_receiver(not_null_obj, NULL, spec_obj_type, safe_for_replace);
3054       if (stopped()) {            // Profile disagrees with this path.
3055         set_control(null_ctl);    // Null is the only remaining possibility.
3056         return intcon(0);
3057       }
3058       if (cast_obj != NULL) {
3059         not_null_obj = cast_obj;
3060       }
3061     }
3062   }
3063
3064   // Load the object's klass
3065   Node* obj_klass = load_object_klass(not_null_obj);
3066
3067   // Generate the subtype check
3068   Node* not_subtype_ctrl = gen_subtype_check(obj_klass, superklass);
3069
3070   // Plug in the success path to the general merge in slot 1.
3071   region->init_req(_obj_path, control());
3072   phi   ->init_req(_obj_path, intcon(1));
3073
3074   // Plug in the failing path to the general merge in slot 2.
3075   region->init_req(_fail_path, not_subtype_ctrl);
3076   phi   ->init_req(_fail_path, intcon(0));
3077
3078   // Return final merged results
3079   set_control( _gvn.transform(region) );
3080   record_for_igvn(region);
3081
3082   // If we know the type check always succeeds then we don't use the
3083   // profiling data at this bytecode. Don't lose it, feed it to the
3084   // type system as a speculative type.
3085   if (safe_for_replace) {
3086     Node* casted_obj = record_profiled_receiver_for_speculation(obj);
3087     replace_in_map(obj, casted_obj);
3088   }
3089
3090   return _gvn.transform(phi);
3091 }
3092
3093 //-------------------------------gen_checkcast---------------------------------
3094 // Generate a checkcast idiom.  Used by both the checkcast bytecode and the
3095 // array store bytecode.  Stack must be as-if BEFORE doing the bytecode so the
3096 // uncommon-trap paths work.  Adjust stack after this call.
3097 // If failure_control is supplied and not null, it is filled in with
3098 // the control edge for the cast failure.  Otherwise, an appropriate
3099 // uncommon trap or exception is thrown.
3100 Node* GraphKit::gen_checkcast(Node *obj, Node* superklass,
3101                               Node* *failure_control) {
3102   kill_dead_locals();             // Benefit all the uncommon traps
3103   const TypeKlassPtr *tk = _gvn.type(superklass)->is_klassptr();
3104   const Type *toop = TypeOopPtr::make_from_klass(tk->klass());
3105
3106   // Fast cutout:  Check the case that the cast is vacuously true.
3107   // This detects the common cases where the test will short-circuit
3108   // away completely.  We do this before we perform the null check,
3109   // because if the test is going to turn into zero code, we don't
3110   // want a residual null check left around.  (Causes a slowdown,
3111   // for example, in some objArray manipulations, such as a[i]=a[j].)
3112   if (tk->singleton()) {
3113     const TypeOopPtr* objtp = _gvn.type(obj)->isa_oopptr();
3114     if (objtp != NULL && objtp->klass() != NULL) {
3115       switch (C->static_subtype_check(tk->klass(), objtp->klass())) {
3116       case Compile::SSC_always_true:
3117         // If we know the type check always succeeds then we don't use
3118         // the profiling data at this bytecode. Don't lose it, feed it
3119         // to the type system as a speculative type.
3120         return record_profiled_receiver_for_speculation(obj);
3121       case Compile::SSC_always_false:
3122         // It needs a null check because a null will *pass* the cast check.
3123         // A non-null value will always produce an exception.
3124         return null_assert(obj);
3125       }
3126     }
3127   }
3128
3129   ciProfileData* data = NULL;
3130   bool safe_for_replace = false;
3131   if (failure_control == NULL) {        // use MDO in regular case only
3132     assert(java_bc() == Bytecodes::_aastore ||
3133            java_bc() == Bytecodes::_checkcast,
3134            "interpreter profiles type checks only for these BCs");
3135     data = method()->method_data()->bci_to_data(bci());
3136     safe_for_replace = true;
3137   }
3138
3139   // Make the merge point
3140   enum { _obj_path = 1, _null_path, PATH_LIMIT };
3141   RegionNode* region = new RegionNode(PATH_LIMIT);
3142   Node*       phi    = new PhiNode(region, toop);
3143   C->set_has_split_ifs(true); // Has chance for split-if optimization
3144
3145   // Use null-cast information if it is available
3146   bool speculative_not_null = false;
3147   bool never_see_null = ((failure_control == NULL)  // regular case only
3148                          && seems_never_null(obj, data, speculative_not_null));
3149
3150   // Null check; get casted pointer; set region slot 3
3151   Node* null_ctl = top();
3152   Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace, speculative_not_null);
3153
3154   // If not_null_obj is dead, only null-path is taken
3155   if (stopped()) {              // Doing instance-of on a NULL?
3156     set_control(null_ctl);
3157     return null();
3158   }
3159   region->init_req(_null_path, null_ctl);
3160   phi   ->init_req(_null_path, null());  // Set null path value
3161   if (null_ctl == top()) {
3162     // Do this eagerly, so that pattern matches like is_diamond_phi
3163     // will work even during parsing.
3164     assert(_null_path == PATH_LIMIT-1, "delete last");
3165     region->del_req(_null_path);
3166     phi   ->del_req(_null_path);
3167   }
3168
3169   Node* cast_obj = NULL;
3170   if (tk->klass_is_exact()) {
3171     // The following optimization tries to statically cast the speculative type of the object
3172     // (for example obtained during profiling) to the type of the superklass and then do a
3173     // dynamic check that the type of the object is what we expect. To work correctly
3174     // for checkcast and aastore the type of superklass should be exact.
3175     const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
3176     // We may not have profiling here or it may not help us. If we have
3177     // a speculative type use it to perform an exact cast.
3178 ciKlass* spec_obj_type = obj_type->speculative_type(); 3179 if (spec_obj_type != NULL || data != NULL) { 3180 cast_obj = maybe_cast_profiled_receiver(not_null_obj, tk->klass(), spec_obj_type, safe_for_replace); 3181 if (cast_obj != NULL) { 3182 if (failure_control != NULL) // failure is now impossible 3183 (*failure_control) = top(); 3184 // adjust the type of the phi to the exact klass: 3185 phi->raise_bottom_type(_gvn.type(cast_obj)->meet_speculative(TypePtr::NULL_PTR)); 3186 } 3187 } 3188 } 3189 3190 if (cast_obj == NULL) { 3191 // Load the object's klass 3192 Node* obj_klass = load_object_klass(not_null_obj); 3193 3194 // Generate the subtype check 3195 Node* not_subtype_ctrl = gen_subtype_check( obj_klass, superklass ); 3196 3197 // Plug in success path into the merge 3198 cast_obj = _gvn.transform(new CheckCastPPNode(control(), not_null_obj, toop)); 3199 // Failure path ends in uncommon trap (or may be dead - failure impossible) 3200 if (failure_control == NULL) { 3201 if (not_subtype_ctrl != top()) { // If failure is possible 3202 PreserveJVMState pjvms(this); 3203 set_control(not_subtype_ctrl); 3204 builtin_throw(Deoptimization::Reason_class_check, obj_klass); 3205 } 3206 } else { 3207 (*failure_control) = not_subtype_ctrl; 3208 } 3209 } 3210 3211 region->init_req(_obj_path, control()); 3212 phi ->init_req(_obj_path, cast_obj); 3213 3214 // A merge of NULL or Casted-NotNull obj 3215 Node* res = _gvn.transform(phi); 3216 3217 // Note I do NOT always 'replace_in_map(obj,result)' here. 3218 // if( tk->klass()->can_be_primary_super() ) 3219 // This means that if I successfully store an Object into an array-of-String 3220 // I 'forget' that the Object is really now known to be a String. I have to 3221 // do this because we don't have true union types for interfaces - if I store 3222 // a Baz into an array-of-Interface and then tell the optimizer it's an 3223 // Interface, I forget that it's also a Baz and cannot do Baz-like field 3224 // references to it. FIX THIS WHEN UNION TYPES APPEAR! 3225 // replace_in_map( obj, res ); 3226 3227 // Return final merged results 3228 set_control( _gvn.transform(region) ); 3229 record_for_igvn(region); 3230 3231 return record_profiled_receiver_for_speculation(res); 3232 } 3233 3234 //------------------------------next_monitor----------------------------------- 3235 // What number should be given to the next monitor? 3236 int GraphKit::next_monitor() { 3237 int current = jvms()->monitor_depth()* C->sync_stack_slots(); 3238 int next = current + C->sync_stack_slots(); 3239 // Keep the toplevel high water mark current: 3240 if (C->fixed_slots() < next) C->set_fixed_slots(next); 3241 return current; 3242 } 3243 3244 //------------------------------insert_mem_bar--------------------------------- 3245 // Memory barrier to avoid floating things around 3246 // The membar serves as a pinch point between both control and all memory slices. 
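// Roughly, the shape emitted below is (illustrative sketch only):
//
//   control ------+                  +--> Proj(Control) --> new control
//                 |  MemBar(opcode)  |
//   reset_memory -+                  +--> Proj(Memory)  --> new all-memory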
3247 Node* GraphKit::insert_mem_bar(int opcode, Node* precedent) { 3248 MemBarNode* mb = MemBarNode::make(C, opcode, Compile::AliasIdxBot, precedent); 3249 mb->init_req(TypeFunc::Control, control()); 3250 mb->init_req(TypeFunc::Memory, reset_memory()); 3251 Node* membar = _gvn.transform(mb); 3252 set_control(_gvn.transform(new ProjNode(membar, TypeFunc::Control))); 3253 set_all_memory_call(membar); 3254 return membar; 3255 } 3256 3257 //-------------------------insert_mem_bar_volatile---------------------------- 3258 // Memory barrier to avoid floating things around 3259 // The membar serves as a pinch point between both control and memory(alias_idx). 3260 // If you want to make a pinch point on all memory slices, do not use this 3261 // function (even with AliasIdxBot); use insert_mem_bar() instead. 3262 Node* GraphKit::insert_mem_bar_volatile(int opcode, int alias_idx, Node* precedent) { 3263 // When Parse::do_put_xxx updates a volatile field, it appends a series 3264 // of MemBarVolatile nodes, one for *each* volatile field alias category. 3265 // The first membar is on the same memory slice as the field store opcode. 3266 // This forces the membar to follow the store. (Bug 6500685 broke this.) 3267 // All the other membars (for other volatile slices, including AliasIdxBot, 3268 // which stands for all unknown volatile slices) are control-dependent 3269 // on the first membar. This prevents later volatile loads or stores 3270 // from sliding up past the just-emitted store. 3271 3272 MemBarNode* mb = MemBarNode::make(C, opcode, alias_idx, precedent); 3273 mb->set_req(TypeFunc::Control,control()); 3274 if (alias_idx == Compile::AliasIdxBot) { 3275 mb->set_req(TypeFunc::Memory, merged_memory()->base_memory()); 3276 } else { 3277 assert(!(opcode == Op_Initialize && alias_idx != Compile::AliasIdxRaw), "fix caller"); 3278 mb->set_req(TypeFunc::Memory, memory(alias_idx)); 3279 } 3280 Node* membar = _gvn.transform(mb); 3281 set_control(_gvn.transform(new ProjNode(membar, TypeFunc::Control))); 3282 if (alias_idx == Compile::AliasIdxBot) { 3283 merged_memory()->set_base_memory(_gvn.transform(new ProjNode(membar, TypeFunc::Memory))); 3284 } else { 3285 set_memory(_gvn.transform(new ProjNode(membar, TypeFunc::Memory)),alias_idx); 3286 } 3287 return membar; 3288 } 3289 3290 //------------------------------shared_lock------------------------------------ 3291 // Emit locking code. 3292 FastLockNode* GraphKit::shared_lock(Node* obj) { 3293 // bci is either a monitorenter bc or InvocationEntryBci 3294 // %%% SynchronizationEntryBCI is redundant; use InvocationEntryBci in interfaces 3295 assert(SynchronizationEntryBCI == InvocationEntryBci, ""); 3296 3297 if( !GenerateSynchronizationCode ) 3298 return NULL; // Not locking things? 3299 if (stopped()) // Dead monitor? 3300 return NULL; 3301 3302 assert(dead_locals_are_killed(), "should kill locals before sync. point"); 3303 3304 // Box the stack location 3305 Node* box = _gvn.transform(new BoxLockNode(next_monitor())); 3306 Node* mem = reset_memory(); 3307 3308 FastLockNode * flock = _gvn.transform(new FastLockNode(0, obj, box) )->as_FastLock(); 3309 if (UseBiasedLocking && PrintPreciseBiasedLockingStatistics) { 3310 // Create the counters for this fast lock. 3311 flock->create_lock_counter(sync_jvms()); // sync_jvms used to get current bci 3312 } 3313 3314 // Create the rtm counters for this fast lock if needed. 3315 flock->create_rtm_lock_counter(sync_jvms()); // sync_jvms used to get current bci 3316 3317 // Add monitor to debug info for the slow path. 
If we block inside the 3318 // slow path and de-opt, we need the monitor hanging around 3319 map()->push_monitor( flock ); 3320 3321 const TypeFunc *tf = LockNode::lock_type(); 3322 LockNode *lock = new LockNode(C, tf); 3323 3324 lock->init_req( TypeFunc::Control, control() ); 3325 lock->init_req( TypeFunc::Memory , mem ); 3326 lock->init_req( TypeFunc::I_O , top() ) ; // does no i/o 3327 lock->init_req( TypeFunc::FramePtr, frameptr() ); 3328 lock->init_req( TypeFunc::ReturnAdr, top() ); 3329 3330 lock->init_req(TypeFunc::Parms + 0, obj); 3331 lock->init_req(TypeFunc::Parms + 1, box); 3332 lock->init_req(TypeFunc::Parms + 2, flock); 3333 add_safepoint_edges(lock); 3334 3335 lock = _gvn.transform( lock )->as_Lock(); 3336 3337 // lock has no side-effects, sets few values 3338 set_predefined_output_for_runtime_call(lock, mem, TypeRawPtr::BOTTOM); 3339 3340 insert_mem_bar(Op_MemBarAcquireLock); 3341 3342 // Add this to the worklist so that the lock can be eliminated 3343 record_for_igvn(lock); 3344 3345 #ifndef PRODUCT 3346 if (PrintLockStatistics) { 3347 // Update the counter for this lock. Don't bother using an atomic 3348 // operation since we don't require absolute accuracy. 3349 lock->create_lock_counter(map()->jvms()); 3350 increment_counter(lock->counter()->addr()); 3351 } 3352 #endif 3353 3354 return flock; 3355 } 3356 3357 3358 //------------------------------shared_unlock---------------------------------- 3359 // Emit unlocking code. 3360 void GraphKit::shared_unlock(Node* box, Node* obj) { 3361 // bci is either a monitorenter bc or InvocationEntryBci 3362 // %%% SynchronizationEntryBCI is redundant; use InvocationEntryBci in interfaces 3363 assert(SynchronizationEntryBCI == InvocationEntryBci, ""); 3364 3365 if( !GenerateSynchronizationCode ) 3366 return; 3367 if (stopped()) { // Dead monitor? 3368 map()->pop_monitor(); // Kill monitor from debug info 3369 return; 3370 } 3371 3372 // Memory barrier to avoid floating things down past the locked region 3373 insert_mem_bar(Op_MemBarReleaseLock); 3374 3375 const TypeFunc *tf = OptoRuntime::complete_monitor_exit_Type(); 3376 UnlockNode *unlock = new UnlockNode(C, tf); 3377 #ifdef ASSERT 3378 unlock->set_dbg_jvms(sync_jvms()); 3379 #endif 3380 uint raw_idx = Compile::AliasIdxRaw; 3381 unlock->init_req( TypeFunc::Control, control() ); 3382 unlock->init_req( TypeFunc::Memory , memory(raw_idx) ); 3383 unlock->init_req( TypeFunc::I_O , top() ) ; // does no i/o 3384 unlock->init_req( TypeFunc::FramePtr, frameptr() ); 3385 unlock->init_req( TypeFunc::ReturnAdr, top() ); 3386 3387 unlock->init_req(TypeFunc::Parms + 0, obj); 3388 unlock->init_req(TypeFunc::Parms + 1, box); 3389 unlock = _gvn.transform(unlock)->as_Unlock(); 3390 3391 Node* mem = reset_memory(); 3392 3393 // unlock has no side-effects, sets few values 3394 set_predefined_output_for_runtime_call(unlock, mem, TypeRawPtr::BOTTOM); 3395 3396 // Kill monitor from debug info 3397 map()->pop_monitor( ); 3398 } 3399 3400 //-------------------------------get_layout_helper----------------------------- 3401 // If the given klass is a constant or known to be an array, 3402 // fetch the constant layout helper value into constant_value 3403 // and return (Node*)NULL. Otherwise, load the non-constant 3404 // layout helper value, and return the node which represents it. 3405 // This two-faced routine is useful because allocation sites 3406 // almost always feature constant types. 
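// For orientation, one illustrative decoding (values assume a typical
// 64-bit VM with compressed oops): the layout helper of int[] is
// 0xC0100A02, i.e. array tag 0x3 (type array), header size 0x10 = 16
// bytes, element type 0x0A = T_INT, log2 element size 0x02.  Instance
// klasses instead store the positive instance size in bytes, with the
// low _lh_instance_slow_path_bit requesting the slow allocation path.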
3407 Node* GraphKit::get_layout_helper(Node* klass_node, jint& constant_value) { 3408 const TypeKlassPtr* inst_klass = _gvn.type(klass_node)->isa_klassptr(); 3409 if (!StressReflectiveCode && inst_klass != NULL) { 3410 ciKlass* klass = inst_klass->klass(); 3411 assert(klass != NULL, "klass should not be NULL"); 3412 bool xklass = inst_klass->klass_is_exact(); 3413 if (xklass || klass->is_array_klass()) { 3414 jint lhelper = klass->layout_helper(); 3415 if (lhelper != Klass::_lh_neutral_value) { 3416 constant_value = lhelper; 3417 return (Node*) NULL; 3418 } 3419 } 3420 } 3421 constant_value = Klass::_lh_neutral_value; // put in a known value 3422 Node* lhp = basic_plus_adr(klass_node, klass_node, in_bytes(Klass::layout_helper_offset())); 3423 return make_load(NULL, lhp, TypeInt::INT, T_INT, MemNode::unordered); 3424 } 3425 3426 // We just put in an allocate/initialize with a big raw-memory effect. 3427 // Hook selected additional alias categories on the initialization. 3428 static void hook_memory_on_init(GraphKit& kit, int alias_idx, 3429 MergeMemNode* init_in_merge, 3430 Node* init_out_raw) { 3431 DEBUG_ONLY(Node* init_in_raw = init_in_merge->base_memory()); 3432 assert(init_in_merge->memory_at(alias_idx) == init_in_raw, ""); 3433 3434 Node* prevmem = kit.memory(alias_idx); 3435 init_in_merge->set_memory_at(alias_idx, prevmem); 3436 kit.set_memory(init_out_raw, alias_idx); 3437 } 3438 3439 //---------------------------set_output_for_allocation------------------------- 3440 Node* GraphKit::set_output_for_allocation(AllocateNode* alloc, 3441 const TypeOopPtr* oop_type, 3442 bool deoptimize_on_exception) { 3443 int rawidx = Compile::AliasIdxRaw; 3444 alloc->set_req( TypeFunc::FramePtr, frameptr() ); 3445 add_safepoint_edges(alloc); 3446 Node* allocx = _gvn.transform(alloc); 3447 set_control( _gvn.transform(new ProjNode(allocx, TypeFunc::Control) ) ); 3448 // create memory projection for i_o 3449 set_memory ( _gvn.transform( new ProjNode(allocx, TypeFunc::Memory, true) ), rawidx ); 3450 make_slow_call_ex(allocx, env()->Throwable_klass(), true, deoptimize_on_exception); 3451 3452 // create a memory projection as for the normal control path 3453 Node* malloc = _gvn.transform(new ProjNode(allocx, TypeFunc::Memory)); 3454 set_memory(malloc, rawidx); 3455 3456 // a normal slow-call doesn't change i_o, but an allocation does 3457 // we create a separate i_o projection for the normal control path 3458 set_i_o(_gvn.transform( new ProjNode(allocx, TypeFunc::I_O, false) ) ); 3459 Node* rawoop = _gvn.transform( new ProjNode(allocx, TypeFunc::Parms) ); 3460 3461 // put in an initialization barrier 3462 InitializeNode* init = insert_mem_bar_volatile(Op_Initialize, rawidx, 3463 rawoop)->as_Initialize(); 3464 assert(alloc->initialization() == init, "2-way macro link must work"); 3465 assert(init ->allocation() == alloc, "2-way macro link must work"); 3466 { 3467 // Extract memory strands which may participate in the new object's 3468 // initialization, and source them from the new InitializeNode. 3469 // This will allow us to observe initializations when they occur, 3470 // and link them properly (as a group) to the InitializeNode. 
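    // A sketch of hook_memory_on_init() for each tracked alias index i
    // (illustrative): minit_in->memory_at(i) is rewired to the previous
    // memory state at i, and the kit's memory at i becomes minit_out, so
    // later stores into the fresh object flow through the InitializeNode
    // where they can be observed and captured.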
3471     assert(init->in(InitializeNode::Memory) == malloc, "");
3472     MergeMemNode* minit_in = MergeMemNode::make(malloc);
3473     init->set_req(InitializeNode::Memory, minit_in);
3474     record_for_igvn(minit_in); // fold it up later, if possible
3475     Node* minit_out = memory(rawidx);
3476     assert(minit_out->is_Proj() && minit_out->in(0) == init, "");
3477     if (oop_type->isa_aryptr()) {
3478       const TypeAryPtr* arytype = oop_type->is_aryptr();
3479       if (arytype->klass()->is_value_array_klass()) {
3480         ciValueArrayKlass* vak = arytype->klass()->as_value_array_klass();
3481         ciValueKlass* vk = vak->element_klass()->as_value_klass();
3482         for (int i = 0, len = vk->nof_nonstatic_fields(); i < len; i++) {
3483           ciField* field = vk->nonstatic_field_at(i);
3484           if (field->offset() >= TrackedInitializationLimit * HeapWordSize)
3485             continue;  // do not bother to track really large numbers of fields
3486           int off_in_vt = field->offset() - vk->first_field_offset();
3487           const TypePtr* adr_type = arytype->with_field_offset(off_in_vt)->add_offset(Type::OffsetBot);
3488           int fieldidx = C->get_alias_index(adr_type);
3489           hook_memory_on_init(*this, fieldidx, minit_in, minit_out);
3490         }
3491       } else {
3492         const TypePtr* telemref = oop_type->add_offset(Type::OffsetBot);
3493         int            elemidx  = C->get_alias_index(telemref);
3494         hook_memory_on_init(*this, elemidx, minit_in, minit_out);
3495       }
3496     } else if (oop_type->isa_instptr() || oop_type->isa_valuetypeptr()) {
3497       ciInstanceKlass* ik = oop_type->klass()->as_instance_klass();
3498       for (int i = 0, len = ik->nof_nonstatic_fields(); i < len; i++) {
3499         ciField* field = ik->nonstatic_field_at(i);
3500         if (field->offset() >= TrackedInitializationLimit * HeapWordSize)
3501           continue;  // do not bother to track really large numbers of fields
3502         // Find (or create) the alias category for this field:
3503         int fieldidx = C->alias_type(field)->index();
3504         hook_memory_on_init(*this, fieldidx, minit_in, minit_out);
3505       }
3506     }
3507   }
3508
3509   // Cast raw oop to the real thing...
3510   Node* javaoop = new CheckCastPPNode(control(), rawoop, oop_type);
3511   javaoop = _gvn.transform(javaoop);
3512   C->set_recent_alloc(control(), javaoop);
3513   assert(just_allocated_object(control()) == javaoop, "just allocated");
3514
3515 #ifdef ASSERT
3516   { // Verify that the AllocateNode::Ideal_allocation recognizers work:
3517     assert(AllocateNode::Ideal_allocation(rawoop, &_gvn) == alloc,
3518            "Ideal_allocation works");
3519     assert(AllocateNode::Ideal_allocation(javaoop, &_gvn) == alloc,
3520            "Ideal_allocation works");
3521     if (alloc->is_AllocateArray()) {
3522       assert(AllocateArrayNode::Ideal_array_allocation(rawoop, &_gvn) == alloc->as_AllocateArray(),
3523              "Ideal_allocation works");
3524       assert(AllocateArrayNode::Ideal_array_allocation(javaoop, &_gvn) == alloc->as_AllocateArray(),
3525              "Ideal_allocation works");
3526     } else {
3527       assert(alloc->in(AllocateNode::ALength)->is_top(), "no length, please");
3528     }
3529   }
3530 #endif //ASSERT
3531
3532   return javaoop;
3533 }
3534
3535 //---------------------------new_instance--------------------------------------
3536 // This routine takes a klass_node which may be constant (for a static type)
3537 // or may be non-constant (for reflective code).  It will work equally well
3538 // for either, and the graph will fold nicely if the optimizer later reduces
3539 // the type to a constant.
3540 // The optional arguments are for specialized use by intrinsics:
3541 //  - If 'extra_slow_test' is not null, it is an extra condition for the slow-path.
3542 //  - If 'return_size_val', report the total object size to the caller.
3543 //  - deoptimize_on_exception controls how Java exceptions are handled (rethrow vs deoptimize)
3544 Node* GraphKit::new_instance(Node* klass_node,
3545                              Node* extra_slow_test,
3546                              Node* *return_size_val,
3547                              bool deoptimize_on_exception,
3548                              ValueTypeBaseNode* value_node) {
3549   // Compute size in doublewords
3550   // The size is always an integral number of doublewords, represented
3551   // as a positive bytewise size stored in the klass's layout_helper.
3552   // The layout_helper also encodes (in a low bit) the need for a slow path.
3553   jint  layout_con = Klass::_lh_neutral_value;
3554   Node* layout_val = get_layout_helper(klass_node, layout_con);
3555   bool  layout_is_con = (layout_val == NULL);
3556
3557   if (extra_slow_test == NULL)  extra_slow_test = intcon(0);
3558   // Generate the initial go-slow test.  It's either ALWAYS (return a
3559   // Node for 1) or NEVER (return a NULL) or perhaps (in the reflective
3560   // case) a computed value derived from the layout_helper.
3561   Node* initial_slow_test = NULL;
3562   if (layout_is_con) {
3563     assert(!StressReflectiveCode, "stress mode does not use these paths");
3564     bool must_go_slow = Klass::layout_helper_needs_slow_path(layout_con);
3565     initial_slow_test = must_go_slow ? intcon(1) : extra_slow_test;
3566   } else {   // reflective case
3567     // This reflective path is used by Unsafe.allocateInstance.
3568     // (It may be stress-tested by specifying StressReflectiveCode.)
3569     // Basically, we want to get into the VM if there's an illegal argument.
3570     Node* bit = intcon(Klass::_lh_instance_slow_path_bit);
3571     initial_slow_test = _gvn.transform( new AndINode(layout_val, bit) );
3572     if (extra_slow_test != intcon(0)) {
3573       initial_slow_test = _gvn.transform( new OrINode(initial_slow_test, extra_slow_test) );
3574     }
3575     // (Macro-expander will further convert this to a Bool, if necessary.)
3576   }
3577
3578   // Find the size in bytes.  This is easy; it's the layout_helper.
3579   // The size value must be valid even if the slow path is taken.
3580   Node* size = NULL;
3581   if (layout_is_con) {
3582     size = MakeConX(Klass::layout_helper_size_in_bytes(layout_con));
3583   } else {   // reflective case
3584     // This reflective path is used by clone and Unsafe.allocateInstance.
3585     size = ConvI2X(layout_val);
3586
3587     // Clear the low bits to extract layout_helper_size_in_bytes:
3588     assert((int)Klass::_lh_instance_slow_path_bit < BytesPerLong, "clear bit");
3589     Node* mask = MakeConX(~ (intptr_t)right_n_bits(LogBytesPerLong));
3590     size = _gvn.transform( new AndXNode(size, mask) );
3591   }
3592   if (return_size_val != NULL) {
3593     (*return_size_val) = size;
3594   }
3595
3596   // This is a precise notnull oop of the klass.
3597   // (Actually, it need not be precise if this is a reflective allocation.)
3598   // It's what we cast the result to.
3599   const TypeKlassPtr* tklass = _gvn.type(klass_node)->isa_klassptr();
3600   if (!tklass)  tklass = TypeKlassPtr::OBJECT;
3601   const TypeOopPtr* oop_type = tklass->as_instance_type();
3602
3603   // Now generate allocation code
3604
3605   // The entire memory state is needed for slow path of the allocation
3606   // since GC and deoptimization can happen.
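  // (Sketch, for orientation: reset_memory() below detaches the whole
  // memory state so the AllocateNode can consume it; the node takes
  // {control, memory, i_o, size, klass_node, initial_slow_test}, and
  // set_output_for_allocation() wires its projections back into the kit.)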
3607 Node *mem = reset_memory(); 3608 set_all_memory(mem); // Create new memory state 3609 3610 AllocateNode* alloc = new AllocateNode(C, AllocateNode::alloc_type(Type::TOP), 3611 control(), mem, i_o(), 3612 size, klass_node, 3613 initial_slow_test, value_node); 3614 3615 return set_output_for_allocation(alloc, oop_type, deoptimize_on_exception); 3616 } 3617 3618 //-------------------------------new_array------------------------------------- 3619 // helper for newarray and anewarray 3620 // The 'length' parameter is (obviously) the length of the array. 3621 // See comments on new_instance for the meaning of the other arguments. 3622 Node* GraphKit::new_array(Node* klass_node, // array klass (maybe variable) 3623 Node* length, // number of array elements 3624 int nargs, // number of arguments to push back for uncommon trap 3625 Node* *return_size_val, 3626 bool deoptimize_on_exception) { 3627 jint layout_con = Klass::_lh_neutral_value; 3628 Node* layout_val = get_layout_helper(klass_node, layout_con); 3629 bool layout_is_con = (layout_val == NULL); 3630 3631 if (!layout_is_con && !StressReflectiveCode && 3632 !too_many_traps(Deoptimization::Reason_class_check)) { 3633 // This is a reflective array creation site. 3634 // Optimistically assume that it is a subtype of Object[], 3635 // so that we can fold up all the address arithmetic. 3636 layout_con = Klass::array_layout_helper(T_OBJECT); 3637 Node* cmp_lh = _gvn.transform( new CmpINode(layout_val, intcon(layout_con)) ); 3638 Node* bol_lh = _gvn.transform( new BoolNode(cmp_lh, BoolTest::eq) ); 3639 { BuildCutout unless(this, bol_lh, PROB_MAX); 3640 inc_sp(nargs); 3641 uncommon_trap(Deoptimization::Reason_class_check, 3642 Deoptimization::Action_maybe_recompile); 3643 } 3644 layout_val = NULL; 3645 layout_is_con = true; 3646 } 3647 3648 // Generate the initial go-slow test. Make sure we do not overflow 3649 // if length is huge (near 2Gig) or negative! We do not need 3650 // exact double-words here, just a close approximation of needed 3651 // double-words. We can't add any offset or rounding bits, lest we 3652 // take a size -1 of bytes and make it positive. Use an unsigned 3653 // compare, so negative sizes look hugely positive. 3654 int fast_size_limit = FastAllocateSizeLimit; 3655 if (layout_is_con) { 3656 assert(!StressReflectiveCode, "stress mode does not use these paths"); 3657 // Increase the size limit if we have exact knowledge of array type. 3658 int log2_esize = Klass::layout_helper_log2_element_size(layout_con); 3659 fast_size_limit <<= (LogBytesPerLong - log2_esize); 3660 } 3661 3662 Node* initial_slow_cmp = _gvn.transform( new CmpUNode( length, intcon( fast_size_limit ) ) ); 3663 Node* initial_slow_test = _gvn.transform( new BoolNode( initial_slow_cmp, BoolTest::gt ) ); 3664 3665 // --- Size Computation --- 3666 // array_size = round_to_heap(array_header + (length << elem_shift)); 3667 // where round_to_heap(x) == align_to(x, MinObjAlignmentInBytes) 3668 // and align_to(x, y) == ((x + y-1) & ~(y-1)) 3669 // The rounding mask is strength-reduced, if possible. 3670 int round_mask = MinObjAlignmentInBytes - 1; 3671 Node* header_size = NULL; 3672 int header_size_min = arrayOopDesc::base_offset_in_bytes(T_BYTE); 3673 // (T_BYTE has the weakest alignment and size restrictions...) 
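  // Illustrative instance of the strength reduction below: with 8-byte
  // alignment, round_mask starts as 7; for a long[] (eshift == 3) every
  // element boundary is already 8-aligned, so 7 & ~right_n_bits(3) == 0
  // and the mask drops away, while for an int[] (eshift == 2) it
  // survives and the final rounding AndX remains.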
3674 if (layout_is_con) { 3675 int hsize = Klass::layout_helper_header_size(layout_con); 3676 int eshift = Klass::layout_helper_log2_element_size(layout_con); 3677 BasicType etype = Klass::layout_helper_element_type(layout_con); 3678 bool is_value_array = Klass::layout_helper_is_valueArray(layout_con); 3679 if ((round_mask & ~right_n_bits(eshift)) == 0) 3680 round_mask = 0; // strength-reduce it if it goes away completely 3681 assert(is_value_array || (hsize & right_n_bits(eshift)) == 0, "hsize is pre-rounded"); 3682 assert(header_size_min <= hsize, "generic minimum is smallest"); 3683 header_size_min = hsize; 3684 header_size = intcon(hsize + round_mask); 3685 } else { 3686 Node* hss = intcon(Klass::_lh_header_size_shift); 3687 Node* hsm = intcon(Klass::_lh_header_size_mask); 3688 Node* hsize = _gvn.transform( new URShiftINode(layout_val, hss) ); 3689 hsize = _gvn.transform( new AndINode(hsize, hsm) ); 3690 Node* mask = intcon(round_mask); 3691 header_size = _gvn.transform( new AddINode(hsize, mask) ); 3692 } 3693 3694 Node* elem_shift = NULL; 3695 if (layout_is_con) { 3696 int eshift = Klass::layout_helper_log2_element_size(layout_con); 3697 if (eshift != 0) 3698 elem_shift = intcon(eshift); 3699 } else { 3700 // There is no need to mask or shift this value. 3701 // The semantics of LShiftINode include an implicit mask to 0x1F. 3702 assert(Klass::_lh_log2_element_size_shift == 0, "use shift in place"); 3703 elem_shift = layout_val; 3704 } 3705 3706 // Transition to native address size for all offset calculations: 3707 Node* lengthx = ConvI2X(length); 3708 Node* headerx = ConvI2X(header_size); 3709 #ifdef _LP64 3710 { const TypeInt* tilen = _gvn.find_int_type(length); 3711 if (tilen != NULL && tilen->_lo < 0) { 3712 // Add a manual constraint to a positive range. Cf. array_element_address. 3713 jint size_max = fast_size_limit; 3714 if (size_max > tilen->_hi) size_max = tilen->_hi; 3715 const TypeInt* tlcon = TypeInt::make(0, size_max, Type::WidenMin); 3716 3717 // Only do a narrow I2L conversion if the range check passed. 3718 IfNode* iff = new IfNode(control(), initial_slow_test, PROB_MIN, COUNT_UNKNOWN); 3719 _gvn.transform(iff); 3720 RegionNode* region = new RegionNode(3); 3721 _gvn.set_type(region, Type::CONTROL); 3722 lengthx = new PhiNode(region, TypeLong::LONG); 3723 _gvn.set_type(lengthx, TypeLong::LONG); 3724 3725 // Range check passed. Use ConvI2L node with narrow type. 3726 Node* passed = IfFalse(iff); 3727 region->init_req(1, passed); 3728 // Make I2L conversion control dependent to prevent it from 3729 // floating above the range check during loop optimizations. 3730 lengthx->init_req(1, C->constrained_convI2L(&_gvn, length, tlcon, passed)); 3731 3732 // Range check failed. Use ConvI2L with wide type because length may be invalid. 3733 region->init_req(2, IfTrue(iff)); 3734 lengthx->init_req(2, ConvI2X(length)); 3735 3736 set_control(region); 3737 record_for_igvn(region); 3738 record_for_igvn(lengthx); 3739 } 3740 } 3741 #endif 3742 3743 // Combine header size (plus rounding) and body size. Then round down. 3744 // This computation cannot overflow, because it is used only in two 3745 // places, one where the length is sharply limited, and the other 3746 // after a successful allocation. 
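  // Worked example (illustrative): for new int[10] with 8-byte alignment
  // and a 16-byte header, header_size above is 16 + 7 = 23, abody is
  // 10 << 2 = 40, so size = (23 + 40) & ~7 = 63 & ~7 = 56 bytes, which
  // is exactly align_up(16 + 40, 8).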
3747 Node* abody = lengthx; 3748 if (elem_shift != NULL) 3749 abody = _gvn.transform( new LShiftXNode(lengthx, elem_shift) ); 3750 Node* size = _gvn.transform( new AddXNode(headerx, abody) ); 3751 if (round_mask != 0) { 3752 Node* mask = MakeConX(~round_mask); 3753 size = _gvn.transform( new AndXNode(size, mask) ); 3754 } 3755 // else if round_mask == 0, the size computation is self-rounding 3756 3757 if (return_size_val != NULL) { 3758 // This is the size 3759 (*return_size_val) = size; 3760 } 3761 3762 // Now generate allocation code 3763 3764 // The entire memory state is needed for slow path of the allocation 3765 // since GC and deoptimization can happen. 3766 Node *mem = reset_memory(); 3767 set_all_memory(mem); // Create new memory state 3768 3769 if (initial_slow_test->is_Bool()) { 3770 // Hide it behind a CMoveI, or else PhaseIdealLoop::split_up will get sick. 3771 initial_slow_test = initial_slow_test->as_Bool()->as_int_value(&_gvn); 3772 } 3773 3774 // Create the AllocateArrayNode and its result projections 3775 AllocateArrayNode* alloc 3776 = new AllocateArrayNode(C, AllocateArrayNode::alloc_type(TypeInt::INT), 3777 control(), mem, i_o(), 3778 size, klass_node, 3779 initial_slow_test, 3780 length); 3781 3782 // Cast to correct type. Note that the klass_node may be constant or not, 3783 // and in the latter case the actual array type will be inexact also. 3784 // (This happens via a non-constant argument to inline_native_newArray.) 3785 // In any case, the value of klass_node provides the desired array type. 3786 const TypeInt* length_type = _gvn.find_int_type(length); 3787 const TypeOopPtr* ary_type = _gvn.type(klass_node)->is_klassptr()->as_instance_type(); 3788 if (ary_type->isa_aryptr() && length_type != NULL) { 3789 // Try to get a better type than POS for the size 3790 ary_type = ary_type->is_aryptr()->cast_to_size(length_type); 3791 } 3792 3793 Node* javaoop = set_output_for_allocation(alloc, ary_type, deoptimize_on_exception); 3794 3795 // Cast length on remaining path to be as narrow as possible 3796 if (map()->find_edge(length) >= 0) { 3797 Node* ccast = alloc->make_ideal_length(ary_type, &_gvn); 3798 if (ccast != length) { 3799 _gvn.set_type_bottom(ccast); 3800 record_for_igvn(ccast); 3801 replace_in_map(length, ccast); 3802 } 3803 } 3804 3805 const TypeAryPtr* ary_ptr = ary_type->isa_aryptr(); 3806 ciKlass* elem_klass = ary_ptr != NULL ? 
ary_ptr->klass()->as_array_klass()->element_klass() : NULL; 3807 if (elem_klass != NULL && elem_klass->is_valuetype()) { 3808 ciValueKlass* vk = elem_klass->as_value_klass(); 3809 if (!vk->flatten_array()) { 3810 // Non-flattened value type arrays need to be initialized with default value type oops 3811 initialize_value_type_array(javaoop, length, elem_klass->as_value_klass(), nargs); 3812 // TODO re-enable once JDK-8189802 is fixed 3813 // InitializeNode* init = alloc->initialization(); 3814 //init->set_complete_with_arraycopy(); 3815 } 3816 } 3817 3818 return javaoop; 3819 } 3820 3821 void GraphKit::initialize_value_type_array(Node* array, Node* length, ciValueKlass* vk, int nargs) { 3822 // Check for zero length 3823 Node* null_ctl = top(); 3824 null_check_common(length, T_INT, false, &null_ctl, false); 3825 if (stopped()) { 3826 set_control(null_ctl); // Always zero 3827 return; 3828 } 3829 3830 RegionNode* res_ctl = new RegionNode(3); 3831 gvn().set_type(res_ctl, Type::CONTROL); 3832 record_for_igvn(res_ctl); 3833 3834 // Length is zero: don't execute initialization loop 3835 res_ctl->init_req(1, null_ctl); 3836 PhiNode* res_io = PhiNode::make(res_ctl, i_o(), Type::ABIO); 3837 PhiNode* res_mem = PhiNode::make(res_ctl, merged_memory(), Type::MEMORY, TypePtr::BOTTOM); 3838 gvn().set_type(res_io, Type::ABIO); 3839 gvn().set_type(res_mem, Type::MEMORY); 3840 record_for_igvn(res_io); 3841 record_for_igvn(res_mem); 3842 3843 // Length is non-zero: execute a loop that initializes the array with the default value type 3844 Node* oop = ValueTypeNode::load_default_oop(gvn(), vk); 3845 3846 add_predicate(nargs); 3847 RegionNode* loop = new RegionNode(3); 3848 loop->init_req(1, control()); 3849 PhiNode* index = PhiNode::make(loop, intcon(0), TypeInt::INT); 3850 PhiNode* mem = PhiNode::make(loop, reset_memory(), Type::MEMORY, TypePtr::BOTTOM); 3851 3852 gvn().set_type(loop, Type::CONTROL); 3853 gvn().set_type(index, TypeInt::INT); 3854 gvn().set_type(mem, Type::MEMORY); 3855 record_for_igvn(loop); 3856 record_for_igvn(index); 3857 record_for_igvn(mem); 3858 3859 // Loop body: initialize array element at 'index' 3860 set_control(loop); 3861 set_all_memory(mem); 3862 Node* adr = array_element_address(array, index, T_OBJECT); 3863 const TypeOopPtr* elemtype = TypeValueTypePtr::make(TypePtr::NotNull, vk); 3864 store_oop_to_array(control(), array, adr, TypeAryPtr::OOPS, oop, elemtype, T_VALUETYPE, MemNode::release); 3865 3866 // Check if we need to execute another loop iteration 3867 length = SubI(length, intcon(1)); 3868 IfNode* iff = create_and_map_if(control(), Bool(CmpI(index, length), BoolTest::lt), PROB_FAIR, COUNT_UNKNOWN); 3869 3870 // Continue with next iteration 3871 loop->init_req(2, IfTrue(iff)); 3872 index->init_req(2, AddI(index, intcon(1))); 3873 mem->init_req(2, merged_memory()); 3874 3875 // Exit loop 3876 res_ctl->init_req(2, IfFalse(iff)); 3877 res_io->set_req(2, i_o()); 3878 res_mem->set_req(2, reset_memory()); 3879 3880 // Set merged control, IO and memory 3881 set_control(res_ctl); 3882 set_i_o(res_io); 3883 set_all_memory(res_mem); 3884 } 3885 3886 // The following "Ideal_foo" functions are placed here because they recognize 3887 // the graph shapes created by the functions immediately above. 3888 3889 //---------------------------Ideal_allocation---------------------------------- 3890 // Given an oop pointer or raw pointer, see if it feeds from an AllocateNode. 
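// The recognized shape, sketched (illustrative):
//
//   AllocateNode
//        |
//   Proj(Parms)   <-- the raw oop
//        |
//   CheckCastPP   <-- the java oop (at most one such cast is stripped)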
3891 AllocateNode* AllocateNode::Ideal_allocation(Node* ptr, PhaseTransform* phase) { 3892 if (ptr == NULL) { // reduce dumb test in callers 3893 return NULL; 3894 } 3895 if (ptr->is_CheckCastPP()) { // strip only one raw-to-oop cast 3896 ptr = ptr->in(1); 3897 if (ptr == NULL) return NULL; 3898 } 3899 // Return NULL for allocations with several casts: 3900 // j.l.reflect.Array.newInstance(jobject, jint) 3901 // Object.clone() 3902 // to keep more precise type from last cast. 3903 if (ptr->is_Proj()) { 3904 Node* allo = ptr->in(0); 3905 if (allo != NULL && allo->is_Allocate()) { 3906 return allo->as_Allocate(); 3907 } 3908 } 3909 // Report failure to match. 3910 return NULL; 3911 } 3912 3913 // Fancy version which also strips off an offset (and reports it to caller). 3914 AllocateNode* AllocateNode::Ideal_allocation(Node* ptr, PhaseTransform* phase, 3915 intptr_t& offset) { 3916 Node* base = AddPNode::Ideal_base_and_offset(ptr, phase, offset); 3917 if (base == NULL) return NULL; 3918 return Ideal_allocation(base, phase); 3919 } 3920 3921 // Trace Initialize <- Proj[Parm] <- Allocate 3922 AllocateNode* InitializeNode::allocation() { 3923 Node* rawoop = in(InitializeNode::RawAddress); 3924 if (rawoop->is_Proj()) { 3925 Node* alloc = rawoop->in(0); 3926 if (alloc->is_Allocate()) { 3927 return alloc->as_Allocate(); 3928 } 3929 } 3930 return NULL; 3931 } 3932 3933 // Trace Allocate -> Proj[Parm] -> Initialize 3934 InitializeNode* AllocateNode::initialization() { 3935 ProjNode* rawoop = proj_out_or_null(AllocateNode::RawAddress); 3936 if (rawoop == NULL) return NULL; 3937 for (DUIterator_Fast imax, i = rawoop->fast_outs(imax); i < imax; i++) { 3938 Node* init = rawoop->fast_out(i); 3939 if (init->is_Initialize()) { 3940 assert(init->as_Initialize()->allocation() == this, "2-way link"); 3941 return init->as_Initialize(); 3942 } 3943 } 3944 return NULL; 3945 } 3946 3947 //----------------------------- loop predicates --------------------------- 3948 3949 //------------------------------add_predicate_impl---------------------------- 3950 void GraphKit::add_predicate_impl(Deoptimization::DeoptReason reason, int nargs) { 3951 // Too many traps seen? 3952 if (too_many_traps(reason)) { 3953 #ifdef ASSERT 3954 if (TraceLoopPredicate) { 3955 int tc = C->trap_count(reason); 3956 tty->print("too many traps=%s tcount=%d in ", 3957 Deoptimization::trap_reason_name(reason), tc); 3958 method()->print(); // which method has too many predicate traps 3959 tty->cr(); 3960 } 3961 #endif 3962 // We cannot afford to take more traps here, 3963 // do not generate predicate. 3964 return; 3965 } 3966 3967 Node *cont = _gvn.intcon(1); 3968 Node* opq = _gvn.transform(new Opaque1Node(C, cont)); 3969 Node *bol = _gvn.transform(new Conv2BNode(opq)); 3970 IfNode* iff = create_and_map_if(control(), bol, PROB_MAX, COUNT_UNKNOWN); 3971 Node* iffalse = _gvn.transform(new IfFalseNode(iff)); 3972 C->add_predicate_opaq(opq); 3973 { 3974 PreserveJVMState pjvms(this); 3975 set_control(iffalse); 3976 inc_sp(nargs); 3977 uncommon_trap(reason, Deoptimization::Action_maybe_recompile); 3978 } 3979 Node* iftrue = _gvn.transform(new IfTrueNode(iff)); 3980 set_control(iftrue); 3981 } 3982 3983 //------------------------------add_predicate--------------------------------- 3984 void GraphKit::add_predicate(int nargs) { 3985 if (UseLoopPredicate) { 3986 add_predicate_impl(Deoptimization::Reason_predicate, nargs); 3987 } 3988 // loop's limit check predicate should be near the loop. 
3989 add_predicate_impl(Deoptimization::Reason_loop_limit_check, nargs); 3990 } 3991 3992 //----------------------------- store barriers ---------------------------- 3993 #define __ ideal. 3994 3995 void GraphKit::sync_kit(IdealKit& ideal) { 3996 set_all_memory(__ merged_memory()); 3997 set_i_o(__ i_o()); 3998 set_control(__ ctrl()); 3999 } 4000 4001 void GraphKit::final_sync(IdealKit& ideal) { 4002 // Final sync IdealKit and graphKit. 4003 sync_kit(ideal); 4004 } 4005 4006 Node* GraphKit::byte_map_base_node() { 4007 // Get base of card map 4008 CardTableModRefBS* ct = 4009 barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set()); 4010 assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust users of this code"); 4011 if (ct->byte_map_base != NULL) { 4012 return makecon(TypeRawPtr::make((address)ct->byte_map_base)); 4013 } else { 4014 return null(); 4015 } 4016 } 4017 4018 // vanilla/CMS post barrier 4019 // Insert a write-barrier store. This is to let generational GC work; we have 4020 // to flag all oop-stores before the next GC point. 4021 void GraphKit::write_barrier_post(Node* oop_store, 4022 Node* obj, 4023 Node* adr, 4024 uint adr_idx, 4025 Node* val, 4026 bool use_precise) { 4027 // No store check needed if we're storing a NULL or an old object 4028 // (latter case is probably a string constant). The concurrent 4029 // mark sweep garbage collector, however, needs to have all nonNull 4030 // oop updates flagged via card-marks. 4031 if (val != NULL && val->is_Con()) { 4032 // must be either an oop or NULL 4033 const Type* t = val->bottom_type(); 4034 if (t == TypePtr::NULL_PTR || t == Type::TOP) 4035 // stores of null never (?) need barriers 4036 return; 4037 } 4038 4039 if (use_ReduceInitialCardMarks() 4040 && obj == just_allocated_object(control())) { 4041 // We can skip marks on a freshly-allocated object in Eden. 4042 // Keep this code in sync with new_store_pre_barrier() in runtime.cpp. 4043 // That routine informs GC to take appropriate compensating steps, 4044 // upon a slow-path allocation, so as to make this card-mark 4045 // elision safe. 4046 return; 4047 } 4048 4049 if (!use_precise) { 4050 // All card marks for a (non-array) instance are in one place: 4051 adr = obj; 4052 } 4053 // (Else it's an array (or unknown), and we want more precise card marks.) 4054 assert(adr != NULL, ""); 4055 4056 IdealKit ideal(this, true); 4057 4058 // Convert the pointer to an int prior to doing math on it 4059 Node* cast = __ CastPX(__ ctrl(), adr); 4060 4061 // Divide by card size 4062 assert(Universe::heap()->barrier_set()->is_a(BarrierSet::CardTableModRef), 4063 "Only one we handle so far."); 4064 Node* card_offset = __ URShiftX( cast, __ ConI(CardTableModRefBS::card_shift) ); 4065 4066 // Combine card table base and card offset 4067 Node* card_adr = __ AddP(__ top(), byte_map_base_node(), card_offset ); 4068 4069 // Get the alias_index for raw card-mark memory 4070 int adr_type = Compile::AliasIdxRaw; 4071 Node* zero = __ ConI(0); // Dirty card value 4072 BasicType bt = T_BYTE; 4073 4074 if (UseConcMarkSweepGC && UseCondCardMark) { 4075 insert_mem_bar(Op_MemBarVolatile); // StoreLoad barrier 4076 __ sync_kit(this); 4077 } 4078 4079 if (UseCondCardMark) { 4080 // The classic GC reference write barrier is typically implemented 4081 // as a store into the global card mark table. Unfortunately 4082 // unconditional stores can result in false sharing and excessive 4083 // coherence traffic as well as false transactional aborts. 
4084     // UseCondCardMark enables MP "polite" conditional card mark
4085     // stores.  In theory we could relax the load from ctrl() to
4086     // no_ctrl, but that doesn't buy much latitude.
4087     Node* card_val = __ load( __ ctrl(), card_adr, TypeInt::BYTE, bt, adr_type);
4088     __ if_then(card_val, BoolTest::ne, zero);
4089   }
4090
4091   // Smash zero into card
4092   if( !UseConcMarkSweepGC ) {
4093     __ store(__ ctrl(), card_adr, zero, bt, adr_type, MemNode::unordered);
4094   } else {
4095     // Specialized path for CM store barrier
4096     __ storeCM(__ ctrl(), card_adr, zero, oop_store, adr_idx, bt, adr_type);
4097   }
4098
4099   if (UseCondCardMark) {
4100     __ end_if();
4101   }
4102
4103   // Final sync IdealKit and GraphKit.
4104   final_sync(ideal);
4105 }
4106 /*
4107  * Determine if the G1 pre-barrier can be removed. The pre-barrier is
4108  * required by SATB to make sure all objects live at the start of the
4109  * marking are kept alive; therefore all reference updates need to record
4110  * any previous reference stored before writing.
4111  *
4112  * If the previous value is NULL there is no need to save the old value.
4113  * References that are NULL are filtered during runtime by the barrier
4114  * code to avoid unnecessary queuing.
4115  *
4116  * However in the case of newly allocated objects it might be possible to
4117  * prove that the reference about to be overwritten is NULL during compile
4118  * time and avoid adding the barrier code completely.
4119  *
4120  * The compiler needs to determine that the object in which a field is about
4121  * to be written is newly allocated, and that no prior store to the same field
4122  * has happened since the allocation.
4123  *
4124  * Returns true if the pre-barrier can be removed
4125  */
4126 bool GraphKit::g1_can_remove_pre_barrier(PhaseTransform* phase, Node* adr,
4127                                          BasicType bt, uint adr_idx) {
4128   intptr_t offset = 0;
4129   Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
4130   AllocateNode* alloc = AllocateNode::Ideal_allocation(base, phase);
4131
4132   if (offset == Type::OffsetBot) {
4133     return false; // cannot unalias unless there are precise offsets
4134   }
4135
4136   if (alloc == NULL) {
4137     return false; // No allocation found
4138   }
4139
4140   intptr_t size_in_bytes = type2aelembytes(bt);
4141
4142   Node* mem = memory(adr_idx); // start searching here...
4143
4144   for (int cnt = 0; cnt < 50; cnt++) {
4145
4146     if (mem->is_Store()) {
4147
4148       Node* st_adr = mem->in(MemNode::Address);
4149       intptr_t st_offset = 0;
4150       Node* st_base = AddPNode::Ideal_base_and_offset(st_adr, phase, st_offset);
4151
4152       if (st_base == NULL) {
4153         break; // inscrutable pointer
4154       }
4155
4156       // We have found a store with the same base and offset as ours, so break
4157       if (st_base == base && st_offset == offset) {
4158         break;
4159       }
4160
4161       if (st_offset != offset && st_offset != Type::OffsetBot) {
4162         const int MAX_STORE = BytesPerLong;
4163         if (st_offset >= offset + size_in_bytes ||
4164             st_offset <= offset - MAX_STORE ||
4165             st_offset <= offset - mem->as_Store()->memory_size()) {
4166           // Success:  The offsets are provably independent.
4167           // (You may ask, why not just test st_offset != offset and be done?
4168           // The answer is that stores of different sizes can co-exist
4169           // in the same sequence of RawMem effects.  We sometimes initialize
4170           // a whole 'tile' of array elements with a single jint or jlong.)
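          // (Concrete illustration: with an 8-byte field at offset 16, a
          // prior 8-byte store at st_offset 24 is independent since
          // 24 >= 16 + 8, as is one at st_offset 8 since 8 <= 16 - 8.)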
4171           mem = mem->in(MemNode::Memory);
4172           continue; // advance through independent store memory
4173         }
4174       }
4175
4176       if (st_base != base
4177           && MemNode::detect_ptr_independence(base, alloc, st_base,
4178                                               AllocateNode::Ideal_allocation(st_base, phase),
4179                                               phase)) {
4180         // Success:  The bases are provably independent.
4181         mem = mem->in(MemNode::Memory);
4182         continue; // advance through independent store memory
4183       }
4184     } else if (mem->is_Proj() && mem->in(0)->is_Initialize()) {
4185
4186       InitializeNode* st_init = mem->in(0)->as_Initialize();
4187       AllocateNode* st_alloc = st_init->allocation();
4188
4189       // Make sure that we are looking at the same allocation site.
4190       // The alloc variable is guaranteed to not be null here from earlier check.
4191       if (alloc == st_alloc) {
4192         // Check that the initialization is storing NULL so that no previous
4193         // store has been moved up to write a reference directly.
4194         Node* captured_store = st_init->find_captured_store(offset,
4195                                                             type2aelembytes(T_OBJECT),
4196                                                             phase);
4197         if (captured_store == NULL || captured_store == st_init->zero_memory()) {
4198           return true;
4199         }
4200       }
4201     }
4202
4203     // Unless there is an explicit 'continue', we must bail out here,
4204     // because 'mem' is an inscrutable memory state (e.g., a call).
4205     break;
4206   }
4207
4208   return false;
4209 }
4210
4211 // G1 pre/post barriers
4212 void GraphKit::g1_write_barrier_pre(bool do_load,
4213                                     Node* obj,
4214                                     Node* adr,
4215                                     uint alias_idx,
4216                                     Node* val,
4217                                     const TypeOopPtr* val_type,
4218                                     Node* pre_val,
4219                                     BasicType bt) {
4220
4221   // Some sanity checks
4222   // Note: val is unused in this routine.
4223
4224   if (do_load) {
4225     // We need to generate the load of the previous value
4226     assert(obj != NULL, "must have a base");
4227     assert(adr != NULL, "where are we loading from?");
4228     assert(pre_val == NULL, "loaded already?");
4229     assert(val_type != NULL, "need a type");
4230
4231     if (use_ReduceInitialCardMarks()
4232         && g1_can_remove_pre_barrier(&_gvn, adr, bt, alias_idx)) {
4233       return;
4234     }
4235
4236   } else {
4237     // In this case both val_type and alias_idx are unused.
4238     assert(pre_val != NULL, "must be loaded already");
4239     // Nothing to be done if pre_val is null.
4240     if (pre_val->bottom_type() == TypePtr::NULL_PTR) return;
4241     assert(pre_val->bottom_type()->basic_type() == T_OBJECT, "or we shouldn't be here");
4242   }
4243   assert(bt == T_OBJECT || bt == T_VALUETYPE, "or we shouldn't be here");
4244
4245   IdealKit ideal(this, true);
4246
4247   Node* tls = __ thread(); // ThreadLocalStorage
4248
4249   Node* no_ctrl = NULL;
4250   Node* no_base = __ top();
4251   Node* zero  = __ ConI(0);
4252   Node* zeroX = __ ConX(0);
4253
4254   float likely   = PROB_LIKELY(0.999);
4255   float unlikely = PROB_UNLIKELY(0.999);
4256
4257   BasicType active_type = in_bytes(SATBMarkQueue::byte_width_of_active()) == 4 ?
    T_INT : T_BYTE;
4258   assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 4 || in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "flag width");
4259
4260   // Offsets into the thread
4261   const int marking_offset = in_bytes(JavaThread::satb_mark_queue_offset() +  // 648
4262                                       SATBMarkQueue::byte_offset_of_active());
4263   const int index_offset   = in_bytes(JavaThread::satb_mark_queue_offset() +  // 656
4264                                       SATBMarkQueue::byte_offset_of_index());
4265   const int buffer_offset  = in_bytes(JavaThread::satb_mark_queue_offset() +  // 652
4266                                       SATBMarkQueue::byte_offset_of_buf());
4267
4268   // Now the actual pointers into the thread
4269   Node* marking_adr = __ AddP(no_base, tls, __ ConX(marking_offset));
4270   Node* buffer_adr  = __ AddP(no_base, tls, __ ConX(buffer_offset));
4271   Node* index_adr   = __ AddP(no_base, tls, __ ConX(index_offset));
4272
4273   // Now some of the values
4274   Node* marking = __ load(__ ctrl(), marking_adr, TypeInt::INT, active_type, Compile::AliasIdxRaw);
4275
4276   // if (marking)
4277   __ if_then(marking, BoolTest::ne, zero, unlikely); {
4278     BasicType index_bt = TypeX_X->basic_type();
4279     assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading G1 SATBMarkQueue::_index with wrong size.");
4280     Node* index   = __ load(__ ctrl(), index_adr, TypeX_X, index_bt, Compile::AliasIdxRaw);
4281
4282     if (do_load) {
4283       // load original value
4284       // alias_idx correct??
4285       pre_val = __ load(__ ctrl(), adr, val_type, bt, alias_idx);
4286     }
4287
4288     // if (pre_val != NULL)
4289     __ if_then(pre_val, BoolTest::ne, null()); {
4290       Node* buffer  = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);
4291
4292       // is the queue for this thread full?
4293       __ if_then(index, BoolTest::ne, zeroX, likely); {
4294
4295         // decrement the index
4296         Node* next_index = _gvn.transform(new SubXNode(index, __ ConX(sizeof(intptr_t))));
4297
4298         // Now get the buffer location we will log the previous value into and store it
4299         Node *log_addr = __ AddP(no_base, buffer, next_index);
4300         __ store(__ ctrl(), log_addr, pre_val, T_OBJECT, Compile::AliasIdxRaw, MemNode::unordered);
4301         // update the index
4302         __ store(__ ctrl(), index_adr, next_index, index_bt, Compile::AliasIdxRaw, MemNode::unordered);
4303
4304       } __ else_(); {
4305
4306         // logging buffer is full, call the runtime
4307         const TypeFunc *tf = OptoRuntime::g1_wb_pre_Type();
4308         __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), "g1_wb_pre", pre_val, tls);
4309       } __ end_if();  // (!index)
4310     } __ end_if();  // (pre_val != NULL)
4311   } __ end_if();  // (marking)
4312
4313   // Final sync IdealKit and GraphKit.
4314   final_sync(ideal);
4315 }
4316
4317 /*
4318  * G1, similar to any GC with a Young Generation, requires a way to keep track
4319  * of references from the Old Generation to the Young Generation to make sure
4320  * all live objects are found. G1 also needs to keep track of object references
4321  * between different regions to enable evacuation of old regions, which is done
4322  * as part of mixed collections. References are tracked in remembered sets,
4323  * which are continuously updated as references are written, with the help of
4324  * the post-barrier.
4325  *
4326  * To reduce the number of updates to the remembered set the post-barrier
4327  * filters out updates to fields in objects located in the Young Generation, to
4328  * fields in the same region as the reference, stores of NULL, and cases where
4329  * the card is already marked as dirty by an earlier write.
4330  *
4331  * Under certain circumstances it is possible to avoid generating the
4332  * post-barrier completely, if it is possible during compile time to prove
4333  * the object is newly allocated and that no safepoint exists between the
4334  * allocation and the store.
4335  *
4336  * In the case of slow allocation, the allocation code must handle the barrier
4337  * as part of the allocation if the allocated object is not located in the
4338  * nursery; this would happen for humongous objects. This is similar to
4339  * how CMS is required to handle this case, see the comments for the method
4340  * CollectedHeap::new_store_pre_barrier and OptoRuntime::new_store_pre_barrier.
4341  * A deferred card mark is required for these objects and is handled in the
4342  * above-mentioned methods.
4343  *
4344  * Returns true if the post barrier can be removed
4345  */
4346 bool GraphKit::g1_can_remove_post_barrier(PhaseTransform* phase, Node* store,
4347                                           Node* adr) {
4348   intptr_t      offset = 0;
4349   Node*         base   = AddPNode::Ideal_base_and_offset(adr, phase, offset);
4350   AllocateNode* alloc  = AllocateNode::Ideal_allocation(base, phase);
4351
4352   if (offset == Type::OffsetBot) {
4353     return false; // cannot unalias unless there are precise offsets
4354   }
4355
4356   if (alloc == NULL) {
4357     return false; // No allocation found
4358   }
4359
4360   // Start search from Store node
4361   Node* mem = store->in(MemNode::Control);
4362   if (mem->is_Proj() && mem->in(0)->is_Initialize()) {
4363
4364     InitializeNode* st_init = mem->in(0)->as_Initialize();
4365     AllocateNode*  st_alloc = st_init->allocation();
4366
4367     // Make sure we are looking at the same allocation
4368     if (alloc == st_alloc) {
4369       return true;
4370     }
4371   }
4372
4373   return false;
4374 }
4375
4376 //
4377 // Update the card table and add card address to the queue
4378 //
4379 void GraphKit::g1_mark_card(IdealKit& ideal,
4380                             Node* card_adr,
4381                             Node* oop_store,
4382                             uint oop_alias_idx,
4383                             Node* index,
4384                             Node* index_adr,
4385                             Node* buffer,
4386                             const TypeFunc* tf) {
4387
4388   Node* zero  = __ ConI(0);
4389   Node* zeroX = __ ConX(0);
4390   Node* no_base = __ top();
4391   BasicType card_bt = T_BYTE;
4392   // Smash zero into card. MUST BE ORDERED WRT TO STORE
4393   __ storeCM(__ ctrl(), card_adr, zero, oop_store, oop_alias_idx, card_bt, Compile::AliasIdxRaw);
4394
4395   // Now do the queue work
4396   __ if_then(index, BoolTest::ne, zeroX); {
4397
4398     Node* next_index = _gvn.transform(new SubXNode(index, __ ConX(sizeof(intptr_t))));
4399     Node* log_addr = __ AddP(no_base, buffer, next_index);
4400
4401     // Order, see storeCM.

//
// Update the card table and add card address to the queue
//
void GraphKit::g1_mark_card(IdealKit& ideal,
                            Node* card_adr,
                            Node* oop_store,
                            uint oop_alias_idx,
                            Node* index,
                            Node* index_adr,
                            Node* buffer,
                            const TypeFunc* tf) {

  Node* zero  = __ ConI(0);
  Node* zeroX = __ ConX(0);
  Node* no_base = __ top();
  BasicType card_bt = T_BYTE;
  // Smash zero into card. MUST BE ORDERED WRT TO STORE
  __ storeCM(__ ctrl(), card_adr, zero, oop_store, oop_alias_idx, card_bt, Compile::AliasIdxRaw);

  // Now do the queue work
  __ if_then(index, BoolTest::ne, zeroX); {

    Node* next_index = _gvn.transform(new SubXNode(index, __ ConX(sizeof(intptr_t))));
    Node* log_addr = __ AddP(no_base, buffer, next_index);

    // Order, see storeCM.
    __ store(__ ctrl(), log_addr, card_adr, T_ADDRESS, Compile::AliasIdxRaw, MemNode::unordered);
    __ store(__ ctrl(), index_adr, next_index, TypeX_X->basic_type(), Compile::AliasIdxRaw, MemNode::unordered);

  } __ else_(); {
    __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), "g1_wb_post", card_adr, __ thread());
  } __ end_if();

}
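
/*
 * Roughly, the code emitted above does the following (sketch only; the
 * queue accessors are descriptive names, not the real interface). Note
 * that the zero stored by storeCM is G1's dirty card value:
 *
 *   *card_adr = 0;                            // dirty the card, ordered
 *                                             // w.r.t. the oop store
 *   if (thread->dcq_index() != 0) {           // room in the dirty card queue?
 *     index = thread->dcq_index() - sizeof(intptr_t);
 *     thread->dcq_buffer()[index] = card_adr; // enqueue the card
 *     thread->dcq_index() = index;
 *   } else {
 *     g1_wb_post(card_adr, thread);           // buffer full, leaf call
 *   }
 */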

void GraphKit::g1_write_barrier_post(Node* oop_store,
                                     Node* obj,
                                     Node* adr,
                                     uint alias_idx,
                                     Node* val,
                                     BasicType bt,
                                     bool use_precise) {
  // If we are writing a NULL then we need no post barrier

  if (val != NULL && val->is_Con() && val->bottom_type() == TypePtr::NULL_PTR) {
    // Must be NULL
    const Type* t = val->bottom_type();
    assert(t == Type::TOP || t == TypePtr::NULL_PTR, "must be NULL");
    // No post barrier if writing NULL
    return;
  }

  if (use_ReduceInitialCardMarks() && obj == just_allocated_object(control())) {
    // We can skip marks on a freshly-allocated object in Eden.
    // Keep this code in sync with new_store_pre_barrier() in runtime.cpp.
    // That routine informs GC to take appropriate compensating steps,
    // upon a slow-path allocation, so as to make this card-mark
    // elision safe.
    return;
  }

  if (use_ReduceInitialCardMarks()
      && g1_can_remove_post_barrier(&_gvn, oop_store, adr)) {
    return;
  }

  if (!use_precise) {
    // All card marks for a (non-array) instance are in one place:
    adr = obj;
  }
  // (Else it's an array (or unknown), and we want more precise card marks.)
  assert(adr != NULL, "");

  IdealKit ideal(this, true);

  Node* tls = __ thread(); // ThreadLocalStorage

  Node* no_base = __ top();
  float likely = PROB_LIKELY(0.999);
  float unlikely = PROB_UNLIKELY(0.999);
  Node* young_card = __ ConI((jint)G1SATBCardTableModRefBS::g1_young_card_val());
  Node* dirty_card = __ ConI((jint)CardTableModRefBS::dirty_card_val());
  Node* zeroX = __ ConX(0);

  // Get the alias_index for raw card-mark memory
  const TypePtr* card_type = TypeRawPtr::BOTTOM;

  const TypeFunc* tf = OptoRuntime::g1_wb_post_Type();

  // Offsets into the thread
  const int index_offset  = in_bytes(JavaThread::dirty_card_queue_offset() +
                                     DirtyCardQueue::byte_offset_of_index());
  const int buffer_offset = in_bytes(JavaThread::dirty_card_queue_offset() +
                                     DirtyCardQueue::byte_offset_of_buf());

  // Pointers into the thread

  Node* buffer_adr = __ AddP(no_base, tls, __ ConX(buffer_offset));
  Node* index_adr  = __ AddP(no_base, tls, __ ConX(index_offset));

  // Now some values
  // Use ctrl to avoid hoisting these values past a safepoint, which could
  // potentially reset these fields in the JavaThread.
  Node* index  = __ load(__ ctrl(), index_adr, TypeX_X, TypeX_X->basic_type(), Compile::AliasIdxRaw);
  Node* buffer = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);

  // Convert the store obj pointer to an int prior to doing math on it
  // Must use ctrl to prevent "integerized oop" existing across safepoint
  Node* cast = __ CastPX(__ ctrl(), adr);

  // Divide pointer by card size
  Node* card_offset = __ URShiftX(cast, __ ConI(CardTableModRefBS::card_shift));

  // Combine card table base and card offset
  Node* card_adr = __ AddP(no_base, byte_map_base_node(), card_offset);

  // If we know the value being stored, check whether the store crosses regions.

  if (val != NULL) {
    // Does the store cause us to cross regions?

    // Should be able to do an unsigned compare of region_size instead of
    // an extra shift. Do we have an unsigned compare??
    // Node* region_size = __ ConI(1 << HeapRegion::LogOfHRGrainBytes);
    Node* xor_res = __ URShiftX(__ XorX(cast, __ CastPX(__ ctrl(), val)), __ ConI(HeapRegion::LogOfHRGrainBytes));

    // if (xor_res == 0) same region so skip
    __ if_then(xor_res, BoolTest::ne, zeroX); {

      // No barrier if we are storing a NULL
      __ if_then(val, BoolTest::ne, null(), unlikely); {

        // Ok must mark the card if not already dirty

        // load the original value of the card
        Node* card_val = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);

        __ if_then(card_val, BoolTest::ne, young_card); {
          sync_kit(ideal);
          // Use Op_MemBarVolatile to achieve the effect of a StoreLoad barrier.
          insert_mem_bar(Op_MemBarVolatile, oop_store);
          __ sync_kit(this);

          Node* card_val_reload = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);
          __ if_then(card_val_reload, BoolTest::ne, dirty_card); {
            g1_mark_card(ideal, card_adr, oop_store, alias_idx, index, index_adr, buffer, tf);
          } __ end_if();
        } __ end_if();
      } __ end_if();
    } __ end_if();
  } else {
    // The Object.clone() intrinsic uses this path if !ReduceInitialCardMarks.
    // We don't need a barrier here if the destination is a newly allocated object
    // in Eden. Otherwise, GC verification breaks because we assume that cards in Eden
    // are set to 'g1_young_gen' (see G1SATBCardTableModRefBS::verify_g1_young_region()).
    assert(!use_ReduceInitialCardMarks(), "can only happen with card marking");
    Node* card_val = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);
    __ if_then(card_val, BoolTest::ne, young_card); {
      g1_mark_card(ideal, card_adr, oop_store, alias_idx, index, index_adr, buffer, tf);
    } __ end_if();
  }

  // Final sync IdealKit and GraphKit.
  final_sync(ideal);
}
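
/*
 * For reference, a rough pseudocode sketch of the barrier emitted above
 * when the stored value is known (illustration only):
 *
 *   if ((((uintptr_t)adr ^ (uintptr_t)val) >> LogOfHRGrainBytes) != 0  // crosses regions?
 *       && val != NULL) {
 *     card = &card_table[(uintptr_t)adr >> card_shift];
 *     if (*card != g1_young_card_val()) {
 *       StoreLoad_barrier();             // order the card read after the oop store
 *       if (*card != dirty_card_val()) {
 *         g1_mark_card(card);            // dirty the card and enqueue it
 *       }
 *     }
 *   }
 */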
#undef __


Node* GraphKit::load_String_length(Node* ctrl, Node* str) {
  Node* len = load_array_length(load_String_value(ctrl, str));
  Node* coder = load_String_coder(ctrl, str);
  // Divide length by 2 if coder is UTF16
  return _gvn.transform(new RShiftINode(len, coder));
}

Node* GraphKit::load_String_value(Node* ctrl, Node* str) {
  int value_offset = java_lang_String::value_offset_in_bytes();
  const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
                                                     false, NULL, Type::Offset(0));
  const TypePtr* value_field_type = string_type->add_offset(value_offset);
  const TypeAryPtr* value_type = TypeAryPtr::make(TypePtr::NotNull,
                                                  TypeAry::make(TypeInt::BYTE, TypeInt::POS),
                                                  ciTypeArrayKlass::make(T_BYTE), true, Type::Offset(0));
  int value_field_idx = C->get_alias_index(value_field_type);
  Node* load = make_load(ctrl, basic_plus_adr(str, str, value_offset),
                         value_type, T_OBJECT, value_field_idx, MemNode::unordered);
  // String.value field is known to be @Stable.
  if (UseImplicitStableValues) {
    load = cast_array_to_stable(load, value_type);
  }
  return load;
}

Node* GraphKit::load_String_coder(Node* ctrl, Node* str) {
  if (!CompactStrings) {
    return intcon(java_lang_String::CODER_UTF16);
  }
  int coder_offset = java_lang_String::coder_offset_in_bytes();
  const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
                                                     false, NULL, Type::Offset(0));
  const TypePtr* coder_field_type = string_type->add_offset(coder_offset);
  int coder_field_idx = C->get_alias_index(coder_field_type);
  return make_load(ctrl, basic_plus_adr(str, str, coder_offset),
                   TypeInt::BYTE, T_BYTE, coder_field_idx, MemNode::unordered);
}
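
/*
 * Worked example of the length computation above: with compact strings,
 * String.value is a byte[] and String.coder is LATIN1 (0) or UTF16 (1).
 * A 3-character UTF16 string stores 6 bytes, so length = 6 >> 1 == 3;
 * a LATIN1 string of 3 bytes gives length = 3 >> 0 == 3. The shift by
 * the coder thus divides the byte length by the per-char byte width
 * without a branch.
 */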
void GraphKit::store_String_value(Node* ctrl, Node* str, Node* value) {
  int value_offset = java_lang_String::value_offset_in_bytes();
  const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
                                                     false, NULL, Type::Offset(0));
  const TypePtr* value_field_type = string_type->add_offset(value_offset);
  store_oop_to_object(ctrl, str, basic_plus_adr(str, value_offset), value_field_type,
                      value, TypeAryPtr::BYTES, T_OBJECT, MemNode::unordered);
}

void GraphKit::store_String_coder(Node* ctrl, Node* str, Node* value) {
  int coder_offset = java_lang_String::coder_offset_in_bytes();
  const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
                                                     false, NULL, Type::Offset(0));
  const TypePtr* coder_field_type = string_type->add_offset(coder_offset);
  int coder_field_idx = C->get_alias_index(coder_field_type);
  store_to_memory(ctrl, basic_plus_adr(str, coder_offset),
                  value, T_BYTE, coder_field_idx, MemNode::unordered);
}

// Capture src and dst memory state with a MergeMemNode
Node* GraphKit::capture_memory(const TypePtr* src_type, const TypePtr* dst_type) {
  if (src_type == dst_type) {
    // Types are equal, we don't need a MergeMemNode
    return memory(src_type);
  }
  MergeMemNode* merge = MergeMemNode::make(map()->memory());
  record_for_igvn(merge); // fold it up later, if possible
  int src_idx = C->get_alias_index(src_type);
  int dst_idx = C->get_alias_index(dst_type);
  merge->set_memory_at(src_idx, memory(src_idx));
  merge->set_memory_at(dst_idx, memory(dst_idx));
  return merge;
}

Node* GraphKit::compress_string(Node* src, const TypeAryPtr* src_type, Node* dst, Node* count) {
  assert(Matcher::match_rule_supported(Op_StrCompressedCopy), "Intrinsic not supported");
  assert(src_type == TypeAryPtr::BYTES || src_type == TypeAryPtr::CHARS, "invalid source type");
  // If input and output memory types differ, capture both states to preserve
  // the dependency between preceding and subsequent loads/stores.
  // For example, the following program:
  //   StoreB
  //   compress_string
  //   LoadB
  // has this memory graph (use->def):
  //   LoadB -> compress_string -> CharMem
  //   ... -> StoreB -> ByteMem
  // The intrinsic hides the dependency between LoadB and StoreB, causing
  // the load to read from memory not containing the result of the StoreB.
  // The correct memory graph should look like this:
  //   LoadB -> compress_string -> MergeMem(CharMem, StoreB(ByteMem))
  Node* mem = capture_memory(src_type, TypeAryPtr::BYTES);
  StrCompressedCopyNode* str = new StrCompressedCopyNode(control(), mem, src, dst, count);
  Node* res_mem = _gvn.transform(new SCMemProjNode(str));
  set_memory(res_mem, TypeAryPtr::BYTES);
  return str;
}

void GraphKit::inflate_string(Node* src, Node* dst, const TypeAryPtr* dst_type, Node* count) {
  assert(Matcher::match_rule_supported(Op_StrInflatedCopy), "Intrinsic not supported");
  assert(dst_type == TypeAryPtr::BYTES || dst_type == TypeAryPtr::CHARS, "invalid dest type");
  // Capture src and dst memory (see comment in 'compress_string').
  Node* mem = capture_memory(TypeAryPtr::BYTES, dst_type);
  StrInflatedCopyNode* str = new StrInflatedCopyNode(control(), mem, src, dst, count);
  set_memory(_gvn.transform(str), dst_type);
}

void GraphKit::inflate_string_slow(Node* src, Node* dst, Node* start, Node* count) {
  /**
   * int i_char = start;
   * for (int i_byte = 0; i_byte < count; i_byte++) {
   *   dst[i_char++] = (char)(src[i_byte] & 0xff);
   * }
   */
  add_predicate();
  RegionNode* head = new RegionNode(3);
  head->init_req(1, control());
  gvn().set_type(head, Type::CONTROL);
  record_for_igvn(head);

  Node* i_byte = new PhiNode(head, TypeInt::INT);
  i_byte->init_req(1, intcon(0));
  gvn().set_type(i_byte, TypeInt::INT);
  record_for_igvn(i_byte);

  Node* i_char = new PhiNode(head, TypeInt::INT);
  i_char->init_req(1, start);
  gvn().set_type(i_char, TypeInt::INT);
  record_for_igvn(i_char);

  Node* mem = PhiNode::make(head, memory(TypeAryPtr::BYTES), Type::MEMORY, TypeAryPtr::BYTES);
  gvn().set_type(mem, Type::MEMORY);
  record_for_igvn(mem);
  set_control(head);
  set_memory(mem, TypeAryPtr::BYTES);
  Node* ch = load_array_element(control(), src, i_byte, TypeAryPtr::BYTES);
  Node* st = store_to_memory(control(), array_element_address(dst, i_char, T_BYTE),
                             AndI(ch, intcon(0xff)), T_CHAR, TypeAryPtr::BYTES, MemNode::unordered,
                             false, false, true /* mismatched */);

  IfNode* iff = create_and_map_if(head, Bool(CmpI(i_byte, count), BoolTest::lt), PROB_FAIR, COUNT_UNKNOWN);
  head->init_req(2, IfTrue(iff));
  mem->init_req(2, st);
  i_byte->init_req(2, AddI(i_byte, intcon(1)));
  i_char->init_req(2, AddI(i_char, intcon(2)));

  set_control(IfFalse(iff));
  set_memory(st, TypeAryPtr::BYTES);
}
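
/*
 * Sketch of the graph built above, illustrating the manual loop
 * construction: 'head' is a two-entry Region (entry on edge 1, backedge on
 * edge 2), with Phis merging the induction variables and byte-array memory:
 *
 *   head:   i_byte = Phi(0, i_byte + 1)
 *           i_char = Phi(start, i_char + 2)     // two bytes per char
 *           mem    = Phi(entry_mem, st)
 *   body:   ch = src[i_byte]
 *           st = store dst[i_char] = ch & 0xff  // mismatched byte->char store
 *   check:  If(i_byte < count), IfTrue is the backedge into head
 *
 * Exit control is IfFalse and the exiting memory state is 'st'.
 */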
Node*
GraphKit::make_constant_from_field(ciField* field, Node* obj) {
  if (!field->is_constant()) {
    return NULL; // Field not marked as constant.
  }
  ciInstance* holder = NULL;
  if (!field->is_static()) {
    ciObject* const_oop = obj->bottom_type()->is_oopptr()->const_oop();
    if (const_oop != NULL && const_oop->is_instance()) {
      holder = const_oop->as_instance();
    }
  }
  const Type* con_type = Type::make_constant_from_field(field, holder, field->layout_type(),
                                                        /*is_unsigned_load=*/false);
  if (con_type != NULL) {
    Node* con = makecon(con_type);
    if (field->layout_type() == T_VALUETYPE) {
      // Load value type from constant oop
      con = ValueTypeNode::make_from_oop(this, con, field->type()->as_value_klass());
    }
    return con;
  }
  return NULL;
}

Node* GraphKit::cast_array_to_stable(Node* ary, const TypeAryPtr* ary_type) {
  // Reify the property as a CastPP node in the Ideal graph to comply with the
  // monotonicity assumption of CCP analysis.
  return _gvn.transform(new CastPPNode(ary, ary_type->cast_to_stable(true)));
}
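
/*
 * Usage sketch for cast_array_to_stable (a hypothetical caller, mirroring
 * what load_String_value does above): given a load of a known-immutable
 * byte[] such as String.value,
 *
 *   Node* arr = make_load(ctrl, adr, value_type, T_OBJECT, idx, MemNode::unordered);
 *   arr = cast_array_to_stable(arr, value_type);
 *
 * the CastPP pins the stronger "stable array" type on the value so that
 * later loads from 'arr' may constant-fold, while keeping the type change
 * monotonic as CCP requires.
 */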