/*
 * Copyright (c) 2015, 2019, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
21 * 22 */ 23 24 #include "precompiled.hpp" 25 26 #include "gc/shenandoah/c2/shenandoahSupport.hpp" 27 #include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp" 28 #include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp" 29 #include "gc/shenandoah/shenandoahForwarding.hpp" 30 #include "gc/shenandoah/shenandoahHeap.hpp" 31 #include "gc/shenandoah/shenandoahHeapRegion.hpp" 32 #include "gc/shenandoah/shenandoahRuntime.hpp" 33 #include "gc/shenandoah/shenandoahThreadLocalData.hpp" 34 #include "opto/arraycopynode.hpp" 35 #include "opto/block.hpp" 36 #include "opto/callnode.hpp" 37 #include "opto/castnode.hpp" 38 #include "opto/movenode.hpp" 39 #include "opto/phaseX.hpp" 40 #include "opto/rootnode.hpp" 41 #include "opto/runtime.hpp" 42 #include "opto/subnode.hpp" 43 44 bool ShenandoahBarrierC2Support::expand(Compile* C, PhaseIterGVN& igvn) { 45 ShenandoahBarrierSetC2State* state = ShenandoahBarrierSetC2::bsc2()->state(); 46 if ((state->enqueue_barriers_count() + 47 state->load_reference_barriers_count()) > 0) { 48 bool attempt_more_loopopts = ShenandoahLoopOptsAfterExpansion; 49 C->clear_major_progress(); 50 PhaseIdealLoop ideal_loop(igvn, LoopOptsShenandoahExpand); 51 if (C->failing()) return false; 52 PhaseIdealLoop::verify(igvn); 53 DEBUG_ONLY(verify_raw_mem(C->root());) 54 if (attempt_more_loopopts) { 55 C->set_major_progress(); 56 if (!C->optimize_loops(igvn, LoopOptsShenandoahPostExpand)) { 57 return false; 58 } 59 C->clear_major_progress(); 60 } 61 } 62 return true; 63 } 64 65 bool ShenandoahBarrierC2Support::is_heap_state_test(Node* iff, int mask) { 66 if (!UseShenandoahGC) { 67 return false; 68 } 69 assert(iff->is_If(), "bad input"); 70 if (iff->Opcode() != Op_If) { 71 return false; 72 } 73 Node* bol = iff->in(1); 74 if (!bol->is_Bool() || bol->as_Bool()->_test._test != BoolTest::ne) { 75 return false; 76 } 77 Node* cmp = bol->in(1); 78 if (cmp->Opcode() != Op_CmpI) { 79 return false; 80 } 81 Node* in1 = cmp->in(1); 82 Node* in2 = cmp->in(2); 83 if 
(in2->find_int_con(-1) != 0) { 84 return false; 85 } 86 if (in1->Opcode() != Op_AndI) { 87 return false; 88 } 89 in2 = in1->in(2); 90 if (in2->find_int_con(-1) != mask) { 91 return false; 92 } 93 in1 = in1->in(1); 94 95 return is_gc_state_load(in1); 96 } 97 98 bool ShenandoahBarrierC2Support::is_heap_stable_test(Node* iff) { 99 return is_heap_state_test(iff, ShenandoahHeap::HAS_FORWARDED); 100 } 101 102 bool ShenandoahBarrierC2Support::is_gc_state_load(Node *n) { 103 if (!UseShenandoahGC) { 104 return false; 105 } 106 if (n->Opcode() != Op_LoadB && n->Opcode() != Op_LoadUB) { 107 return false; 108 } 109 Node* addp = n->in(MemNode::Address); 110 if (!addp->is_AddP()) { 111 return false; 112 } 113 Node* base = addp->in(AddPNode::Address); 114 Node* off = addp->in(AddPNode::Offset); 115 if (base->Opcode() != Op_ThreadLocal) { 116 return false; 117 } 118 if (off->find_intptr_t_con(-1) != in_bytes(ShenandoahThreadLocalData::gc_state_offset())) { 119 return false; 120 } 121 return true; 122 } 123 124 bool ShenandoahBarrierC2Support::has_safepoint_between(Node* start, Node* stop, PhaseIdealLoop *phase) { 125 assert(phase->is_dominator(stop, start), "bad inputs"); 126 ResourceMark rm; 127 Unique_Node_List wq; 128 wq.push(start); 129 for (uint next = 0; next < wq.size(); next++) { 130 Node *m = wq.at(next); 131 if (m == stop) { 132 continue; 133 } 134 if (m->is_SafePoint() && !m->is_CallLeaf()) { 135 return true; 136 } 137 if (m->is_Region()) { 138 for (uint i = 1; i < m->req(); i++) { 139 wq.push(m->in(i)); 140 } 141 } else { 142 wq.push(m->in(0)); 143 } 144 } 145 return false; 146 } 147 148 bool ShenandoahBarrierC2Support::try_common_gc_state_load(Node *n, PhaseIdealLoop *phase) { 149 assert(is_gc_state_load(n), "inconsistent"); 150 Node* addp = n->in(MemNode::Address); 151 Node* dominator = NULL; 152 for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) { 153 Node* u = addp->fast_out(i); 154 assert(is_gc_state_load(u), "inconsistent"); 155 if (u != n && 
phase->is_dominator(u->in(0), n->in(0))) { 156 if (dominator == NULL) { 157 dominator = u; 158 } else { 159 if (phase->dom_depth(u->in(0)) < phase->dom_depth(dominator->in(0))) { 160 dominator = u; 161 } 162 } 163 } 164 } 165 if (dominator == NULL || has_safepoint_between(n->in(0), dominator->in(0), phase)) { 166 return false; 167 } 168 phase->igvn().replace_node(n, dominator); 169 170 return true; 171 } 172 173 #ifdef ASSERT 174 bool ShenandoahBarrierC2Support::verify_helper(Node* in, Node_Stack& phis, VectorSet& visited, verify_type t, bool trace, Unique_Node_List& barriers_used) { 175 assert(phis.size() == 0, ""); 176 177 while (true) { 178 if (in->bottom_type() == TypePtr::NULL_PTR) { 179 if (trace) {tty->print_cr("NULL");} 180 } else if (!in->bottom_type()->make_ptr()->make_oopptr()) { 181 if (trace) {tty->print_cr("Non oop");} 182 } else if (t == ShenandoahLoad && ShenandoahOptimizeStableFinals && 183 in->bottom_type()->make_ptr()->isa_aryptr() && 184 in->bottom_type()->make_ptr()->is_aryptr()->is_stable()) { 185 if (trace) {tty->print_cr("Stable array load");} 186 } else { 187 if (in->is_ConstraintCast()) { 188 in = in->in(1); 189 continue; 190 } else if (in->is_AddP()) { 191 assert(!in->in(AddPNode::Address)->is_top(), "no raw memory access"); 192 in = in->in(AddPNode::Address); 193 continue; 194 } else if (in->is_Con()) { 195 if (trace) { 196 tty->print("Found constant"); 197 in->dump(); 198 } 199 } else if (in->Opcode() == Op_Parm) { 200 if (trace) { 201 tty->print("Found argument"); 202 } 203 } else if (in->Opcode() == Op_CreateEx) { 204 if (trace) { 205 tty->print("Found create-exception"); 206 } 207 } else if (in->Opcode() == Op_LoadP && in->adr_type() == TypeRawPtr::BOTTOM) { 208 if (trace) { 209 tty->print("Found raw LoadP (OSR argument?)"); 210 } 211 } else if (in->Opcode() == Op_ShenandoahLoadReferenceBarrier) { 212 if (t == ShenandoahOopStore) { 213 uint i = 0; 214 for (; i < phis.size(); i++) { 215 Node* n = phis.node_at(i); 216 if (n->Opcode() 
== Op_ShenandoahEnqueueBarrier) { 217 break; 218 } 219 } 220 if (i == phis.size()) { 221 return false; 222 } 223 } 224 barriers_used.push(in); 225 if (trace) {tty->print("Found barrier"); in->dump();} 226 } else if (in->Opcode() == Op_ShenandoahEnqueueBarrier) { 227 if (t != ShenandoahOopStore) { 228 in = in->in(1); 229 continue; 230 } 231 if (trace) {tty->print("Found enqueue barrier"); in->dump();} 232 phis.push(in, in->req()); 233 in = in->in(1); 234 continue; 235 } else if (in->is_Proj() && in->in(0)->is_Allocate()) { 236 if (trace) { 237 tty->print("Found alloc"); 238 in->in(0)->dump(); 239 } 240 } else if (in->is_Proj() && (in->in(0)->Opcode() == Op_CallStaticJava || in->in(0)->Opcode() == Op_CallDynamicJava)) { 241 if (trace) { 242 tty->print("Found Java call"); 243 } 244 } else if (in->is_Phi()) { 245 if (!visited.test_set(in->_idx)) { 246 if (trace) {tty->print("Pushed phi:"); in->dump();} 247 phis.push(in, 2); 248 in = in->in(1); 249 continue; 250 } 251 if (trace) {tty->print("Already seen phi:"); in->dump();} 252 } else if (in->Opcode() == Op_CMoveP || in->Opcode() == Op_CMoveN) { 253 if (!visited.test_set(in->_idx)) { 254 if (trace) {tty->print("Pushed cmovep:"); in->dump();} 255 phis.push(in, CMoveNode::IfTrue); 256 in = in->in(CMoveNode::IfFalse); 257 continue; 258 } 259 if (trace) {tty->print("Already seen cmovep:"); in->dump();} 260 } else if (in->Opcode() == Op_EncodeP || in->Opcode() == Op_DecodeN) { 261 in = in->in(1); 262 continue; 263 } else { 264 return false; 265 } 266 } 267 bool cont = false; 268 while (phis.is_nonempty()) { 269 uint idx = phis.index(); 270 Node* phi = phis.node(); 271 if (idx >= phi->req()) { 272 if (trace) {tty->print("Popped phi:"); phi->dump();} 273 phis.pop(); 274 continue; 275 } 276 if (trace) {tty->print("Next entry(%d) for phi:", idx); phi->dump();} 277 in = phi->in(idx); 278 phis.set_index(idx+1); 279 cont = true; 280 break; 281 } 282 if (!cont) { 283 break; 284 } 285 } 286 return true; 287 } 288 289 void 
ShenandoahBarrierC2Support::report_verify_failure(const char* msg, Node* n1, Node* n2) { 290 if (n1 != NULL) { 291 n1->dump(+10); 292 } 293 if (n2 != NULL) { 294 n2->dump(+10); 295 } 296 fatal("%s", msg); 297 } 298 299 void ShenandoahBarrierC2Support::verify(RootNode* root) { 300 ResourceMark rm; 301 Unique_Node_List wq; 302 GrowableArray<Node*> barriers; 303 Unique_Node_List barriers_used; 304 Node_Stack phis(0); 305 VectorSet visited(Thread::current()->resource_area()); 306 const bool trace = false; 307 const bool verify_no_useless_barrier = false; 308 309 wq.push(root); 310 for (uint next = 0; next < wq.size(); next++) { 311 Node *n = wq.at(next); 312 if (n->is_Load()) { 313 const bool trace = false; 314 if (trace) {tty->print("Verifying"); n->dump();} 315 if (n->Opcode() == Op_LoadRange || n->Opcode() == Op_LoadKlass || n->Opcode() == Op_LoadNKlass) { 316 if (trace) {tty->print_cr("Load range/klass");} 317 } else { 318 const TypePtr* adr_type = n->as_Load()->adr_type(); 319 320 if (adr_type->isa_oopptr() && adr_type->is_oopptr()->offset() == oopDesc::mark_offset_in_bytes()) { 321 if (trace) {tty->print_cr("Mark load");} 322 } else if (adr_type->isa_instptr() && 323 adr_type->is_instptr()->klass()->is_subtype_of(Compile::current()->env()->Reference_klass()) && 324 adr_type->is_instptr()->offset() == java_lang_ref_Reference::referent_offset) { 325 if (trace) {tty->print_cr("Reference.get()");} 326 } else { 327 bool verify = true; 328 if (adr_type->isa_instptr()) { 329 const TypeInstPtr* tinst = adr_type->is_instptr(); 330 ciKlass* k = tinst->klass(); 331 assert(k->is_instance_klass(), ""); 332 ciInstanceKlass* ik = (ciInstanceKlass*)k; 333 int offset = adr_type->offset(); 334 335 if ((ik->debug_final_field_at(offset) && ShenandoahOptimizeInstanceFinals) || 336 (ik->debug_stable_field_at(offset) && ShenandoahOptimizeStableFinals)) { 337 if (trace) {tty->print_cr("Final/stable");} 338 verify = false; 339 } else if (k == ciEnv::current()->Class_klass() && 340 
tinst->const_oop() != NULL && 341 tinst->offset() >= (ik->size_helper() * wordSize)) { 342 ciInstanceKlass* k = tinst->const_oop()->as_instance()->java_lang_Class_klass()->as_instance_klass(); 343 ciField* field = k->get_field_by_offset(tinst->offset(), true); 344 if ((ShenandoahOptimizeStaticFinals && field->is_final()) || 345 (ShenandoahOptimizeStableFinals && field->is_stable())) { 346 verify = false; 347 } 348 } 349 } 350 351 if (verify && !verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahLoad, trace, barriers_used)) { 352 report_verify_failure("Shenandoah verification: Load should have barriers", n); 353 } 354 } 355 } 356 } else if (n->is_Store()) { 357 const bool trace = false; 358 359 if (trace) {tty->print("Verifying"); n->dump();} 360 if (n->in(MemNode::ValueIn)->bottom_type()->make_oopptr()) { 361 Node* adr = n->in(MemNode::Address); 362 bool verify = true; 363 364 if (adr->is_AddP() && adr->in(AddPNode::Base)->is_top()) { 365 adr = adr->in(AddPNode::Address); 366 if (adr->is_AddP()) { 367 assert(adr->in(AddPNode::Base)->is_top(), ""); 368 adr = adr->in(AddPNode::Address); 369 if (adr->Opcode() == Op_LoadP && 370 adr->in(MemNode::Address)->in(AddPNode::Base)->is_top() && 371 adr->in(MemNode::Address)->in(AddPNode::Address)->Opcode() == Op_ThreadLocal && 372 adr->in(MemNode::Address)->in(AddPNode::Offset)->find_intptr_t_con(-1) == in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset())) { 373 if (trace) {tty->print_cr("SATB prebarrier");} 374 verify = false; 375 } 376 } 377 } 378 379 if (verify && !verify_helper(n->in(MemNode::ValueIn), phis, visited, ShenandoahStoreValEnqueueBarrier ? 
ShenandoahOopStore : ShenandoahValue, trace, barriers_used)) { 380 report_verify_failure("Shenandoah verification: Store should have barriers", n); 381 } 382 } 383 if (!verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahStore, trace, barriers_used)) { 384 report_verify_failure("Shenandoah verification: Store (address) should have barriers", n); 385 } 386 } else if (n->Opcode() == Op_CmpP) { 387 const bool trace = false; 388 389 Node* in1 = n->in(1); 390 Node* in2 = n->in(2); 391 if (in1->bottom_type()->isa_oopptr()) { 392 if (trace) {tty->print("Verifying"); n->dump();} 393 394 bool mark_inputs = false; 395 if (in1->bottom_type() == TypePtr::NULL_PTR || in2->bottom_type() == TypePtr::NULL_PTR || 396 (in1->is_Con() || in2->is_Con())) { 397 if (trace) {tty->print_cr("Comparison against a constant");} 398 mark_inputs = true; 399 } else if ((in1->is_CheckCastPP() && in1->in(1)->is_Proj() && in1->in(1)->in(0)->is_Allocate()) || 400 (in2->is_CheckCastPP() && in2->in(1)->is_Proj() && in2->in(1)->in(0)->is_Allocate())) { 401 if (trace) {tty->print_cr("Comparison with newly alloc'ed object");} 402 mark_inputs = true; 403 } else { 404 assert(in2->bottom_type()->isa_oopptr(), ""); 405 406 if (!verify_helper(in1, phis, visited, ShenandoahStore, trace, barriers_used) || 407 !verify_helper(in2, phis, visited, ShenandoahStore, trace, barriers_used)) { 408 report_verify_failure("Shenandoah verification: Cmp should have barriers", n); 409 } 410 } 411 if (verify_no_useless_barrier && 412 mark_inputs && 413 (!verify_helper(in1, phis, visited, ShenandoahValue, trace, barriers_used) || 414 !verify_helper(in2, phis, visited, ShenandoahValue, trace, barriers_used))) { 415 phis.clear(); 416 visited.Reset(); 417 } 418 } 419 } else if (n->is_LoadStore()) { 420 if (n->in(MemNode::ValueIn)->bottom_type()->make_ptr() && 421 !verify_helper(n->in(MemNode::ValueIn), phis, visited, ShenandoahStoreValEnqueueBarrier ? 
ShenandoahOopStore : ShenandoahValue, trace, barriers_used)) { 422 report_verify_failure("Shenandoah verification: LoadStore (value) should have barriers", n); 423 } 424 425 if (n->in(MemNode::Address)->bottom_type()->make_oopptr() && !verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahStore, trace, barriers_used)) { 426 report_verify_failure("Shenandoah verification: LoadStore (address) should have barriers", n); 427 } 428 } else if (n->Opcode() == Op_CallLeafNoFP || n->Opcode() == Op_CallLeaf) { 429 CallNode* call = n->as_Call(); 430 431 static struct { 432 const char* name; 433 struct { 434 int pos; 435 verify_type t; 436 } args[6]; 437 } calls[] = { 438 "aescrypt_encryptBlock", 439 { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { TypeFunc::Parms+2, ShenandoahLoad }, 440 { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, 441 "aescrypt_decryptBlock", 442 { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { TypeFunc::Parms+2, ShenandoahLoad }, 443 { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, 444 "multiplyToLen", 445 { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+2, ShenandoahLoad }, { TypeFunc::Parms+4, ShenandoahStore }, 446 { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, 447 "squareToLen", 448 { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+2, ShenandoahLoad }, { -1, ShenandoahNone}, 449 { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, 450 "montgomery_multiply", 451 { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahLoad }, { TypeFunc::Parms+2, ShenandoahLoad }, 452 { TypeFunc::Parms+6, ShenandoahStore }, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, 453 "montgomery_square", 454 { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahLoad }, { TypeFunc::Parms+5, ShenandoahStore }, 455 { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, 
ShenandoahNone} }, 456 "mulAdd", 457 { { TypeFunc::Parms, ShenandoahStore }, { TypeFunc::Parms+1, ShenandoahLoad }, { -1, ShenandoahNone}, 458 { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, 459 "vectorizedMismatch", 460 { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahLoad }, { -1, ShenandoahNone}, 461 { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, 462 "updateBytesCRC32", 463 { { TypeFunc::Parms+1, ShenandoahLoad }, { -1, ShenandoahNone}, { -1, ShenandoahNone}, 464 { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, 465 "updateBytesAdler32", 466 { { TypeFunc::Parms+1, ShenandoahLoad }, { -1, ShenandoahNone}, { -1, ShenandoahNone}, 467 { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, 468 "updateBytesCRC32C", 469 { { TypeFunc::Parms+1, ShenandoahLoad }, { TypeFunc::Parms+3, ShenandoahLoad}, { -1, ShenandoahNone}, 470 { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, 471 "counterMode_AESCrypt", 472 { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { TypeFunc::Parms+2, ShenandoahLoad }, 473 { TypeFunc::Parms+3, ShenandoahStore }, { TypeFunc::Parms+5, ShenandoahStore }, { TypeFunc::Parms+6, ShenandoahStore } }, 474 "cipherBlockChaining_encryptAESCrypt", 475 { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { TypeFunc::Parms+2, ShenandoahLoad }, 476 { TypeFunc::Parms+3, ShenandoahLoad }, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, 477 "cipherBlockChaining_decryptAESCrypt", 478 { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { TypeFunc::Parms+2, ShenandoahLoad }, 479 { TypeFunc::Parms+3, ShenandoahLoad }, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, 480 "shenandoah_clone_barrier", 481 { { TypeFunc::Parms, ShenandoahLoad }, { -1, ShenandoahNone}, { -1, ShenandoahNone}, 482 { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, 483 
"ghash_processBlocks", 484 { { TypeFunc::Parms, ShenandoahStore }, { TypeFunc::Parms+1, ShenandoahLoad }, { TypeFunc::Parms+2, ShenandoahLoad }, 485 { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, 486 "sha1_implCompress", 487 { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { -1, ShenandoahNone }, 488 { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, 489 "sha256_implCompress", 490 { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { -1, ShenandoahNone }, 491 { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, 492 "sha512_implCompress", 493 { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { -1, ShenandoahNone }, 494 { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, 495 "sha1_implCompressMB", 496 { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { -1, ShenandoahNone }, 497 { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, 498 "sha256_implCompressMB", 499 { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { -1, ShenandoahNone }, 500 { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, 501 "sha512_implCompressMB", 502 { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { -1, ShenandoahNone }, 503 { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, 504 "encodeBlock", 505 { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+3, ShenandoahStore }, { -1, ShenandoahNone }, 506 { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, 507 }; 508 509 if (call->is_call_to_arraycopystub()) { 510 Node* dest = NULL; 511 const TypeTuple* args = n->as_Call()->_tf->domain(); 512 for (uint i = TypeFunc::Parms, j = 0; i < args->cnt(); i++) { 513 if (args->field_at(i)->isa_ptr()) { 514 j++; 515 if (j == 2) { 516 dest = n->in(i); 517 break; 518 } 519 } 520 } 521 
if (!verify_helper(n->in(TypeFunc::Parms), phis, visited, ShenandoahLoad, trace, barriers_used) || 522 !verify_helper(dest, phis, visited, ShenandoahStore, trace, barriers_used)) { 523 report_verify_failure("Shenandoah verification: ArrayCopy should have barriers", n); 524 } 525 } else if (strlen(call->_name) > 5 && 526 !strcmp(call->_name + strlen(call->_name) - 5, "_fill")) { 527 if (!verify_helper(n->in(TypeFunc::Parms), phis, visited, ShenandoahStore, trace, barriers_used)) { 528 report_verify_failure("Shenandoah verification: _fill should have barriers", n); 529 } 530 } else if (!strcmp(call->_name, "shenandoah_wb_pre")) { 531 // skip 532 } else { 533 const int calls_len = sizeof(calls) / sizeof(calls[0]); 534 int i = 0; 535 for (; i < calls_len; i++) { 536 if (!strcmp(calls[i].name, call->_name)) { 537 break; 538 } 539 } 540 if (i != calls_len) { 541 const uint args_len = sizeof(calls[0].args) / sizeof(calls[0].args[0]); 542 for (uint j = 0; j < args_len; j++) { 543 int pos = calls[i].args[j].pos; 544 if (pos == -1) { 545 break; 546 } 547 if (!verify_helper(call->in(pos), phis, visited, calls[i].args[j].t, trace, barriers_used)) { 548 report_verify_failure("Shenandoah verification: intrinsic calls should have barriers", n); 549 } 550 } 551 for (uint j = TypeFunc::Parms; j < call->req(); j++) { 552 if (call->in(j)->bottom_type()->make_ptr() && 553 call->in(j)->bottom_type()->make_ptr()->isa_oopptr()) { 554 uint k = 0; 555 for (; k < args_len && calls[i].args[k].pos != (int)j; k++); 556 if (k == args_len) { 557 fatal("arg %d for call %s not covered", j, call->_name); 558 } 559 } 560 } 561 } else { 562 for (uint j = TypeFunc::Parms; j < call->req(); j++) { 563 if (call->in(j)->bottom_type()->make_ptr() && 564 call->in(j)->bottom_type()->make_ptr()->isa_oopptr()) { 565 fatal("%s not covered", call->_name); 566 } 567 } 568 } 569 } 570 } else if (n->Opcode() == Op_ShenandoahEnqueueBarrier || n->Opcode() == Op_ShenandoahLoadReferenceBarrier) { 571 // skip 572 } else 
if (n->is_AddP() 573 || n->is_Phi() 574 || n->is_ConstraintCast() 575 || n->Opcode() == Op_Return 576 || n->Opcode() == Op_CMoveP 577 || n->Opcode() == Op_CMoveN 578 || n->Opcode() == Op_Rethrow 579 || n->is_MemBar() 580 || n->Opcode() == Op_Conv2B 581 || n->Opcode() == Op_SafePoint 582 || n->is_CallJava() 583 || n->Opcode() == Op_Unlock 584 || n->Opcode() == Op_EncodeP 585 || n->Opcode() == Op_DecodeN) { 586 // nothing to do 587 } else { 588 static struct { 589 int opcode; 590 struct { 591 int pos; 592 verify_type t; 593 } inputs[2]; 594 } others[] = { 595 Op_FastLock, 596 { { 1, ShenandoahLoad }, { -1, ShenandoahNone} }, 597 Op_Lock, 598 { { TypeFunc::Parms, ShenandoahLoad }, { -1, ShenandoahNone} }, 599 Op_ArrayCopy, 600 { { ArrayCopyNode::Src, ShenandoahLoad }, { ArrayCopyNode::Dest, ShenandoahStore } }, 601 Op_StrCompressedCopy, 602 { { 2, ShenandoahLoad }, { 3, ShenandoahStore } }, 603 Op_StrInflatedCopy, 604 { { 2, ShenandoahLoad }, { 3, ShenandoahStore } }, 605 Op_AryEq, 606 { { 2, ShenandoahLoad }, { 3, ShenandoahLoad } }, 607 Op_StrIndexOf, 608 { { 2, ShenandoahLoad }, { 4, ShenandoahLoad } }, 609 Op_StrComp, 610 { { 2, ShenandoahLoad }, { 4, ShenandoahLoad } }, 611 Op_StrEquals, 612 { { 2, ShenandoahLoad }, { 3, ShenandoahLoad } }, 613 Op_EncodeISOArray, 614 { { 2, ShenandoahLoad }, { 3, ShenandoahStore } }, 615 Op_HasNegatives, 616 { { 2, ShenandoahLoad }, { -1, ShenandoahNone} }, 617 Op_CastP2X, 618 { { 1, ShenandoahLoad }, { -1, ShenandoahNone} }, 619 Op_StrIndexOfChar, 620 { { 2, ShenandoahLoad }, { -1, ShenandoahNone } }, 621 }; 622 623 const int others_len = sizeof(others) / sizeof(others[0]); 624 int i = 0; 625 for (; i < others_len; i++) { 626 if (others[i].opcode == n->Opcode()) { 627 break; 628 } 629 } 630 uint stop = n->is_Call() ? 
n->as_Call()->tf()->domain()->cnt() : n->req(); 631 if (i != others_len) { 632 const uint inputs_len = sizeof(others[0].inputs) / sizeof(others[0].inputs[0]); 633 for (uint j = 0; j < inputs_len; j++) { 634 int pos = others[i].inputs[j].pos; 635 if (pos == -1) { 636 break; 637 } 638 if (!verify_helper(n->in(pos), phis, visited, others[i].inputs[j].t, trace, barriers_used)) { 639 report_verify_failure("Shenandoah verification: intrinsic calls should have barriers", n); 640 } 641 } 642 for (uint j = 1; j < stop; j++) { 643 if (n->in(j) != NULL && n->in(j)->bottom_type()->make_ptr() && 644 n->in(j)->bottom_type()->make_ptr()->make_oopptr()) { 645 uint k = 0; 646 for (; k < inputs_len && others[i].inputs[k].pos != (int)j; k++); 647 if (k == inputs_len) { 648 fatal("arg %d for node %s not covered", j, n->Name()); 649 } 650 } 651 } 652 } else { 653 for (uint j = 1; j < stop; j++) { 654 if (n->in(j) != NULL && n->in(j)->bottom_type()->make_ptr() && 655 n->in(j)->bottom_type()->make_ptr()->make_oopptr()) { 656 fatal("%s not covered", n->Name()); 657 } 658 } 659 } 660 } 661 662 if (n->is_SafePoint()) { 663 SafePointNode* sfpt = n->as_SafePoint(); 664 if (verify_no_useless_barrier && sfpt->jvms() != NULL) { 665 for (uint i = sfpt->jvms()->scloff(); i < sfpt->jvms()->endoff(); i++) { 666 if (!verify_helper(sfpt->in(i), phis, visited, ShenandoahLoad, trace, barriers_used)) { 667 phis.clear(); 668 visited.Reset(); 669 } 670 } 671 } 672 } 673 for( uint i = 0; i < n->len(); ++i ) { 674 Node *m = n->in(i); 675 if (m == NULL) continue; 676 677 // In most cases, inputs should be known to be non null. If it's 678 // not the case, it could be a missing cast_not_null() in an 679 // intrinsic or support might be needed in AddPNode::Ideal() to 680 // avoid a NULL+offset input. 
681 if (!(n->is_Phi() || 682 (n->is_SafePoint() && (!n->is_CallRuntime() || !strcmp(n->as_Call()->_name, "shenandoah_wb_pre") || !strcmp(n->as_Call()->_name, "unsafe_arraycopy"))) || 683 n->Opcode() == Op_CmpP || 684 n->Opcode() == Op_CmpN || 685 (n->Opcode() == Op_StoreP && i == StoreNode::ValueIn) || 686 (n->Opcode() == Op_StoreN && i == StoreNode::ValueIn) || 687 n->is_ConstraintCast() || 688 n->Opcode() == Op_Return || 689 n->Opcode() == Op_Conv2B || 690 n->is_AddP() || 691 n->Opcode() == Op_CMoveP || 692 n->Opcode() == Op_CMoveN || 693 n->Opcode() == Op_Rethrow || 694 n->is_MemBar() || 695 n->is_Mem() || 696 n->Opcode() == Op_AryEq || 697 n->Opcode() == Op_SCMemProj || 698 n->Opcode() == Op_EncodeP || 699 n->Opcode() == Op_DecodeN || 700 n->Opcode() == Op_ShenandoahEnqueueBarrier || 701 n->Opcode() == Op_ShenandoahLoadReferenceBarrier)) { 702 if (m->bottom_type()->make_oopptr() && m->bottom_type()->make_oopptr()->meet(TypePtr::NULL_PTR) == m->bottom_type()) { 703 report_verify_failure("Shenandoah verification: null input", n, m); 704 } 705 } 706 707 wq.push(m); 708 } 709 } 710 711 if (verify_no_useless_barrier) { 712 for (int i = 0; i < barriers.length(); i++) { 713 Node* n = barriers.at(i); 714 if (!barriers_used.member(n)) { 715 tty->print("XXX useless barrier"); n->dump(-2); 716 ShouldNotReachHere(); 717 } 718 } 719 } 720 } 721 #endif 722 723 bool ShenandoahBarrierC2Support::is_dominator_same_ctrl(Node* c, Node* d, Node* n, PhaseIdealLoop* phase) { 724 // That both nodes have the same control is not sufficient to prove 725 // domination, verify that there's no path from d to n 726 ResourceMark rm; 727 Unique_Node_List wq; 728 wq.push(d); 729 for (uint next = 0; next < wq.size(); next++) { 730 Node *m = wq.at(next); 731 if (m == n) { 732 return false; 733 } 734 if (m->is_Phi() && m->in(0)->is_Loop()) { 735 assert(phase->ctrl_or_self(m->in(LoopNode::EntryControl)) != c, "following loop entry should lead to new control"); 736 } else { 737 for (uint i = 0; i < 
m->req(); i++) { 738 if (m->in(i) != NULL && phase->ctrl_or_self(m->in(i)) == c) { 739 wq.push(m->in(i)); 740 } 741 } 742 } 743 } 744 return true; 745 } 746 747 bool ShenandoahBarrierC2Support::is_dominator(Node* d_c, Node* n_c, Node* d, Node* n, PhaseIdealLoop* phase) { 748 if (d_c != n_c) { 749 return phase->is_dominator(d_c, n_c); 750 } 751 return is_dominator_same_ctrl(d_c, d, n, phase); 752 } 753 754 Node* next_mem(Node* mem, int alias) { 755 Node* res = NULL; 756 if (mem->is_Proj()) { 757 res = mem->in(0); 758 } else if (mem->is_SafePoint() || mem->is_MemBar()) { 759 res = mem->in(TypeFunc::Memory); 760 } else if (mem->is_Phi()) { 761 res = mem->in(1); 762 } else if (mem->is_MergeMem()) { 763 res = mem->as_MergeMem()->memory_at(alias); 764 } else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) { 765 assert(alias = Compile::AliasIdxRaw, "following raw memory can't lead to a barrier"); 766 res = mem->in(MemNode::Memory); 767 } else { 768 #ifdef ASSERT 769 mem->dump(); 770 #endif 771 ShouldNotReachHere(); 772 } 773 return res; 774 } 775 776 Node* ShenandoahBarrierC2Support::no_branches(Node* c, Node* dom, bool allow_one_proj, PhaseIdealLoop* phase) { 777 Node* iffproj = NULL; 778 while (c != dom) { 779 Node* next = phase->idom(c); 780 assert(next->unique_ctrl_out() == c || c->is_Proj() || c->is_Region(), "multiple control flow out but no proj or region?"); 781 if (c->is_Region()) { 782 ResourceMark rm; 783 Unique_Node_List wq; 784 wq.push(c); 785 for (uint i = 0; i < wq.size(); i++) { 786 Node *n = wq.at(i); 787 if (n == next) { 788 continue; 789 } 790 if (n->is_Region()) { 791 for (uint j = 1; j < n->req(); j++) { 792 wq.push(n->in(j)); 793 } 794 } else { 795 wq.push(n->in(0)); 796 } 797 } 798 for (uint i = 0; i < wq.size(); i++) { 799 Node *n = wq.at(i); 800 assert(n->is_CFG(), ""); 801 if (n->is_Multi()) { 802 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) { 803 Node* u = n->fast_out(j); 804 if (u->is_CFG()) { 805 if 
(!wq.member(u) && !u->as_Proj()->is_uncommon_trap_proj(Deoptimization::Reason_none)) { 806 return NodeSentinel; 807 } 808 } 809 } 810 } 811 } 812 } else if (c->is_Proj()) { 813 if (c->is_IfProj()) { 814 if (c->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) != NULL) { 815 // continue; 816 } else { 817 if (!allow_one_proj) { 818 return NodeSentinel; 819 } 820 if (iffproj == NULL) { 821 iffproj = c; 822 } else { 823 return NodeSentinel; 824 } 825 } 826 } else if (c->Opcode() == Op_JumpProj) { 827 return NodeSentinel; // unsupported 828 } else if (c->Opcode() == Op_CatchProj) { 829 return NodeSentinel; // unsupported 830 } else if (c->Opcode() == Op_CProj && next->Opcode() == Op_NeverBranch) { 831 return NodeSentinel; // unsupported 832 } else { 833 assert(next->unique_ctrl_out() == c, "unsupported branch pattern"); 834 } 835 } 836 c = next; 837 } 838 return iffproj; 839 } 840 841 Node* ShenandoahBarrierC2Support::dom_mem(Node* mem, Node* ctrl, int alias, Node*& mem_ctrl, PhaseIdealLoop* phase) { 842 ResourceMark rm; 843 VectorSet wq(Thread::current()->resource_area()); 844 wq.set(mem->_idx); 845 mem_ctrl = phase->ctrl_or_self(mem); 846 while (!phase->is_dominator(mem_ctrl, ctrl) || mem_ctrl == ctrl) { 847 mem = next_mem(mem, alias); 848 if (wq.test_set(mem->_idx)) { 849 return NULL; 850 } 851 mem_ctrl = phase->ctrl_or_self(mem); 852 } 853 if (mem->is_MergeMem()) { 854 mem = mem->as_MergeMem()->memory_at(alias); 855 mem_ctrl = phase->ctrl_or_self(mem); 856 } 857 return mem; 858 } 859 860 Node* ShenandoahBarrierC2Support::find_bottom_mem(Node* ctrl, PhaseIdealLoop* phase) { 861 Node* mem = NULL; 862 Node* c = ctrl; 863 do { 864 if (c->is_Region()) { 865 Node* phi_bottom = NULL; 866 for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax && mem == NULL; i++) { 867 Node* u = c->fast_out(i); 868 if (u->is_Phi() && u->bottom_type() == Type::MEMORY) { 869 if (u->adr_type() == TypePtr::BOTTOM) { 870 mem = u; 871 } 872 } 873 } 874 } else { 875 if 
(c->is_Call() && c->as_Call()->adr_type() != NULL) {
        CallProjections projs;
        c->as_Call()->extract_projections(&projs, true, false);
        if (projs.fallthrough_memproj != NULL) {
          if (projs.fallthrough_memproj->adr_type() == TypePtr::BOTTOM) {
            if (projs.catchall_memproj == NULL) {
              mem = projs.fallthrough_memproj;
            } else {
              // Both fallthrough and catchall memory exist: pick the one on
              // the path that dominates ctrl.
              if (phase->is_dominator(projs.fallthrough_catchproj, ctrl)) {
                mem = projs.fallthrough_memproj;
              } else {
                assert(phase->is_dominator(projs.catchall_catchproj, ctrl), "one proj must dominate barrier");
                mem = projs.catchall_memproj;
              }
            }
          }
        } else {
          Node* proj = c->as_Call()->proj_out(TypeFunc::Memory);
          if (proj != NULL &&
              proj->adr_type() == TypePtr::BOTTOM) {
            mem = proj;
          }
        }
      } else {
        // Non-call control: look for a bottom memory projection hanging off
        // a SafePoint/MemBar/Start.
        for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax; i++) {
          Node* u = c->fast_out(i);
          if (u->is_Proj() &&
              u->bottom_type() == Type::MEMORY &&
              u->adr_type() == TypePtr::BOTTOM) {
            assert(c->is_SafePoint() || c->is_MemBar() || c->is_Start(), "");
            assert(mem == NULL, "only one proj");
            mem = u;
          }
        }
        assert(!c->is_Call() || c->as_Call()->adr_type() != NULL || mem == NULL, "no mem projection expected");
      }
    }
    c = phase->idom(c);
  } while (mem == NULL);
  return mem;
}

// Push onto 'uses' every non-CFG use of n pinned at ctrl, skipping phi uses
// that merely feed a loop back edge from n.
void ShenandoahBarrierC2Support::follow_barrier_uses(Node* n, Node* ctrl, Unique_Node_List& uses, PhaseIdealLoop* phase) {
  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    Node* u = n->fast_out(i);
    if (!u->is_CFG() && phase->get_ctrl(u) == ctrl && (!u->is_Phi() || !u->in(0)->is_Loop() || u->in(LoopNode::LoopBackControl) != n)) {
      uses.push(u);
    }
  }
}

// Replace the outer strip-mined loop head and its loop end with plain
// LoopNode/IfNode equivalents so the loop nest no longer appears strip
// mined (see callers in pin_and_expand).
static void hide_strip_mined_loop(OuterStripMinedLoopNode* outer, CountedLoopNode* inner, PhaseIdealLoop* phase) {
  OuterStripMinedLoopEndNode* le = inner->outer_loop_end();
  Node* new_outer = new LoopNode(outer->in(LoopNode::EntryControl),
outer->in(LoopNode::LoopBackControl));
  phase->register_control(new_outer, phase->get_loop(outer), outer->in(LoopNode::EntryControl));
  Node* new_le = new IfNode(le->in(0), le->in(1), le->_prob, le->_fcnt);
  phase->register_control(new_le, phase->get_loop(le), le->in(0));
  phase->lazy_replace(outer, new_outer);
  phase->lazy_replace(le, new_le);
  inner->clear_strip_mined();
}

// Emit the gc-state test: load the thread-local gc_state byte, AND it with
// HAS_FORWARDED and branch on != 0. On return, ctrl is the "heap not stable"
// (true) projection and heap_stable_ctrl the "heap stable" (false) one.
void ShenandoahBarrierC2Support::test_heap_stable(Node*& ctrl, Node* raw_mem, Node*& heap_stable_ctrl,
                                                  PhaseIdealLoop* phase) {
  IdealLoopTree* loop = phase->get_loop(ctrl);
  Node* thread = new ThreadLocalNode();
  phase->register_new_node(thread, ctrl);
  Node* offset = phase->igvn().MakeConX(in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  phase->set_ctrl(offset, phase->C->root());
  Node* gc_state_addr = new AddPNode(phase->C->top(), thread, offset);
  phase->register_new_node(gc_state_addr, ctrl);
  uint gc_state_idx = Compile::AliasIdxRaw;
  const TypePtr* gc_state_adr_type = NULL; // debug-mode-only argument
  debug_only(gc_state_adr_type = phase->C->get_adr_type(gc_state_idx));

  Node* gc_state = new LoadBNode(ctrl, raw_mem, gc_state_addr, gc_state_adr_type, TypeInt::BYTE, MemNode::unordered);
  phase->register_new_node(gc_state, ctrl);
  Node* heap_stable_and = new AndINode(gc_state, phase->igvn().intcon(ShenandoahHeap::HAS_FORWARDED));
  phase->register_new_node(heap_stable_and, ctrl);
  Node* heap_stable_cmp = new CmpINode(heap_stable_and, phase->igvn().zerocon(T_INT));
  phase->register_new_node(heap_stable_cmp, ctrl);
  Node* heap_stable_test = new BoolNode(heap_stable_cmp, BoolTest::ne);
  phase->register_new_node(heap_stable_test, ctrl);
  IfNode* heap_stable_iff = new IfNode(ctrl, heap_stable_test, PROB_UNLIKELY(0.999), COUNT_UNKNOWN);
  phase->register_control(heap_stable_iff, loop, ctrl);

  heap_stable_ctrl = new IfFalseNode(heap_stable_iff);
  phase->register_control(heap_stable_ctrl, loop, heap_stable_iff);
  ctrl = new IfTrueNode(heap_stable_iff);
  phase->register_control(ctrl, loop, heap_stable_iff);

  // The emitted shape must be recognizable by is_heap_stable_test().
  assert(is_heap_stable_test(heap_stable_iff), "Should match the shape");
}

// Emit a null test for val only when its type admits NULL. On return, ctrl
// is the not-null (true) projection; null_ctrl is the null (false)
// projection, or left untouched when no test was needed.
void ShenandoahBarrierC2Support::test_null(Node*& ctrl, Node* val, Node*& null_ctrl, PhaseIdealLoop* phase) {
  const Type* val_t = phase->igvn().type(val);
  if (val_t->meet(TypePtr::NULL_PTR) == val_t) {
    IdealLoopTree* loop = phase->get_loop(ctrl);
    Node* null_cmp = new CmpPNode(val, phase->igvn().zerocon(T_OBJECT));
    phase->register_new_node(null_cmp, ctrl);
    Node* null_test = new BoolNode(null_cmp, BoolTest::ne);
    phase->register_new_node(null_test, ctrl);
    IfNode* null_iff = new IfNode(ctrl, null_test, PROB_LIKELY(0.999), COUNT_UNKNOWN);
    phase->register_control(null_iff, loop, ctrl);
    ctrl = new IfTrueNode(null_iff);
    phase->register_control(ctrl, loop, null_iff);
    null_ctrl = new IfFalseNode(null_iff);
    phase->register_control(null_ctrl, loop, null_iff);
  }
}

// Clone the null-check If feeding unc_ctrl at control c, advance c to the
// clone's true projection, and return a fresh CastPP of val's uncasted input
// pinned there. val is expected to already be a CastPP to not-null.
Node* ShenandoahBarrierC2Support::clone_null_check(Node*& c, Node* val, Node* unc_ctrl, PhaseIdealLoop* phase) {
  IdealLoopTree *loop = phase->get_loop(c);
  Node* iff = unc_ctrl->in(0);
  assert(iff->is_If(), "broken");
  Node* new_iff = iff->clone();
  new_iff->set_req(0, c);
  phase->register_control(new_iff, loop, c);
  Node* iffalse = new IfFalseNode(new_iff->as_If());
  phase->register_control(iffalse, loop, new_iff);
  Node* iftrue = new IfTrueNode(new_iff->as_If());
  phase->register_control(iftrue, loop, new_iff);
  c = iftrue;
  const Type *t = phase->igvn().type(val);
  assert(val->Opcode() == Op_CastPP, "expect cast to non null here");
  Node* uncasted_val = val->in(1);
  val = new CastPPNode(uncasted_val, t);
  val->init_req(0, c);
  phase->register_new_node(val, c);
  return val;
}

// Re-anchor the uncommon trap call (and everything pinned on the old trap
// projection) onto new_unc_ctrl after the null check was cloned.
void
ShenandoahBarrierC2Support::fix_null_check(Node* unc, Node* unc_ctrl, Node* new_unc_ctrl,
                                                Unique_Node_List& uses, PhaseIdealLoop* phase) {
  IfNode* iff = unc_ctrl->in(0)->as_If();
  Node* proj = iff->proj_out(0);
  assert(proj != unc_ctrl, "bad projection");
  Node* use = proj->unique_ctrl_out();

  assert(use == unc || use->is_Region(), "what else?");

  uses.clear();
  if (use == unc) {
    // Trap call hangs directly off the projection: collect its inputs
    // pinned at proj so they can be moved below.
    phase->set_idom(use, new_unc_ctrl, phase->dom_depth(use));
    for (uint i = 1; i < unc->req(); i++) {
      Node* n = unc->in(i);
      if (phase->has_ctrl(n) && phase->get_ctrl(n) == proj) {
        uses.push(n);
      }
    }
  } else {
    // Projection feeds a region: collect the phi inputs coming in through
    // the proj edge instead.
    assert(use->is_Region(), "what else?");
    uint idx = 1;
    for (; use->in(idx) != proj; idx++);
    for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
      Node* u = use->fast_out(i);
      if (u->is_Phi() && phase->get_ctrl(u->in(idx)) == proj) {
        uses.push(u->in(idx));
      }
    }
  }
  // Transitively move everything controlled by proj under new_unc_ctrl.
  for(uint next = 0; next < uses.size(); next++ ) {
    Node *n = uses.at(next);
    assert(phase->get_ctrl(n) == proj, "bad control");
    phase->set_ctrl_and_loop(n, new_unc_ctrl);
    if (n->in(0) == proj) {
      phase->igvn().replace_input_of(n, 0, new_unc_ctrl);
    }
    for (uint i = 0; i < n->req(); i++) {
      Node* m = n->in(i);
      if (m != NULL && phase->has_ctrl(m) && phase->get_ctrl(m) == proj) {
        uses.push(m);
      }
    }
  }

  phase->igvn().rehash_node_delayed(use);
  int nb = use->replace_edge(proj, new_unc_ctrl);
  assert(nb == 1, "only use expected");
}

// Emit the collection-set membership test: index val's region into the
// in-cset byte table and branch on the loaded byte == 0. On return, ctrl is
// the in-cset (false) projection and not_cset_ctrl the not-in-cset (true)
// one.
void ShenandoahBarrierC2Support::in_cset_fast_test(Node*& ctrl, Node*& not_cset_ctrl, Node* val, Node* raw_mem, PhaseIdealLoop* phase) {
  IdealLoopTree *loop = phase->get_loop(ctrl);
  Node* raw_rbtrue = new CastP2XNode(ctrl, val);
  phase->register_new_node(raw_rbtrue, ctrl);
  Node* cset_offset = new URShiftXNode(raw_rbtrue,
phase->igvn().intcon(ShenandoahHeapRegion::region_size_bytes_shift_jint()));
  phase->register_new_node(cset_offset, ctrl);
  Node* in_cset_fast_test_base_addr = phase->igvn().makecon(TypeRawPtr::make(ShenandoahHeap::in_cset_fast_test_addr()));
  phase->set_ctrl(in_cset_fast_test_base_addr, phase->C->root());
  Node* in_cset_fast_test_adr = new AddPNode(phase->C->top(), in_cset_fast_test_base_addr, cset_offset);
  phase->register_new_node(in_cset_fast_test_adr, ctrl);
  uint in_cset_fast_test_idx = Compile::AliasIdxRaw;
  const TypePtr* in_cset_fast_test_adr_type = NULL; // debug-mode-only argument
  debug_only(in_cset_fast_test_adr_type = phase->C->get_adr_type(in_cset_fast_test_idx));
  Node* in_cset_fast_test_load = new LoadBNode(ctrl, raw_mem, in_cset_fast_test_adr, in_cset_fast_test_adr_type, TypeInt::BYTE, MemNode::unordered);
  phase->register_new_node(in_cset_fast_test_load, ctrl);
  Node* in_cset_fast_test_cmp = new CmpINode(in_cset_fast_test_load, phase->igvn().zerocon(T_INT));
  phase->register_new_node(in_cset_fast_test_cmp, ctrl);
  Node* in_cset_fast_test_test = new BoolNode(in_cset_fast_test_cmp, BoolTest::eq);
  phase->register_new_node(in_cset_fast_test_test, ctrl);
  IfNode* in_cset_fast_test_iff = new IfNode(ctrl, in_cset_fast_test_test, PROB_UNLIKELY(0.999), COUNT_UNKNOWN);
  phase->register_control(in_cset_fast_test_iff, loop, ctrl);

  not_cset_ctrl = new IfTrueNode(in_cset_fast_test_iff);
  phase->register_control(not_cset_ctrl, loop, in_cset_fast_test_iff);

  ctrl = new IfFalseNode(in_cset_fast_test_iff);
  phase->register_control(ctrl, loop, in_cset_fast_test_iff);
}

// Emit the slow-path runtime call of the load reference barrier. On return,
// ctrl/val/result_mem are rewired to the call's control, (checkcast'ed)
// result and memory projections.
void ShenandoahBarrierC2Support::call_lrb_stub(Node*& ctrl, Node*& val, Node* load_addr, Node*& result_mem, Node* raw_mem, PhaseIdealLoop* phase) {
  IdealLoopTree*loop = phase->get_loop(ctrl);
  const TypePtr* obj_type = phase->igvn().type(val)->is_oopptr()->cast_to_nonconst();

  // The slow path stub consumes and produces raw memory in addition
  // to the existing memory edges
  Node* base = find_bottom_mem(ctrl, phase);
  MergeMemNode* mm = MergeMemNode::make(base);
  mm->set_memory_at(Compile::AliasIdxRaw, raw_mem);
  phase->register_new_node(mm, ctrl);

  // Narrow-oop variant of the stub is selected on 64-bit compressed-oops
  // builds; 32-bit always uses the plain variant.
  address target = LP64_ONLY(UseCompressedOops) NOT_LP64(false) ?
          CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_fixup_narrow_JRT) :
          CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_fixup_JRT);

  Node* call = new CallLeafNode(ShenandoahBarrierSetC2::shenandoah_load_reference_barrier_Type(), target, "shenandoah_load_reference_barrier", TypeRawPtr::BOTTOM);
  call->init_req(TypeFunc::Control, ctrl);
  call->init_req(TypeFunc::I_O, phase->C->top());
  call->init_req(TypeFunc::Memory, mm);
  call->init_req(TypeFunc::FramePtr, phase->C->top());
  call->init_req(TypeFunc::ReturnAdr, phase->C->top());
  call->init_req(TypeFunc::Parms, val);
  call->init_req(TypeFunc::Parms+1, load_addr);
  phase->register_control(call, loop, ctrl);
  ctrl = new ProjNode(call, TypeFunc::Control);
  phase->register_control(ctrl, loop, call);
  result_mem = new ProjNode(call, TypeFunc::Memory);
  phase->register_new_node(result_mem, call);
  val = new ProjNode(call, TypeFunc::Parms);
  phase->register_new_node(val, call);
  // Pin the returned oop with its (non-constant) type.
  val = new CheckCastPPNode(ctrl, val, obj_type);
  phase->register_new_node(val, ctrl);
}

// After a barrier has been expanded, move every node that was control
// dependent on the barrier's input control (created before 'last') down to
// 'region', the merge point of the expanded barrier.
void ShenandoahBarrierC2Support::fix_ctrl(Node* barrier, Node* region, const MemoryGraphFixer& fixer, Unique_Node_List& uses, Unique_Node_List& uses_to_ignore, uint last, PhaseIdealLoop* phase) {
  Node* ctrl = phase->get_ctrl(barrier);
  Node* init_raw_mem = fixer.find_mem(ctrl, barrier);

  // Update the control of all nodes that should be after the
  // barrier control flow
  uses.clear();
  // Every node that is control dependent on the barrier's input
  // control will be after the expanded barrier. The raw memory (if
  // its memory is control dependent on the barrier's input control)
  // must stay above the barrier.
  uses_to_ignore.clear();
  if (phase->has_ctrl(init_raw_mem) && phase->get_ctrl(init_raw_mem) == ctrl && !init_raw_mem->is_Phi()) {
    uses_to_ignore.push(init_raw_mem);
  }
  // Transitive closure of the raw memory's inputs at ctrl: all of them
  // stay above the barrier.
  for (uint next = 0; next < uses_to_ignore.size(); next++) {
    Node *n = uses_to_ignore.at(next);
    for (uint i = 0; i < n->req(); i++) {
      Node* in = n->in(i);
      if (in != NULL && phase->has_ctrl(in) && phase->get_ctrl(in) == ctrl) {
        uses_to_ignore.push(in);
      }
    }
  }
  for (DUIterator_Fast imax, i = ctrl->fast_outs(imax); i < imax; i++) {
    Node* u = ctrl->fast_out(i);
    if (u->_idx < last &&
        u != barrier &&
        !uses_to_ignore.member(u) &&
        (u->in(0) != ctrl || (!u->is_Region() && !u->is_Phi())) &&
        (ctrl->Opcode() != Op_CatchProj || u->Opcode() != Op_CreateEx)) {
      Node* old_c = phase->ctrl_or_self(u);
      Node* c = old_c;
      if (c != ctrl ||
          is_dominator_same_ctrl(old_c, barrier, u, phase) ||
          ShenandoahBarrierSetC2::is_shenandoah_state_load(u)) {
        phase->igvn().rehash_node_delayed(u);
        int nb = u->replace_edge(ctrl, region);
        if (u->is_CFG()) {
          if (phase->idom(u) == ctrl) {
            phase->set_idom(u, region, phase->dom_depth(region));
          }
        } else if (phase->get_ctrl(u) == ctrl) {
          assert(u != init_raw_mem, "should leave input raw mem above the barrier");
          uses.push(u);
        }
        assert(nb == 1, "more than 1 ctrl input?");
        --i, imax -= nb;
      }
    }
  }
}

// Build (recursively) phis merging n (fallthrough path) and n_clone
// (catchall path) at every region between c and ctrl after a node was
// cloned across a call's two exception paths.
static Node* create_phis_on_call_return(Node* ctrl, Node* c, Node* n, Node* n_clone, const CallProjections& projs, PhaseIdealLoop* phase) {
  Node* region = NULL;
  while (c != ctrl) {
    if (c->is_Region()) {
      region = c;
    }
    c = phase->idom(c);
  }
  assert(region != NULL, "");
1181 Node* phi = new PhiNode(region, n->bottom_type()); 1182 for (uint j = 1; j < region->req(); j++) { 1183 Node* in = region->in(j); 1184 if (phase->is_dominator(projs.fallthrough_catchproj, in)) { 1185 phi->init_req(j, n); 1186 } else if (phase->is_dominator(projs.catchall_catchproj, in)) { 1187 phi->init_req(j, n_clone); 1188 } else { 1189 phi->init_req(j, create_phis_on_call_return(ctrl, in, n, n_clone, projs, phase)); 1190 } 1191 } 1192 phase->register_new_node(phi, region); 1193 return phi; 1194 } 1195 1196 void ShenandoahBarrierC2Support::pin_and_expand(PhaseIdealLoop* phase) { 1197 ShenandoahBarrierSetC2State* state = ShenandoahBarrierSetC2::bsc2()->state(); 1198 1199 Unique_Node_List uses; 1200 for (int i = 0; i < state->enqueue_barriers_count(); i++) { 1201 Node* barrier = state->enqueue_barrier(i); 1202 Node* ctrl = phase->get_ctrl(barrier); 1203 IdealLoopTree* loop = phase->get_loop(ctrl); 1204 if (loop->_head->is_OuterStripMinedLoop()) { 1205 // Expanding a barrier here will break loop strip mining 1206 // verification. Transform the loop so the loop nest doesn't 1207 // appear as strip mined. 
1208 OuterStripMinedLoopNode* outer = loop->_head->as_OuterStripMinedLoop(); 1209 hide_strip_mined_loop(outer, outer->unique_ctrl_out()->as_CountedLoop(), phase); 1210 } 1211 } 1212 1213 Node_Stack stack(0); 1214 Node_List clones; 1215 for (int i = state->load_reference_barriers_count() - 1; i >= 0; i--) { 1216 ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i); 1217 if (lrb->get_barrier_strength() == ShenandoahLoadReferenceBarrierNode::NONE) { 1218 continue; 1219 } 1220 1221 Node* ctrl = phase->get_ctrl(lrb); 1222 Node* val = lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn); 1223 1224 CallStaticJavaNode* unc = NULL; 1225 Node* unc_ctrl = NULL; 1226 Node* uncasted_val = val; 1227 1228 for (DUIterator_Fast imax, i = lrb->fast_outs(imax); i < imax; i++) { 1229 Node* u = lrb->fast_out(i); 1230 if (u->Opcode() == Op_CastPP && 1231 u->in(0) != NULL && 1232 phase->is_dominator(u->in(0), ctrl)) { 1233 const Type* u_t = phase->igvn().type(u); 1234 1235 if (u_t->meet(TypePtr::NULL_PTR) != u_t && 1236 u->in(0)->Opcode() == Op_IfTrue && 1237 u->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) && 1238 u->in(0)->in(0)->is_If() && 1239 u->in(0)->in(0)->in(1)->Opcode() == Op_Bool && 1240 u->in(0)->in(0)->in(1)->as_Bool()->_test._test == BoolTest::ne && 1241 u->in(0)->in(0)->in(1)->in(1)->Opcode() == Op_CmpP && 1242 u->in(0)->in(0)->in(1)->in(1)->in(1) == val && 1243 u->in(0)->in(0)->in(1)->in(1)->in(2)->bottom_type() == TypePtr::NULL_PTR) { 1244 IdealLoopTree* loop = phase->get_loop(ctrl); 1245 IdealLoopTree* unc_loop = phase->get_loop(u->in(0)); 1246 1247 if (!unc_loop->is_member(loop)) { 1248 continue; 1249 } 1250 1251 Node* branch = no_branches(ctrl, u->in(0), false, phase); 1252 assert(branch == NULL || branch == NodeSentinel, "was not looking for a branch"); 1253 if (branch == NodeSentinel) { 1254 continue; 1255 } 1256 1257 phase->igvn().replace_input_of(u, 1, val); 1258 phase->igvn().replace_input_of(lrb, 
ShenandoahLoadReferenceBarrierNode::ValueIn, u); 1259 phase->set_ctrl(u, u->in(0)); 1260 phase->set_ctrl(lrb, u->in(0)); 1261 unc = u->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none); 1262 unc_ctrl = u->in(0); 1263 val = u; 1264 1265 for (DUIterator_Fast jmax, j = val->fast_outs(jmax); j < jmax; j++) { 1266 Node* u = val->fast_out(j); 1267 if (u == lrb) continue; 1268 phase->igvn().rehash_node_delayed(u); 1269 int nb = u->replace_edge(val, lrb); 1270 --j; jmax -= nb; 1271 } 1272 1273 RegionNode* r = new RegionNode(3); 1274 IfNode* iff = unc_ctrl->in(0)->as_If(); 1275 1276 Node* ctrl_use = unc_ctrl->unique_ctrl_out(); 1277 Node* unc_ctrl_clone = unc_ctrl->clone(); 1278 phase->register_control(unc_ctrl_clone, loop, iff); 1279 Node* c = unc_ctrl_clone; 1280 Node* new_cast = clone_null_check(c, val, unc_ctrl_clone, phase); 1281 r->init_req(1, new_cast->in(0)->in(0)->as_If()->proj_out(0)); 1282 1283 phase->igvn().replace_input_of(unc_ctrl, 0, c->in(0)); 1284 phase->set_idom(unc_ctrl, c->in(0), phase->dom_depth(unc_ctrl)); 1285 phase->lazy_replace(c, unc_ctrl); 1286 c = NULL;; 1287 phase->igvn().replace_input_of(val, 0, unc_ctrl_clone); 1288 phase->set_ctrl(val, unc_ctrl_clone); 1289 1290 IfNode* new_iff = new_cast->in(0)->in(0)->as_If(); 1291 fix_null_check(unc, unc_ctrl_clone, r, uses, phase); 1292 Node* iff_proj = iff->proj_out(0); 1293 r->init_req(2, iff_proj); 1294 phase->register_control(r, phase->ltree_root(), iff); 1295 1296 Node* new_bol = new_iff->in(1)->clone(); 1297 Node* new_cmp = new_bol->in(1)->clone(); 1298 assert(new_cmp->Opcode() == Op_CmpP, "broken"); 1299 assert(new_cmp->in(1) == val->in(1), "broken"); 1300 new_bol->set_req(1, new_cmp); 1301 new_cmp->set_req(1, lrb); 1302 phase->register_new_node(new_bol, new_iff->in(0)); 1303 phase->register_new_node(new_cmp, new_iff->in(0)); 1304 phase->igvn().replace_input_of(new_iff, 1, new_bol); 1305 phase->igvn().replace_input_of(new_cast, 1, lrb); 1306 1307 for (DUIterator_Fast imax, 
i = lrb->fast_outs(imax); i < imax; i++) { 1308 Node* u = lrb->fast_out(i); 1309 if (u == new_cast || u == new_cmp) { 1310 continue; 1311 } 1312 phase->igvn().rehash_node_delayed(u); 1313 int nb = u->replace_edge(lrb, new_cast); 1314 assert(nb > 0, "no update?"); 1315 --i; imax -= nb; 1316 } 1317 1318 for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) { 1319 Node* u = val->fast_out(i); 1320 if (u == lrb) { 1321 continue; 1322 } 1323 phase->igvn().rehash_node_delayed(u); 1324 int nb = u->replace_edge(val, new_cast); 1325 assert(nb > 0, "no update?"); 1326 --i; imax -= nb; 1327 } 1328 1329 ctrl = unc_ctrl_clone; 1330 phase->set_ctrl_and_loop(lrb, ctrl); 1331 break; 1332 } 1333 } 1334 } 1335 if ((ctrl->is_Proj() && ctrl->in(0)->is_CallJava()) || ctrl->is_CallJava()) { 1336 CallNode* call = ctrl->is_Proj() ? ctrl->in(0)->as_CallJava() : ctrl->as_CallJava(); 1337 CallProjections projs; 1338 call->extract_projections(&projs, false, false); 1339 1340 Node* lrb_clone = lrb->clone(); 1341 phase->register_new_node(lrb_clone, projs.catchall_catchproj); 1342 phase->set_ctrl(lrb, projs.fallthrough_catchproj); 1343 1344 stack.push(lrb, 0); 1345 clones.push(lrb_clone); 1346 1347 do { 1348 assert(stack.size() == clones.size(), ""); 1349 Node* n = stack.node(); 1350 #ifdef ASSERT 1351 if (n->is_Load()) { 1352 Node* mem = n->in(MemNode::Memory); 1353 for (DUIterator_Fast jmax, j = mem->fast_outs(jmax); j < jmax; j++) { 1354 Node* u = mem->fast_out(j); 1355 assert(!u->is_Store() || !u->is_LoadStore() || phase->get_ctrl(u) != ctrl, "anti dependent store?"); 1356 } 1357 } 1358 #endif 1359 uint idx = stack.index(); 1360 Node* n_clone = clones.at(clones.size()-1); 1361 if (idx < n->outcnt()) { 1362 Node* u = n->raw_out(idx); 1363 Node* c = phase->ctrl_or_self(u); 1364 if (phase->is_dominator(call, c) && phase->is_dominator(c, projs.fallthrough_proj)) { 1365 stack.set_index(idx+1); 1366 assert(!u->is_CFG(), ""); 1367 stack.push(u, 0); 1368 Node* u_clone = u->clone(); 1369 
int nb = u_clone->replace_edge(n, n_clone); 1370 assert(nb > 0, "should have replaced some uses"); 1371 phase->register_new_node(u_clone, projs.catchall_catchproj); 1372 clones.push(u_clone); 1373 phase->set_ctrl(u, projs.fallthrough_catchproj); 1374 } else { 1375 bool replaced = false; 1376 if (u->is_Phi()) { 1377 for (uint k = 1; k < u->req(); k++) { 1378 if (u->in(k) == n) { 1379 if (phase->is_dominator(projs.catchall_catchproj, u->in(0)->in(k))) { 1380 phase->igvn().replace_input_of(u, k, n_clone); 1381 replaced = true; 1382 } else if (!phase->is_dominator(projs.fallthrough_catchproj, u->in(0)->in(k))) { 1383 phase->igvn().replace_input_of(u, k, create_phis_on_call_return(ctrl, u->in(0)->in(k), n, n_clone, projs, phase)); 1384 replaced = true; 1385 } 1386 } 1387 } 1388 } else { 1389 if (phase->is_dominator(projs.catchall_catchproj, c)) { 1390 phase->igvn().rehash_node_delayed(u); 1391 int nb = u->replace_edge(n, n_clone); 1392 assert(nb > 0, "should have replaced some uses"); 1393 replaced = true; 1394 } else if (!phase->is_dominator(projs.fallthrough_catchproj, c)) { 1395 phase->igvn().rehash_node_delayed(u); 1396 int nb = u->replace_edge(n, create_phis_on_call_return(ctrl, c, n, n_clone, projs, phase)); 1397 assert(nb > 0, "should have replaced some uses"); 1398 replaced = true; 1399 } 1400 } 1401 if (!replaced) { 1402 stack.set_index(idx+1); 1403 } 1404 } 1405 } else { 1406 stack.pop(); 1407 clones.pop(); 1408 } 1409 } while (stack.size() > 0); 1410 assert(stack.size() == 0 && clones.size() == 0, ""); 1411 } 1412 } 1413 1414 for (int i = 0; i < state->load_reference_barriers_count(); i++) { 1415 ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i); 1416 if (lrb->get_barrier_strength() == ShenandoahLoadReferenceBarrierNode::NONE) { 1417 continue; 1418 } 1419 Node* ctrl = phase->get_ctrl(lrb); 1420 IdealLoopTree* loop = phase->get_loop(ctrl); 1421 if (loop->_head->is_OuterStripMinedLoop()) { 1422 // Expanding a barrier here will break 
loop strip mining 1423 // verification. Transform the loop so the loop nest doesn't 1424 // appear as strip mined. 1425 OuterStripMinedLoopNode* outer = loop->_head->as_OuterStripMinedLoop(); 1426 hide_strip_mined_loop(outer, outer->unique_ctrl_out()->as_CountedLoop(), phase); 1427 } 1428 } 1429 1430 // Expand load-reference-barriers 1431 MemoryGraphFixer fixer(Compile::AliasIdxRaw, true, phase); 1432 Unique_Node_List uses_to_ignore; 1433 for (int i = state->load_reference_barriers_count() - 1; i >= 0; i--) { 1434 ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i); 1435 if (lrb->get_barrier_strength() == ShenandoahLoadReferenceBarrierNode::NONE) { 1436 phase->igvn().replace_node(lrb, lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn)); 1437 continue; 1438 } 1439 uint last = phase->C->unique(); 1440 Node* ctrl = phase->get_ctrl(lrb); 1441 Node* val = lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn); 1442 1443 1444 Node* orig_ctrl = ctrl; 1445 1446 Node* raw_mem = fixer.find_mem(ctrl, lrb); 1447 Node* init_raw_mem = raw_mem; 1448 Node* raw_mem_for_ctrl = fixer.find_mem(ctrl, NULL); 1449 1450 IdealLoopTree *loop = phase->get_loop(ctrl); 1451 CallStaticJavaNode* unc = lrb->pin_and_expand_null_check(phase->igvn()); 1452 Node* unc_ctrl = NULL; 1453 if (unc != NULL) { 1454 if (val->in(ShenandoahLoadReferenceBarrierNode::Control) != ctrl) { 1455 unc = NULL; 1456 } else { 1457 unc_ctrl = val->in(ShenandoahLoadReferenceBarrierNode::Control); 1458 } 1459 } 1460 1461 Node* uncasted_val = val; 1462 if (unc != NULL) { 1463 uncasted_val = val->in(1); 1464 } 1465 1466 Node* heap_stable_ctrl = NULL; 1467 Node* null_ctrl = NULL; 1468 1469 assert(val->bottom_type()->make_oopptr(), "need oop"); 1470 assert(val->bottom_type()->make_oopptr()->const_oop() == NULL, "expect non-constant"); 1471 1472 enum { _heap_stable = 1, _not_cset, _evac_path, _null_path, PATH_LIMIT }; 1473 Node* region = new RegionNode(PATH_LIMIT); 1474 Node* val_phi = new PhiNode(region, 
uncasted_val->bottom_type()->is_oopptr());
    Node* raw_mem_phi = PhiNode::make(region, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM);

    // Stable path.
    test_heap_stable(ctrl, raw_mem, heap_stable_ctrl, phase);
    IfNode* heap_stable_iff = heap_stable_ctrl->in(0)->as_If();

    // Heap stable case
    region->init_req(_heap_stable, heap_stable_ctrl);
    val_phi->init_req(_heap_stable, uncasted_val);
    raw_mem_phi->init_req(_heap_stable, raw_mem);

    Node* reg2_ctrl = NULL;
    // Null case
    test_null(ctrl, val, null_ctrl, phase);
    if (null_ctrl != NULL) {
      reg2_ctrl = null_ctrl->in(0);
      region->init_req(_null_path, null_ctrl);
      val_phi->init_req(_null_path, uncasted_val);
      raw_mem_phi->init_req(_null_path, raw_mem);
    } else {
      region->del_req(_null_path);
      val_phi->del_req(_null_path);
      raw_mem_phi->del_req(_null_path);
    }

    // Test for in-cset.
    // Wires !in_cset(obj) to slot 2 of region and phis
    Node* not_cset_ctrl = NULL;
    in_cset_fast_test(ctrl, not_cset_ctrl, uncasted_val, raw_mem, phase);
    if (not_cset_ctrl != NULL) {
      if (reg2_ctrl == NULL) reg2_ctrl = not_cset_ctrl->in(0);
      region->init_req(_not_cset, not_cset_ctrl);
      val_phi->init_req(_not_cset, uncasted_val);
      raw_mem_phi->init_req(_not_cset, raw_mem);
    }

    // Resolve object when orig-value is in cset.
    // Make the unconditional resolve for fwdptr.
    Node* new_val = uncasted_val;
    if (unc_ctrl != NULL) {
      // Clone the null check in this branch to allow implicit null check
      new_val = clone_null_check(ctrl, val, unc_ctrl, phase);
      fix_null_check(unc, unc_ctrl, ctrl->in(0)->as_If()->proj_out(0), uses, phase);

      IfNode* iff = unc_ctrl->in(0)->as_If();
      // The original null check is now always-taken; kill it.
      phase->igvn().replace_input_of(iff, 1, phase->igvn().intcon(1));
    }

    // Call lrb-stub and wire up that path in slots 4
    Node* result_mem = NULL;
    Node* fwd = new_val;
    Node* addr;
    if (ShenandoahSelfFixing) {
      VectorSet visited(Thread::current()->resource_area());
      addr = get_load_addr(phase, visited, lrb);
    } else {
      addr = phase->igvn().zerocon(T_OBJECT);
    }
    call_lrb_stub(ctrl, fwd, addr, result_mem, raw_mem, phase);
    region->init_req(_evac_path, ctrl);
    val_phi->init_req(_evac_path, fwd);
    raw_mem_phi->init_req(_evac_path, result_mem);

    phase->register_control(region, loop, heap_stable_iff);
    Node* out_val = val_phi;
    phase->register_new_node(val_phi, region);
    phase->register_new_node(raw_mem_phi, region);

    fix_ctrl(lrb, region, fixer, uses, uses_to_ignore, last, phase);

    ctrl = orig_ctrl;

    if (unc != NULL) {
      // Redirect uses of the casted value past the barrier to the merged
      // result.
      for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) {
        Node* u = val->fast_out(i);
        Node* c = phase->ctrl_or_self(u);
        if (u != lrb && (c != ctrl || is_dominator_same_ctrl(c, lrb, u, phase))) {
          phase->igvn().rehash_node_delayed(u);
          int nb = u->replace_edge(val, out_val);
          --i, imax -= nb;
        }
      }
      if (val->outcnt() == 0) {
        phase->igvn()._worklist.push(val);
      }
    }
    phase->igvn().replace_node(lrb, out_val);

    follow_barrier_uses(out_val, ctrl, uses, phase);

    for(uint next = 0; next < uses.size(); next++ ) {
      Node *n = uses.at(next);
      assert(phase->get_ctrl(n) == ctrl, "bad control");
      assert(n != init_raw_mem, "should leave input raw mem above the barrier");
      phase->set_ctrl(n, region);
      follow_barrier_uses(n, ctrl, uses, phase);
    }

    // The slow path call produces memory: hook the raw memory phi
    // from the expanded load reference barrier with the rest of the graph
    // which may require adding memory phis at every post dominated
    // region and at enclosing loop heads. Use the memory state
    // collected in memory_nodes to fix the memory graph. Update that
    // memory state as we go.
    fixer.fix_mem(ctrl, region, init_raw_mem, raw_mem_for_ctrl, raw_mem_phi, uses);
  }
  // Done expanding load-reference-barriers.
  assert(ShenandoahBarrierSetC2::bsc2()->state()->load_reference_barriers_count() == 0, "all load reference barrier nodes should have been replaced");

  // Expand enqueue (SATB) barriers: fast path pushes pre_val into the
  // thread-local SATB queue; slow path calls the runtime.
  for (int i = state->enqueue_barriers_count() - 1; i >= 0; i--) {
    Node* barrier = state->enqueue_barrier(i);
    Node* pre_val = barrier->in(1);

    if (phase->igvn().type(pre_val)->higher_equal(TypePtr::NULL_PTR)) {
      ShouldNotReachHere();
      continue;
    }

    Node* ctrl = phase->get_ctrl(barrier);

    // Hoist the barrier above the call it hangs off, when legal.
    if (ctrl->is_Proj() && ctrl->in(0)->is_CallJava()) {
      assert(is_dominator(phase->get_ctrl(pre_val), ctrl->in(0)->in(0), pre_val, ctrl->in(0), phase), "can't move");
      ctrl = ctrl->in(0)->in(0);
      phase->set_ctrl(barrier, ctrl);
    } else if (ctrl->is_CallRuntime()) {
      assert(is_dominator(phase->get_ctrl(pre_val), ctrl->in(0), pre_val, ctrl, phase), "can't move");
      ctrl = ctrl->in(0);
      phase->set_ctrl(barrier, ctrl);
    }

    Node* init_ctrl = ctrl;
    IdealLoopTree* loop = phase->get_loop(ctrl);
    Node* raw_mem = fixer.find_mem(ctrl, barrier);
    Node* init_raw_mem = raw_mem;
    Node* raw_mem_for_ctrl = fixer.find_mem(ctrl, NULL);
    Node* heap_stable_ctrl = NULL;
    Node* null_ctrl = NULL;
    uint last = phase->C->unique();

    enum { _heap_stable = 1, _heap_unstable, PATH_LIMIT };
    Node* region = new RegionNode(PATH_LIMIT);
    Node* phi = PhiNode::make(region, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM);

    enum { _fast_path = 1, _slow_path, _null_path, PATH_LIMIT2 };
    Node* region2 = new RegionNode(PATH_LIMIT2);
    Node* phi2 = PhiNode::make(region2, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM);

    // Stable path.
    test_heap_stable(ctrl, raw_mem, heap_stable_ctrl, phase);
    region->init_req(_heap_stable, heap_stable_ctrl);
    phi->init_req(_heap_stable, raw_mem);

    // Null path
    Node* reg2_ctrl = NULL;
    test_null(ctrl, pre_val, null_ctrl, phase);
    if (null_ctrl != NULL) {
      reg2_ctrl = null_ctrl->in(0);
      region2->init_req(_null_path, null_ctrl);
      phi2->init_req(_null_path, raw_mem);
    } else {
      region2->del_req(_null_path);
      phi2->del_req(_null_path);
    }

    // Load the thread-local SATB queue index and buffer pointer.
    const int index_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset());
    const int buffer_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset());
    Node* thread = new ThreadLocalNode();
    phase->register_new_node(thread, ctrl);
    Node* buffer_adr = new AddPNode(phase->C->top(), thread, phase->igvn().MakeConX(buffer_offset));
    phase->register_new_node(buffer_adr, ctrl);
    Node* index_adr = new AddPNode(phase->C->top(), thread, phase->igvn().MakeConX(index_offset));
    phase->register_new_node(index_adr, ctrl);

    BasicType index_bt = TypeX_X->basic_type();
    assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading G1 SATBMarkQueue::_index with wrong size.");
    const TypePtr* adr_type = TypeRawPtr::BOTTOM;
    Node* index = new LoadXNode(ctrl, raw_mem, index_adr, adr_type, TypeX_X, MemNode::unordered);
    phase->register_new_node(index, ctrl);
    // index == 0 means the queue buffer is full; take the slow path then.
    Node* index_cmp = new CmpXNode(index, phase->igvn().MakeConX(0));
    phase->register_new_node(index_cmp, ctrl);
    Node* index_test = new BoolNode(index_cmp, BoolTest::ne);
    phase->register_new_node(index_test, ctrl);
    IfNode* queue_full_iff = new IfNode(ctrl, index_test, PROB_LIKELY(0.999), COUNT_UNKNOWN);
    if (reg2_ctrl == NULL) reg2_ctrl = queue_full_iff;
    phase->register_control(queue_full_iff, loop, ctrl);
    Node* not_full = new IfTrueNode(queue_full_iff);
    phase->register_control(not_full, loop, queue_full_iff);
    Node* full = new IfFalseNode(queue_full_iff);
    phase->register_control(full, loop, queue_full_iff);

    ctrl = not_full;

    // Queue grows downwards: decrement the index, store pre_val at
    // buffer + new index, then publish the new index.
    Node* next_index = new SubXNode(index, phase->igvn().MakeConX(sizeof(intptr_t)));
    phase->register_new_node(next_index, ctrl);

    Node* buffer = new LoadPNode(ctrl, raw_mem, buffer_adr, adr_type, TypeRawPtr::NOTNULL, MemNode::unordered);
    phase->register_new_node(buffer, ctrl);
    Node *log_addr = new AddPNode(phase->C->top(), buffer, next_index);
    phase->register_new_node(log_addr, ctrl);
    Node* log_store = new StorePNode(ctrl, raw_mem, log_addr, adr_type, pre_val, MemNode::unordered);
    phase->register_new_node(log_store, ctrl);
    // update the index
    Node* index_update = new StoreXNode(ctrl, log_store, index_adr, adr_type, next_index, MemNode::unordered);
    phase->register_new_node(index_update, ctrl);

    // Fast-path case
    region2->init_req(_fast_path, ctrl);
    phi2->init_req(_fast_path, index_update);

    ctrl = full;

    // Slow path: runtime call, wired through a fresh MergeMem carrying the
    // raw memory state.
    Node* base = find_bottom_mem(ctrl, phase);

    MergeMemNode* mm = MergeMemNode::make(base);
    mm->set_memory_at(Compile::AliasIdxRaw, raw_mem);
    phase->register_new_node(mm, ctrl);

    Node* call = new CallLeafNode(ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type(), CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), "shenandoah_wb_pre", TypeRawPtr::BOTTOM);
    call->init_req(TypeFunc::Control, ctrl);
    call->init_req(TypeFunc::I_O, phase->C->top());
    call->init_req(TypeFunc::Memory, mm);
    call->init_req(TypeFunc::FramePtr, phase->C->top());
    call->init_req(TypeFunc::ReturnAdr, phase->C->top());
    call->init_req(TypeFunc::Parms, pre_val);
    call->init_req(TypeFunc::Parms+1, thread);
    phase->register_control(call, loop, ctrl);

    Node* ctrl_proj = new ProjNode(call, TypeFunc::Control);
    phase->register_control(ctrl_proj, loop, call);
    Node* mem_proj = new ProjNode(call, TypeFunc::Memory);
    phase->register_new_node(mem_proj, call);

    // Slow-path case
    region2->init_req(_slow_path, ctrl_proj);
    phi2->init_req(_slow_path, mem_proj);

    phase->register_control(region2, loop, reg2_ctrl);
    phase->register_new_node(phi2, region2);

    region->init_req(_heap_unstable, region2);
    phi->init_req(_heap_unstable, phi2);

    phase->register_control(region, loop, heap_stable_ctrl->in(0));
    phase->register_new_node(phi, region);

    fix_ctrl(barrier, region, fixer, uses, uses_to_ignore, last, phase);
    for(uint next = 0; next < uses.size(); next++ ) {
      Node *n = uses.at(next);
      assert(phase->get_ctrl(n) == init_ctrl, "bad control");
      assert(n != init_raw_mem, "should leave input raw mem above the barrier");
      phase->set_ctrl(n, region);
      follow_barrier_uses(n, init_ctrl, uses, phase);
    }
    fixer.fix_mem(init_ctrl, region, init_raw_mem, raw_mem_for_ctrl, phi, uses);

    phase->igvn().replace_node(barrier, pre_val);
  }
  assert(state->enqueue_barriers_count() == 0, "all enqueue barrier nodes should have been replaced");

}

// Recover the address the barrier's input value was loaded from, walking
// through casts/projections (NULL when it cannot be determined; the visited
// set guards against cycles).
Node* ShenandoahBarrierC2Support::get_load_addr(PhaseIdealLoop* phase, VectorSet& visited, Node* in) {
  if (visited.test_set(in->_idx)) {
    return NULL;
  }
  switch (in->Opcode()) {
    case Op_Proj:
      return get_load_addr(phase, visited, in->in(0));
    case Op_CastPP:
    case Op_CheckCastPP:
    case Op_DecodeN:
    case Op_EncodeP:
      return get_load_addr(phase, visited,
in->in(1)); 1748 case Op_LoadN: 1749 case Op_LoadP: 1750 return in->in(MemNode::Address); 1751 case Op_CompareAndExchangeN: 1752 case Op_CompareAndExchangeP: 1753 case Op_GetAndSetN: 1754 case Op_GetAndSetP: 1755 case Op_ShenandoahCompareAndExchangeP: 1756 case Op_ShenandoahCompareAndExchangeN: 1757 // Those instructions would just have stored a different 1758 // value into the field. No use to attempt to fix it at this point. 1759 return phase->igvn().zerocon(T_OBJECT); 1760 case Op_Phi: { 1761 Node* addr = NULL; 1762 for (uint i = 1; i < in->req(); i++) { 1763 Node* addr1 = get_load_addr(phase, visited, in->in(i)); 1764 if (addr == NULL) { 1765 addr = addr1; 1766 } 1767 if (addr != addr1) { 1768 return phase->igvn().zerocon(T_OBJECT); 1769 } 1770 } 1771 return addr; 1772 } 1773 case Op_ShenandoahLoadReferenceBarrier: 1774 return get_load_addr(phase, visited, in->in(ShenandoahLoadReferenceBarrierNode::ValueIn)); 1775 case Op_ShenandoahEnqueueBarrier: 1776 return get_load_addr(phase, visited, in->in(1)); 1777 case Op_CallDynamicJava: 1778 case Op_CallLeaf: 1779 case Op_CallStaticJava: 1780 case Op_ConN: 1781 case Op_ConP: 1782 return phase->igvn().zerocon(T_OBJECT); 1783 default: 1784 #ifdef ASSERT 1785 in->dump(); 1786 ShouldNotReachHere(); 1787 #endif 1788 return phase->igvn().zerocon(T_OBJECT); 1789 } 1790 1791 } 1792 1793 void ShenandoahBarrierC2Support::move_heap_stable_test_out_of_loop(IfNode* iff, PhaseIdealLoop* phase) { 1794 IdealLoopTree *loop = phase->get_loop(iff); 1795 Node* loop_head = loop->_head; 1796 Node* entry_c = loop_head->in(LoopNode::EntryControl); 1797 1798 Node* bol = iff->in(1); 1799 Node* cmp = bol->in(1); 1800 Node* andi = cmp->in(1); 1801 Node* load = andi->in(1); 1802 1803 assert(is_gc_state_load(load), "broken"); 1804 if (!phase->is_dominator(load->in(0), entry_c)) { 1805 Node* mem_ctrl = NULL; 1806 Node* mem = dom_mem(load->in(MemNode::Memory), loop_head, Compile::AliasIdxRaw, mem_ctrl, phase); 1807 load = load->clone(); 1808 
// (Continuation of move_heap_stable_test_out_of_loop: re-anchor the cloned
// gc-state test chain at the loop entry control.)
    load->set_req(MemNode::Memory, mem);
    load->set_req(0, entry_c);
    phase->register_new_node(load, entry_c);
    andi = andi->clone();
    andi->set_req(1, load);
    phase->register_new_node(andi, entry_c);
    cmp = cmp->clone();
    cmp->set_req(1, andi);
    phase->register_new_node(cmp, entry_c);
    bol = bol->clone();
    bol->set_req(1, cmp);
    phase->register_new_node(bol, entry_c);

    // NOTE(review): old_bol is never used after this point — candidate for
    // removal.
    Node* old_bol = iff->in(1);
    phase->igvn().replace_input_of(iff, 1, bol);
  }
}

// Returns true when 'n' is a heap-stable If whose controlling region merges
// only paths coming out of an identical, immediately-dominating heap-stable
// If — i.e. the two tests are back-to-back and can be merged by split-if.
bool ShenandoahBarrierC2Support::identical_backtoback_ifs(Node* n, PhaseIdealLoop* phase) {
  if (!n->is_If() || n->is_CountedLoopEnd()) {
    return false;
  }
  Node* region = n->in(0);

  if (!region->is_Region()) {
    return false;
  }
  Node* dom = phase->idom(region);
  if (!dom->is_If()) {
    return false;
  }

  if (!is_heap_stable_test(n) || !is_heap_stable_test(dom)) {
    return false;
  }

  IfNode* dom_if = dom->as_If();
  Node* proj_true = dom_if->proj_out(1);
  Node* proj_false = dom_if->proj_out(0);

  // Every region input must be reached through one of the dominating if's
  // projections; otherwise the tests are not truly back-to-back.
  for (uint i = 1; i < region->req(); i++) {
    if (phase->is_dominator(proj_true, region->in(i))) {
      continue;
    }
    if (phase->is_dominator(proj_false, region->in(i))) {
      continue;
    }
    return false;
  }

  return true;
}

// Merge a heap-stable test with an identical dominating one: feed the second
// If a phi of constants (the known outcome along each region path) and let
// split-if eliminate it.
void ShenandoahBarrierC2Support::merge_back_to_back_tests(Node* n, PhaseIdealLoop* phase) {
  assert(is_heap_stable_test(n), "no other tests");
  if (identical_backtoback_ifs(n, phase)) {
    Node* n_ctrl = n->in(0);
    if (phase->can_split_if(n_ctrl)) {
      IfNode* dom_if = phase->idom(n_ctrl)->as_If();
      if (is_heap_stable_test(n)) {
        // Make both tests use the same gc-state load so they compute the
        // same condition.
        Node* gc_state_load = n->in(1)->in(1)->in(1)->in(1);
        assert(is_gc_state_load(gc_state_load), "broken");
        Node* dom_gc_state_load = dom_if->in(1)->in(1)->in(1)->in(1);
        assert(is_gc_state_load(dom_gc_state_load), "broken");
        if (gc_state_load != dom_gc_state_load) {
          phase->igvn().replace_node(gc_state_load, dom_gc_state_load);
        }
      }
      PhiNode* bolphi = PhiNode::make_blank(n_ctrl, n->in(1));
      Node* proj_true = dom_if->proj_out(1);
      Node* proj_false = dom_if->proj_out(0);
      Node* con_true = phase->igvn().makecon(TypeInt::ONE);
      Node* con_false = phase->igvn().makecon(TypeInt::ZERO);

      // On paths from the dominating true-proj the condition is known true,
      // from the false-proj known false.
      for (uint i = 1; i < n_ctrl->req(); i++) {
        if (phase->is_dominator(proj_true, n_ctrl->in(i))) {
          bolphi->init_req(i, con_true);
        } else {
          assert(phase->is_dominator(proj_false, n_ctrl->in(i)), "bad if");
          bolphi->init_req(i, con_false);
        }
      }
      phase->register_new_node(bolphi, n_ctrl);
      phase->igvn().replace_input_of(n, 1, bolphi);
      phase->do_split_if(n);
    }
  }
}

// Look for a heap-stable test inside the loop that is a candidate for loop
// unswitching. Only loops without non-leaf safepoints qualify (a safepoint
// could change the gc state mid-loop).
IfNode* ShenandoahBarrierC2Support::find_unswitching_candidate(const IdealLoopTree* loop, PhaseIdealLoop* phase) {
  // Find first invariant test that doesn't exit the loop
  LoopNode *head = loop->_head->as_Loop();
  IfNode* unswitch_iff = NULL;
  Node* n = head->in(LoopNode::LoopBackControl);
  // -1 = not yet computed; 0 = no safepoints; 1 = has safepoints.
  int loop_has_sfpts = -1;
  // Walk the idom chain from the back edge up to the loop head.
  while (n != head) {
    Node* n_dom = phase->idom(n);
    if (n->is_Region()) {
      if (n_dom->is_If()) {
        IfNode* iff = n_dom->as_If();
        if (iff->in(1)->is_Bool()) {
          BoolNode* bol = iff->in(1)->as_Bool();
          if (bol->in(1)->is_Cmp()) {
            // If condition is invariant and not a loop exit,
            // then found reason to unswitch.
// (Continuation of find_unswitching_candidate: accept the heap-stable test
// only if the loop contains no non-CallLeaf safepoints.)
            if (is_heap_stable_test(iff) &&
                (loop_has_sfpts == -1 || loop_has_sfpts == 0)) {
              assert(!loop->is_loop_exit(iff), "both branches should be in the loop");
              if (loop_has_sfpts == -1) {
                // Lazily scan the loop body for safepoints, once.
                for(uint i = 0; i < loop->_body.size(); i++) {
                  Node *m = loop->_body[i];
                  if (m->is_SafePoint() && !m->is_CallLeaf()) {
                    loop_has_sfpts = 1;
                    break;
                  }
                }
                if (loop_has_sfpts == -1) {
                  loop_has_sfpts = 0;
                }
              }
              if (!loop_has_sfpts) {
                unswitch_iff = iff;
              }
            }
          }
        }
      }
    }
    n = n_dom;
  }
  return unswitch_iff;
}


// Post-expansion cleanup pass: commons gc-state loads, merges back-to-back
// heap-stable tests, and unswitches innermost loops on loop-invariant
// heap-stable tests.
void ShenandoahBarrierC2Support::optimize_after_expansion(VectorSet &visited, Node_Stack &stack, Node_List &old_new, PhaseIdealLoop* phase) {
  Node_List heap_stable_tests;
  Node_List gc_state_loads;
  // Post-order DFS over the graph collecting gc-state loads and
  // heap-stable tests.
  stack.push(phase->C->start(), 0);
  do {
    Node* n = stack.node();
    uint i = stack.index();

    if (i < n->outcnt()) {
      Node* u = n->raw_out(i);
      stack.set_index(i+1);
      if (!visited.test_set(u->_idx)) {
        stack.push(u, 0);
      }
    } else {
      stack.pop();
      if (ShenandoahCommonGCStateLoads && is_gc_state_load(n)) {
        gc_state_loads.push(n);
      }
      if (n->is_If() && is_heap_stable_test(n)) {
        heap_stable_tests.push(n);
      }
    }
  } while (stack.size() > 0);

  // Iterate commoning of gc-state loads to a fixed point (commoning one
  // load can enable commoning another).
  bool progress;
  do {
    progress = false;
    for (uint i = 0; i < gc_state_loads.size(); i++) {
      Node* n = gc_state_loads.at(i);
      if (n->outcnt() != 0) {
        progress |= try_common_gc_state_load(n, phase);
      }
    }
  } while (progress);

  for (uint i = 0; i < heap_stable_tests.size(); i++) {
    Node* n = heap_stable_tests.at(i);
    assert(is_heap_stable_test(n), "only evacuation test");
    merge_back_to_back_tests(n, phase);
  }

  if (!phase->C->major_progress()) {
    VectorSet seen(Thread::current()->resource_area());
    for (uint i = 0; i < heap_stable_tests.size(); i++) {
      Node* n = heap_stable_tests.at(i);
      IdealLoopTree* loop = phase->get_loop(n);
      // Only innermost, reducible, non-root loops are unswitched.
      if (loop != phase->ltree_root() &&
          loop->_child == NULL &&
          !loop->_irreducible) {
        LoopNode* head = loop->_head->as_Loop();
        if ((!head->is_CountedLoop() || head->as_CountedLoop()->is_main_loop() || head->as_CountedLoop()->is_normal_loop()) &&
            !seen.test_set(head->_idx)) {
          IfNode* iff = find_unswitching_candidate(loop, phase);
          if (iff != NULL) {
            // Remember the original condition so it can be restored if
            // unswitching is declined by policy.
            Node* bol = iff->in(1);
            if (head->is_strip_mined()) {
              head->verify_strip_mined(0);
            }
            move_heap_stable_test_out_of_loop(iff, phase);

            AutoNodeBudget node_budget(phase);

            if (loop->policy_unswitching(phase)) {
              if (head->is_strip_mined()) {
                OuterStripMinedLoopNode* outer = head->as_CountedLoop()->outer_loop();
                hide_strip_mined_loop(outer, head->as_CountedLoop(), phase);
              }
              phase->do_unswitching(loop, old_new);
            } else {
              // Not proceeding with unswitching. Move load back in
              // the loop.
              phase->igvn().replace_input_of(iff, 1, bol);
            }
          }
        }
      }
    }
  }
}

#ifdef ASSERT
// Debug-only verification: for every Shenandoah LRB runtime call, check that
// the raw-memory graph is consistent with the control graph — every region
// reachable on all paths either has a matching raw-memory phi or merges
// paths that all carry the call's memory state.
void ShenandoahBarrierC2Support::verify_raw_mem(RootNode* root) {
  const bool trace = false;
  ResourceMark rm;
  Unique_Node_List nodes;
  Unique_Node_List controls;
  Unique_Node_List memories;

  nodes.push(root);
  for (uint next = 0; next < nodes.size(); next++) {
    Node *n = nodes.at(next);
    if (ShenandoahBarrierSetC2::is_shenandoah_lrb_call(n)) {
      // Collect the CFG nodes reachable from the call...
      controls.push(n);
      if (trace) { tty->print("XXXXXX verifying"); n->dump(); }
      for (uint next2 = 0; next2 < controls.size(); next2++) {
        Node *m = controls.at(next2);
        for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) {
          Node* u = m->fast_out(i);
          if (u->is_CFG() && !u->is_Root() &&
              !(u->Opcode() == Op_CProj && u->in(0)->Opcode() == Op_NeverBranch && u->as_Proj()->_con == 1) &&
              !(u->is_Region() && u->unique_ctrl_out()->Opcode() == Op_Halt)) {
            if (trace) { tty->print("XXXXXX pushing control"); u->dump(); }
            controls.push(u);
          }
        }
      }
      // ...and the raw-memory nodes reachable from its memory projection.
      memories.push(n->as_Call()->proj_out(TypeFunc::Memory));
      for (uint next2 = 0; next2 < memories.size(); next2++) {
        Node *m = memories.at(next2);
        assert(m->bottom_type() == Type::MEMORY, "");
        for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) {
          Node* u = m->fast_out(i);
          if (u->bottom_type() == Type::MEMORY && (u->is_Mem() || u->is_ClearArray())) {
            if (trace) { tty->print("XXXXXX pushing memory"); u->dump(); }
            memories.push(u);
          } else if (u->is_LoadStore()) {
            if (trace) { tty->print("XXXXXX pushing memory"); u->find_out_with(Op_SCMemProj)->dump(); }
            memories.push(u->find_out_with(Op_SCMemProj));
          } else if (u->is_MergeMem() && u->as_MergeMem()->memory_at(Compile::AliasIdxRaw) == m) {
            if (trace) { tty->print("XXXXXX pushing memory"); u->dump(); }
            memories.push(u);
          }
// (Continuation of verify_raw_mem's memory-walk: phis and safepoint/membar
// memory users.)
          else if (u->is_Phi()) {
            assert(u->bottom_type() == Type::MEMORY, "");
            if (u->adr_type() == TypeRawPtr::BOTTOM || u->adr_type() == TypePtr::BOTTOM) {
              // A raw-memory phi must merge at a control we reached.
              assert(controls.member(u->in(0)), "");
              if (trace) { tty->print("XXXXXX pushing memory"); u->dump(); }
              memories.push(u);
            }
          } else if (u->is_SafePoint() || u->is_MemBar()) {
            for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
              Node* uu = u->fast_out(j);
              if (uu->bottom_type() == Type::MEMORY) {
                if (trace) { tty->print("XXXXXX pushing memory"); uu->dump(); }
                memories.push(uu);
              }
            }
          }
        }
      }
      // Check each reached region: either all its inputs were reached
      // (all_in), or it has a memory phi whose per-path inputs agree with
      // the per-path control reachability.
      for (uint next2 = 0; next2 < controls.size(); next2++) {
        Node *m = controls.at(next2);
        if (m->is_Region()) {
          bool all_in = true;
          for (uint i = 1; i < m->req(); i++) {
            if (!controls.member(m->in(i))) {
              all_in = false;
              break;
            }
          }
          if (trace) { tty->print("XXX verifying %s", all_in ? "all in" : ""); m->dump(); }
          bool found_phi = false;
          for (DUIterator_Fast jmax, j = m->fast_outs(jmax); j < jmax && !found_phi; j++) {
            Node* u = m->fast_out(j);
            if (u->is_Phi() && memories.member(u)) {
              found_phi = true;
              for (uint i = 1; i < u->req() && found_phi; i++) {
                Node* k = u->in(i);
                if (memories.member(k) != controls.member(m->in(i))) {
                  found_phi = false;
                }
              }
            }
          }
          assert(found_phi || all_in, "");
        }
      }
      controls.clear();
      memories.clear();
    }
    for( uint i = 0; i < n->len(); ++i ) {
      Node *m = n->in(i);
      if (m != NULL) {
        nodes.push(m);
      }
    }
  }
}
#endif

// Registers the new enqueue barrier with the barrier-set state so the
// expansion pass can find it later.
ShenandoahEnqueueBarrierNode::ShenandoahEnqueueBarrierNode(Node* val) : Node(NULL, val) {
  ShenandoahBarrierSetC2::bsc2()->state()->add_enqueue_barrier(this);
}

// The barrier's type is its input's type, stripped of constness (NULL passes
// through unchanged).
const Type* ShenandoahEnqueueBarrierNode::bottom_type() const {
  if (in(1) == NULL || in(1)->is_top()) {
    return Type::TOP;
  }
  const Type* t = in(1)->bottom_type();
  if (t == TypePtr::NULL_PTR) {
    return t;
  }
  return t->is_oopptr()->cast_to_nonconst();
}

// GVN value: same as bottom_type() but based on the phase's notion of the
// input's type.
const Type* ShenandoahEnqueueBarrierNode::Value(PhaseGVN* phase) const {
  if (in(1) == NULL) {
    return Type::TOP;
  }
  const Type* t = phase->type(in(1));
  if (t == Type::TOP) {
    return Type::TOP;
  }
  if (t == TypePtr::NULL_PTR) {
    return t;
  }
  return t->is_oopptr()->cast_to_nonconst();
}

// Classifies whether a value flowing into an enqueue barrier actually needs
// one: fresh allocations, another enqueue barrier, NULL and constant oops do
// not; phis/CMoves depend on their inputs (MaybeNeeded); anything else does.
int ShenandoahEnqueueBarrierNode::needed(Node* n) {
  if (n == NULL ||
      n->is_Allocate() ||
      n->Opcode() == Op_ShenandoahEnqueueBarrier ||
      n->bottom_type() == TypePtr::NULL_PTR ||
      (n->bottom_type()->make_oopptr() != NULL && n->bottom_type()->make_oopptr()->const_oop() != NULL)) {
    return NotNeeded;
  }
  if (n->is_Phi() ||
      n->is_CMove()) {
    return MaybeNeeded;
  }
  return Needed;
}

// Skip through value-preserving nodes (casts, encode/decode, projections)
// to the node that determines whether a barrier is needed.
Node* ShenandoahEnqueueBarrierNode::next(Node* n) {
  for (;;) {
    if (n == NULL) {
      return n;
    } else if (n->bottom_type() == TypePtr::NULL_PTR) {
      return n;
    } else if (n->bottom_type()->make_oopptr() != NULL && n->bottom_type()->make_oopptr()->const_oop() != NULL) {
      return n;
    } else if (n->is_ConstraintCast() ||
               n->Opcode() == Op_DecodeN ||
               n->Opcode() == Op_EncodeP) {
      n = n->in(1);
    } else if (n->is_Proj()) {
      n = n->in(0);
    } else {
      return n;
    }
  }
  ShouldNotReachHere();
  return NULL;
}

// Identity transform: the barrier is redundant (returns its input) when no
// value reaching it needs enqueueing. Phis/CMoves are explored through a
// worklist; any input that definitely needs a barrier keeps this node.
Node* ShenandoahEnqueueBarrierNode::Identity(PhaseGVN* phase) {
  PhaseIterGVN* igvn = phase->is_IterGVN();

  Node* n = next(in(1));

  int cont = needed(n);

  if (cont == NotNeeded) {
    return in(1);
  } else if (cont == MaybeNeeded) {
    if (igvn == NULL) {
      // Can't explore during parse-time GVN; retry during IGVN.
      phase->record_for_igvn(this);
      return this;
    } else {
      ResourceMark rm;
      Unique_Node_List wq;
      uint wq_i = 0;

      for (;;) {
        // Expand the current MaybeNeeded node's inputs onto the worklist.
        if (n->is_Phi()) {
          for (uint i = 1; i < n->req(); i++) {
            Node* m = n->in(i);
            if (m != NULL) {
              wq.push(m);
            }
          }
        } else {
          assert(n->is_CMove(), "nothing else here");
          Node* m = n->in(CMoveNode::IfFalse);
          wq.push(m);
          m = n->in(CMoveNode::IfTrue);
          wq.push(m);
        }
        // Drain until a new MaybeNeeded node is found, a Needed node keeps
        // the barrier, or the worklist is exhausted (barrier not needed).
        Node* orig_n = NULL;
        do {
          if (wq_i >= wq.size()) {
            return in(1);
          }
          n = wq.at(wq_i);
          wq_i++;
          orig_n = n;
          n = next(n);
          cont = needed(n);
          if (cont == Needed) {
            return this;
          }
        } while (cont != MaybeNeeded || (orig_n != n && wq.member(n)));
      }
    }
  }

  return this;
}

#ifdef ASSERT
// True if the root merges a Halt fed by a NeverBranch projection — i.e. the
// graph contains an infinite loop; used to relax verification below.
static bool has_never_branch(Node* root) {
  for (uint i = 1; i < root->req(); i++) {
    Node* in = root->in(i);
    if (in != NULL && in->Opcode() == Op_Halt && in->in(0)->is_Proj() && in->in(0)->in(0)->Opcode() == Op_NeverBranch) {
      return true;
    }
  }
  return false;
}
#endif

// Build the CFG-node -> memory-state mapping for this fixer's alias class by
// walking the raw memory graph, then propagate states to regions in RPO,
// creating phis where paths disagree.
void MemoryGraphFixer::collect_memory_nodes() {
  Node_Stack stack(0);
  VectorSet visited(Thread::current()->resource_area());
  Node_List regions;

  // Walk the raw memory graph and create a mapping from CFG node to
  // memory node. Exclude phis for now.
  // (Body of MemoryGraphFixer::collect_memory_nodes, continued.)
  // Phase 1: DFS down from the root's memory inputs, recording for every
  // memory-producing node the CFG node it belongs to.
  stack.push(_phase->C->root(), 1);
  do {
    Node* n = stack.node();
    int opc = n->Opcode();
    uint i = stack.index();
    if (i < n->req()) {
      Node* mem = NULL;
      if (opc == Op_Root) {
        // Find the memory state feeding each graph exit.
        Node* in = n->in(i);
        int in_opc = in->Opcode();
        if (in_opc == Op_Return || in_opc == Op_Rethrow) {
          mem = in->in(TypeFunc::Memory);
        } else if (in_opc == Op_Halt) {
          if (!in->in(0)->is_Region()) {
            Node* proj = in->in(0);
            assert(proj->is_Proj(), "");
            // NOTE(review): this 'in' intentionally shadows the outer 'in'.
            Node* in = proj->in(0);
            assert(in->is_CallStaticJava() || in->Opcode() == Op_NeverBranch || in->Opcode() == Op_Catch || proj->is_IfProj(), "");
            if (in->is_CallStaticJava()) {
              mem = in->in(TypeFunc::Memory);
            } else if (in->Opcode() == Op_Catch) {
              Node* call = in->in(0)->in(0);
              assert(call->is_Call(), "");
              mem = call->in(TypeFunc::Memory);
            } else if (in->Opcode() == Op_NeverBranch) {
              // Infinite loop: find the single safepoint inside it to get a
              // memory state.
              ResourceMark rm;
              Unique_Node_List wq;
              wq.push(in);
              wq.push(in->as_Multi()->proj_out(0));
              for (uint j = 1; j < wq.size(); j++) {
                Node* c = wq.at(j);
                assert(!c->is_Root(), "shouldn't leave loop");
                if (c->is_SafePoint()) {
                  assert(mem == NULL, "only one safepoint");
                  mem = c->in(TypeFunc::Memory);
                }
                for (DUIterator_Fast kmax, k = c->fast_outs(kmax); k < kmax; k++) {
                  Node* u = c->fast_out(k);
                  if (u->is_CFG()) {
                    wq.push(u);
                  }
                }
              }
              assert(mem != NULL, "should have found safepoint");
            }
          }
        } else {
#ifdef ASSERT
          n->dump();
          in->dump();
#endif
          ShouldNotReachHere();
        }
      } else {
        // Only memory phis of the right alias class get pushed on the stack.
        assert(n->is_Phi() && n->bottom_type() == Type::MEMORY, "");
        assert(n->adr_type() == TypePtr::BOTTOM || _phase->C->get_alias_index(n->adr_type()) == _alias, "");
        mem = n->in(i);
      }
      i++;
      stack.set_index(i);
      if (mem == NULL) {
        continue;
      }
      // Walk the memory chain upwards, pushing multi-input nodes for later.
      for (;;) {
        if (visited.test_set(mem->_idx) || mem->is_Start()) {
          break;
        }
        if (mem->is_Phi()) {
          stack.push(mem, 2);
          mem = mem->in(1);
        } else if (mem->is_Proj()) {
          stack.push(mem, mem->req());
          mem = mem->in(0);
        } else if (mem->is_SafePoint() || mem->is_MemBar()) {
          mem = mem->in(TypeFunc::Memory);
        } else if (mem->is_MergeMem()) {
          MergeMemNode* mm = mem->as_MergeMem();
          mem = mm->memory_at(_alias);
        } else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) {
          assert(_alias == Compile::AliasIdxRaw, "");
          stack.push(mem, mem->req());
          mem = mem->in(MemNode::Memory);
        } else {
#ifdef ASSERT
          mem->dump();
#endif
          ShouldNotReachHere();
        }
      }
    } else {
      // Post-order: record the memory node at its control.
      if (n->is_Phi()) {
        // Nothing
      } else if (!n->is_Root()) {
        Node* c = get_ctrl(n);
        _memory_nodes.map(c->_idx, n);
      }
      stack.pop();
    }
  } while(stack.is_nonempty());

  // Iterate over CFG nodes in rpo and propagate memory state to
  // compute memory state at regions, creating new phis if needed.
  Node_List rpo_list;
  visited.Clear();
  _phase->rpo(_phase->C->root(), stack, visited, rpo_list);
  Node* root = rpo_list.pop();
  assert(root == _phase->C->root(), "");

  const bool trace = false;
#ifdef ASSERT
  if (trace) {
    for (int i = rpo_list.size() - 1; i >= 0; i--) {
      Node* c = rpo_list.at(i);
      if (_memory_nodes[c->_idx] != NULL) {
        tty->print("X %d", c->_idx); _memory_nodes[c->_idx]->dump();
      }
    }
  }
#endif
  // Nodes created from here on have _idx >= last; used below to tell newly
  // created phis from pre-existing ones.
  uint last = _phase->C->unique();

#ifdef ASSERT
  uint8_t max_depth = 0;
  for (LoopTreeIterator iter(_phase->ltree_root()); !iter.done(); iter.next()) {
    IdealLoopTree* lpt = iter.current();
    max_depth = MAX2(max_depth, lpt->_nest);
  }
#endif

  // Phase 2: fixed-point propagation of memory state over the CFG in RPO.
  bool progress = true;
  int iteration = 0;
  Node_List dead_phis;
  while (progress) {
    progress = false;
    iteration++;
    // Convergence is bounded by loop nesting depth, except for irreducible
    // loops and infinite loops.
    assert(iteration <= 2+max_depth || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "");
    if (trace) { tty->print_cr("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"); }
    // NOTE(review): last_updated_ilt is never used — candidate for removal.
    IdealLoopTree* last_updated_ilt = NULL;
    for (int i = rpo_list.size() - 1; i >= 0; i--) {
      Node* c = rpo_list.at(i);

      Node* prev_mem = _memory_nodes[c->_idx];
      if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
        Node* prev_region = regions[c->_idx];
        // Determine whether all inputs agree on one memory state ('unique')
        // or a phi is required (NodeSentinel).
        Node* unique = NULL;
        for (uint j = 1; j < c->req() && unique != NodeSentinel; j++) {
          Node* m = _memory_nodes[c->in(j)->_idx];
          assert(m != NULL || (c->is_Loop() && j == LoopNode::LoopBackControl && iteration == 1) || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "expect memory state");
          if (m != NULL) {
            if (m == prev_region && ((c->is_Loop() && j == LoopNode::LoopBackControl) || (prev_region->is_Phi() && prev_region->in(0) == c))) {
              assert(c->is_Loop() && j == LoopNode::LoopBackControl || _phase->C->has_irreducible_loop(), "");
              // continue
            } else if (unique == NULL) {
              unique = m;
            } else if (m == unique) {
              // continue
            } else {
              unique = NodeSentinel;
            }
          }
        }
        assert(unique != NULL, "empty phi???");
        if (unique != NodeSentinel) {
          // All inputs agree: a previously created phi here is now dead.
          if (prev_region != NULL && prev_region->is_Phi() && prev_region->in(0) == c) {
            dead_phis.push(prev_region);
          }
          regions.map(c->_idx, unique);
        } else {
          // Inputs disagree: reuse or create a memory phi at this region.
          Node* phi = NULL;
          if (prev_region != NULL && prev_region->is_Phi() && prev_region->in(0) == c && prev_region->_idx >= last) {
            phi = prev_region;
            for (uint k = 1; k < c->req(); k++) {
              Node* m = _memory_nodes[c->in(k)->_idx];
              assert(m != NULL, "expect memory state");
              phi->set_req(k, m);
            }
          } else {
            // Try to find an existing phi whose inputs already match.
            for (DUIterator_Fast jmax, j = c->fast_outs(jmax); j < jmax && phi == NULL; j++) {
              Node* u = c->fast_out(j);
              if (u->is_Phi() && u->bottom_type() == Type::MEMORY &&
                  (u->adr_type() == TypePtr::BOTTOM || _phase->C->get_alias_index(u->adr_type()) == _alias)) {
                phi = u;
                for (uint k = 1; k < c->req() && phi != NULL; k++) {
                  Node* m = _memory_nodes[c->in(k)->_idx];
                  assert(m != NULL, "expect memory state");
                  if (u->in(k) != m) {
                    phi = NULL;
                  }
                }
              }
            }
            if (phi == NULL) {
              phi = new PhiNode(c, Type::MEMORY, _phase->C->get_adr_type(_alias));
              for (uint k = 1; k < c->req(); k++) {
                Node* m = _memory_nodes[c->in(k)->_idx];
                assert(m != NULL, "expect memory state");
                phi->init_req(k, m);
              }
            }
          }
          assert(phi != NULL, "");
          regions.map(c->_idx, phi);
        }
        Node* current_region = regions[c->_idx];
        if (current_region != prev_region) {
          progress = true;
          if (prev_region == prev_mem) {
            _memory_nodes.map(c->_idx, current_region);
          }
        }
      } else if (prev_mem == NULL || prev_mem->is_Phi() || ctrl_or_self(prev_mem) != c) {
        // Non-region (or stale state): inherit memory from the idom.
        Node* m = _memory_nodes[_phase->idom(c)->_idx];
        assert(m != NULL, "expect memory state");
        if (m != prev_mem) {
          _memory_nodes.map(c->_idx, m);
          progress = true;
        }
      }
#ifdef ASSERT
      if (trace) { tty->print("X %d", c->_idx); _memory_nodes[c->_idx]->dump(); }
#endif
    }
  }

  // Replace existing phi with computed memory state for that region
  // if different (could be a new phi or a dominating memory node if
  // that phi was found to be useless).
  while (dead_phis.size() > 0) {
    Node* n = dead_phis.pop();
    n->replace_by(_phase->C->top());
    n->destruct();
  }
  // Register any newly created phis that survived.
  for (int i = rpo_list.size() - 1; i >= 0; i--) {
    Node* c = rpo_list.at(i);
    if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
      Node* n = regions[c->_idx];
      if (n->is_Phi() && n->_idx >= last && n->in(0) == c) {
        _phase->register_new_node(n, c);
      }
    }
  }
  // Fold pre-existing same-alias phis into the computed state.
  for (int i = rpo_list.size() - 1; i >= 0; i--) {
    Node* c = rpo_list.at(i);
    if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
      Node* n = regions[c->_idx];
      for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax; i++) {
        Node* u = c->fast_out(i);
        if (u->is_Phi() && u->bottom_type() == Type::MEMORY &&
            u != n) {
          if (u->adr_type() == TypePtr::BOTTOM) {
            fix_memory_uses(u, n, n, c);
          } else if (_phase->C->get_alias_index(u->adr_type()) == _alias) {
            _phase->lazy_replace(u, n);
            --i; --imax;
          }
        }
      }
    }
  }
}

// Control for a memory node, refined for call projections: a call's memory
// projections are mapped to the matching catch projection when the call has
// a catch-all path.
Node* MemoryGraphFixer::get_ctrl(Node* n) const {
  Node* c = _phase->get_ctrl(n);
  if (n->is_Proj() && n->in(0) != NULL && n->in(0)->is_Call()) {
    assert(c == n->in(0), "");
    CallNode* call = c->as_Call();
    CallProjections projs;
    call->extract_projections(&projs, true, false);
    if (projs.catchall_memproj != NULL) {
      if (projs.fallthrough_memproj == n) {
        c =
        // (statement continues in the next chunk of the file)
// (Continuation of MemoryGraphFixer::get_ctrl.)
            projs.fallthrough_catchproj;
      } else {
        assert(projs.catchall_memproj == n, "");
        c = projs.catchall_catchproj;
      }
    }
  }
  return c;
}

// Control of a node: its get_ctrl() if it is a data node, itself if it is
// a CFG node.
Node* MemoryGraphFixer::ctrl_or_self(Node* n) const {
  if (_phase->has_ctrl(n))
    return get_ctrl(n);
  else {
    assert (n->is_CFG(), "must be a CFG node");
    return n;
  }
}

// A memory node m is the valid state at control c when it is pinned at c.
bool MemoryGraphFixer::mem_is_valid(Node* m, Node* c) const {
  return m != NULL && get_ctrl(m) == c;
}

// Find the memory state (for this fixer's alias class) live at 'ctrl'.
// Walks up the dominator tree until a valid state is found; when 'n' is
// given, additionally skips memory nodes at the same control that do not
// dominate 'n'.
Node* MemoryGraphFixer::find_mem(Node* ctrl, Node* n) const {
  assert(n == NULL || _phase->ctrl_or_self(n) == ctrl, "");
  Node* mem = _memory_nodes[ctrl->_idx];
  Node* c = ctrl;
  while (!mem_is_valid(mem, c) &&
         (!c->is_CatchProj() || mem == NULL || c->in(0)->in(0)->in(0) != get_ctrl(mem))) {
    c = _phase->idom(c);
    mem = _memory_nodes[c->_idx];
  }
  if (n != NULL && mem_is_valid(mem, c)) {
    while (!ShenandoahBarrierC2Support::is_dominator_same_ctrl(c, mem, n, _phase) && _phase->ctrl_or_self(mem) == ctrl) {
      mem = next_mem(mem, _alias);
    }
    if (mem->is_MergeMem()) {
      mem = mem->as_MergeMem()->memory_at(_alias);
    }
    if (!mem_is_valid(mem, c)) {
      // Walked past all states at this control: resume the idom walk.
      do {
        c = _phase->idom(c);
        mem = _memory_nodes[c->_idx];
      } while (!mem_is_valid(mem, c) &&
               (!c->is_CatchProj() || mem == NULL || c->in(0)->in(0)->in(0) != get_ctrl(mem)));
    }
  }
  assert(mem->bottom_type() == Type::MEMORY, "");
  return mem;
}

// True when 'region' already has a memory phi for this fixer's alias class.
bool MemoryGraphFixer::has_mem_phi(Node* region) const {
  for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
    Node* use = region->fast_out(i);
    if (use->is_Phi() && use->bottom_type() == Type::MEMORY &&
        (_phase->C->get_alias_index(use->adr_type()) == _alias)) {
      return true;
    }
  }
  return false;
}

// Splice a new memory state 'new_mem' (produced at 'new_ctrl') into the
// memory graph: either rewire the chain between mem and mem_for_ctrl, or
// walk the CFG from new_ctrl creating memory phis where the new state merges
// with the old, then update all downstream memory users.
// (This function continues past the end of this chunk of the file.)
void MemoryGraphFixer::fix_mem(Node* ctrl, Node* new_ctrl, Node* mem, Node* mem_for_ctrl, Node* new_mem, Unique_Node_List& uses) {
  assert(_phase->ctrl_or_self(new_mem) == new_ctrl, "");
  const bool trace = false;
  DEBUG_ONLY(if (trace) { tty->print("ZZZ control is"); ctrl->dump(); });
  DEBUG_ONLY(if (trace) { tty->print("ZZZ mem is"); mem->dump(); });
  GrowableArray<Node*> phis;
  if (mem_for_ctrl != mem) {
    // Simple case: mem_for_ctrl is downstream of mem on a straight-line raw
    // memory chain; hook new_mem in at the bottom of that chain.
    Node* old = mem_for_ctrl;
    Node* prev = NULL;
    while (old != mem) {
      prev = old;
      if (old->is_Store() || old->is_ClearArray() || old->is_LoadStore()) {
        assert(_alias == Compile::AliasIdxRaw, "");
        old = old->in(MemNode::Memory);
      } else if (old->Opcode() == Op_SCMemProj) {
        assert(_alias == Compile::AliasIdxRaw, "");
        old = old->in(0);
      } else {
        ShouldNotReachHere();
      }
    }
    assert(prev != NULL, "");
    if (new_ctrl != ctrl) {
      _memory_nodes.map(ctrl->_idx, mem);
      _memory_nodes.map(new_ctrl->_idx, mem_for_ctrl);
    }
    uint input = (uint)MemNode::Memory;
    _phase->igvn().replace_input_of(prev, input, new_mem);
  } else {
    // General case: flood the CFG from new_ctrl; at each region without a
    // memory phi decide whether the new state requires one.
    uses.clear();
    _memory_nodes.map(new_ctrl->_idx, new_mem);
    uses.push(new_ctrl);
    for(uint next = 0; next < uses.size(); next++ ) {
      Node *n = uses.at(next);
      assert(n->is_CFG(), "");
      DEBUG_ONLY(if (trace) { tty->print("ZZZ ctrl"); n->dump(); });
      for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
        Node* u = n->fast_out(i);
        if (!u->is_Root() && u->is_CFG() && u != n) {
          Node* m = _memory_nodes[u->_idx];
          if (u->is_Region() && (!u->is_OuterStripMinedLoop() || _include_lsm) &&
              !has_mem_phi(u) &&
              u->unique_ctrl_out()->Opcode() != Op_Halt) {
            DEBUG_ONLY(if (trace) { tty->print("ZZZ region"); u->dump(); });
            DEBUG_ONLY(if (trace && m != NULL) { tty->print("ZZZ mem"); m->dump(); });

            if (!mem_is_valid(m, u) || !m->is_Phi()) {
              bool push = true;
              bool create_phi = true;
              if (_phase->is_dominator(new_ctrl, u)) {
                // new_ctrl dominates: no merge with old state here.
                create_phi = false;
              } else if (!_phase->C->has_irreducible_loop()) {
                // Create a phi at loop headers between ctrl and new_ctrl,
                // and where the new state merges back with the old one.
                IdealLoopTree* loop = _phase->get_loop(ctrl);
                bool do_check = true;
                IdealLoopTree* l = loop;
                create_phi = false;
                while (l != _phase->ltree_root()) {
                  Node* head = l->_head;
                  if (head->in(0) == NULL) {
                    head = _phase->get_ctrl(head);
                  }
                  if (_phase->is_dominator(head, u) && _phase->is_dominator(_phase->idom(u), head)) {
                    create_phi = true;
                    do_check = false;
                    break;
                  }
                  l = l->_parent;
                }

                if (do_check) {
                  assert(!create_phi, "");
                  IdealLoopTree* u_loop = _phase->get_loop(u);
                  if (u_loop != _phase->ltree_root() && u_loop->is_member(loop)) {
                    Node* c = ctrl;
                    while (!_phase->is_dominator(c, u_loop->tail())) {
                      c = _phase->idom(c);
                    }
                    if (!_phase->is_dominator(c, u)) {
                      do_check = false;
                    }
                  }
                }

                if (do_check && _phase->is_dominator(_phase->idom(u), new_ctrl)) {
                  create_phi = true;
                }
              }
              if (create_phi) {
                Node* phi = new PhiNode(u, Type::MEMORY, _phase->C->get_adr_type(_alias));
                _phase->register_new_node(phi, u);
                phis.push(phi);
                DEBUG_ONLY(if (trace) { tty->print("ZZZ new phi"); phi->dump(); });
                if (!mem_is_valid(m, u)) {
                  DEBUG_ONLY(if (trace) { tty->print("ZZZ setting mem"); phi->dump(); });
                  _memory_nodes.map(u->_idx, phi);
                } else {
                  DEBUG_ONLY(if (trace) { tty->print("ZZZ NOT setting mem"); m->dump(); });
                  // Walk down the local memory chain at u and make its first
                  // node take the new phi as its memory input.
                  for (;;) {
                    assert(m->is_Mem() || m->is_LoadStore() || m->is_Proj(), "");
                    Node* next = NULL;
                    if (m->is_Proj()) {
                      next = m->in(0);
                    } else {
                      assert(m->is_Mem() || m->is_LoadStore(), "");
                      assert(_alias == Compile::AliasIdxRaw, "");
                      next = m->in(MemNode::Memory);
                    }
                    if (_phase->get_ctrl(next) != u) {
                      break;
                    }
                    if (next->is_MergeMem()) {
                      assert(_phase->get_ctrl(next->as_MergeMem()->memory_at(_alias)) != u, "");
                      break;
                    }
                    if (next->is_Phi()) {
                      assert(next->adr_type() == TypePtr::BOTTOM && next->in(0) == u, "");
                      break;
                    }
                    m = next;
                  }

                  DEBUG_ONLY(if (trace) { tty->print("ZZZ setting to phi"); m->dump(); });
                  assert(m->is_Mem() || m->is_LoadStore(), "");
                  uint input = (uint)MemNode::Memory;
                  _phase->igvn().replace_input_of(m, input, phi);
                  push = false;
                }
              } else {
                DEBUG_ONLY(if (trace) { tty->print("ZZZ skipping region"); u->dump(); });
              }
              if (push) {
                uses.push(u);
              }
            }
          } else if (!mem_is_valid(m, u) &&
                     !(u->Opcode() == Op_CProj && u->in(0)->Opcode() == Op_NeverBranch && u->as_Proj()->_con == 1)) {
            uses.push(u);
          }
        }
      }
    }
    // Fill in the inputs of the phis created above from the memory state at
    // each region predecessor.
    for (int i = 0; i < phis.length(); i++) {
      Node* n = phis.at(i);
      Node* r = n->in(0);
      DEBUG_ONLY(if (trace) { tty->print("ZZZ fixing new phi"); n->dump(); });
      for (uint j = 1; j < n->req(); j++) {
        Node* m = find_mem(r->in(j), NULL);
        _phase->igvn().replace_input_of(n, j, m);
        DEBUG_ONLY(if (trace) { tty->print("ZZZ fixing new phi: %d", j); m->dump(); });
      }
    }
  }
  // Re-route pre-existing users of 'mem' to the memory state live at their
  // control.
  uint last = _phase->C->unique();
  MergeMemNode* mm = NULL;
  int alias = _alias;
  DEBUG_ONLY(if (trace) { tty->print("ZZZ raw mem is"); mem->dump(); });
  for (DUIterator i = mem->outs(); mem->has_out(i); i++) {
    Node* u = mem->out(i);
    if (u->_idx < last) {
      if (u->is_Mem()) {
        if (_phase->C->get_alias_index(u->adr_type()) == alias) {
          Node* m = find_mem(_phase->get_ctrl(u), u);
          if (m != mem) {
            DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
            _phase->igvn().replace_input_of(u, MemNode::Memory, m);
            --i;
          }
        }
      } else if (u->is_MergeMem()) {
        MergeMemNode* u_mm = u->as_MergeMem();
        if (u_mm->memory_at(alias) == mem) {
          MergeMemNode* newmm = NULL;
          for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
            Node* uu = u->fast_out(j);
            // (function continues past the end of this chunk of the file)
assert(!uu->is_MergeMem(), "chain of MergeMems?"); 2768 if (uu->is_Phi()) { 2769 assert(uu->adr_type() == TypePtr::BOTTOM, ""); 2770 Node* region = uu->in(0); 2771 int nb = 0; 2772 for (uint k = 1; k < uu->req(); k++) { 2773 if (uu->in(k) == u) { 2774 Node* m = find_mem(region->in(k), NULL); 2775 if (m != mem) { 2776 DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of phi %d", k); uu->dump(); }); 2777 newmm = clone_merge_mem(u, mem, m, _phase->ctrl_or_self(m), i); 2778 if (newmm != u) { 2779 _phase->igvn().replace_input_of(uu, k, newmm); 2780 nb++; 2781 --jmax; 2782 } 2783 } 2784 } 2785 } 2786 if (nb > 0) { 2787 --j; 2788 } 2789 } else { 2790 Node* m = find_mem(_phase->ctrl_or_self(uu), uu); 2791 if (m != mem) { 2792 DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); uu->dump(); }); 2793 newmm = clone_merge_mem(u, mem, m, _phase->ctrl_or_self(m), i); 2794 if (newmm != u) { 2795 _phase->igvn().replace_input_of(uu, uu->find_edge(u), newmm); 2796 --j, --jmax; 2797 } 2798 } 2799 } 2800 } 2801 } 2802 } else if (u->is_Phi()) { 2803 assert(u->bottom_type() == Type::MEMORY, "what else?"); 2804 if (_phase->C->get_alias_index(u->adr_type()) == alias || u->adr_type() == TypePtr::BOTTOM) { 2805 Node* region = u->in(0); 2806 bool replaced = false; 2807 for (uint j = 1; j < u->req(); j++) { 2808 if (u->in(j) == mem) { 2809 Node* m = find_mem(region->in(j), NULL); 2810 Node* nnew = m; 2811 if (m != mem) { 2812 if (u->adr_type() == TypePtr::BOTTOM) { 2813 mm = allocate_merge_mem(mem, m, _phase->ctrl_or_self(m)); 2814 nnew = mm; 2815 } 2816 DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of phi %d", j); u->dump(); }); 2817 _phase->igvn().replace_input_of(u, j, nnew); 2818 replaced = true; 2819 } 2820 } 2821 } 2822 if (replaced) { 2823 --i; 2824 } 2825 } 2826 } else if ((u->adr_type() == TypePtr::BOTTOM && u->Opcode() != Op_StrInflatedCopy) || 2827 u->adr_type() == NULL) { 2828 assert(u->adr_type() != NULL || 2829 u->Opcode() == Op_Rethrow || 2830 
u->Opcode() == Op_Return || 2831 u->Opcode() == Op_SafePoint || 2832 (u->is_CallStaticJava() && u->as_CallStaticJava()->uncommon_trap_request() != 0) || 2833 (u->is_CallStaticJava() && u->as_CallStaticJava()->_entry_point == OptoRuntime::rethrow_stub()) || 2834 u->Opcode() == Op_CallLeaf, ""); 2835 Node* m = find_mem(_phase->ctrl_or_self(u), u); 2836 if (m != mem) { 2837 mm = allocate_merge_mem(mem, m, _phase->get_ctrl(m)); 2838 _phase->igvn().replace_input_of(u, u->find_edge(mem), mm); 2839 --i; 2840 } 2841 } else if (_phase->C->get_alias_index(u->adr_type()) == alias) { 2842 Node* m = find_mem(_phase->ctrl_or_self(u), u); 2843 if (m != mem) { 2844 DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); }); 2845 _phase->igvn().replace_input_of(u, u->find_edge(mem), m); 2846 --i; 2847 } 2848 } else if (u->adr_type() != TypePtr::BOTTOM && 2849 _memory_nodes[_phase->ctrl_or_self(u)->_idx] == u) { 2850 Node* m = find_mem(_phase->ctrl_or_self(u), u); 2851 assert(m != mem, ""); 2852 // u is on the wrong slice... 
2853 assert(u->is_ClearArray(), ""); 2854 DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); }); 2855 _phase->igvn().replace_input_of(u, u->find_edge(mem), m); 2856 --i; 2857 } 2858 } 2859 } 2860 #ifdef ASSERT 2861 assert(new_mem->outcnt() > 0, ""); 2862 for (int i = 0; i < phis.length(); i++) { 2863 Node* n = phis.at(i); 2864 assert(n->outcnt() > 0, "new phi must have uses now"); 2865 } 2866 #endif 2867 } 2868 2869 MergeMemNode* MemoryGraphFixer::allocate_merge_mem(Node* mem, Node* rep_proj, Node* rep_ctrl) const { 2870 MergeMemNode* mm = MergeMemNode::make(mem); 2871 mm->set_memory_at(_alias, rep_proj); 2872 _phase->register_new_node(mm, rep_ctrl); 2873 return mm; 2874 } 2875 2876 MergeMemNode* MemoryGraphFixer::clone_merge_mem(Node* u, Node* mem, Node* rep_proj, Node* rep_ctrl, DUIterator& i) const { 2877 MergeMemNode* newmm = NULL; 2878 MergeMemNode* u_mm = u->as_MergeMem(); 2879 Node* c = _phase->get_ctrl(u); 2880 if (_phase->is_dominator(c, rep_ctrl)) { 2881 c = rep_ctrl; 2882 } else { 2883 assert(_phase->is_dominator(rep_ctrl, c), "one must dominate the other"); 2884 } 2885 if (u->outcnt() == 1) { 2886 if (u->req() > (uint)_alias && u->in(_alias) == mem) { 2887 _phase->igvn().replace_input_of(u, _alias, rep_proj); 2888 --i; 2889 } else { 2890 _phase->igvn().rehash_node_delayed(u); 2891 u_mm->set_memory_at(_alias, rep_proj); 2892 } 2893 newmm = u_mm; 2894 _phase->set_ctrl_and_loop(u, c); 2895 } else { 2896 // can't simply clone u and then change one of its input because 2897 // it adds and then removes an edge which messes with the 2898 // DUIterator 2899 newmm = MergeMemNode::make(u_mm->base_memory()); 2900 for (uint j = 0; j < u->req(); j++) { 2901 if (j < newmm->req()) { 2902 if (j == (uint)_alias) { 2903 newmm->set_req(j, rep_proj); 2904 } else if (newmm->in(j) != u->in(j)) { 2905 newmm->set_req(j, u->in(j)); 2906 } 2907 } else if (j == (uint)_alias) { 2908 newmm->add_req(rep_proj); 2909 } else { 2910 newmm->add_req(u->in(j)); 
2911 } 2912 } 2913 if ((uint)_alias >= u->req()) { 2914 newmm->set_memory_at(_alias, rep_proj); 2915 } 2916 _phase->register_new_node(newmm, c); 2917 } 2918 return newmm; 2919 } 2920 2921 bool MemoryGraphFixer::should_process_phi(Node* phi) const { 2922 if (phi->adr_type() == TypePtr::BOTTOM) { 2923 Node* region = phi->in(0); 2924 for (DUIterator_Fast jmax, j = region->fast_outs(jmax); j < jmax; j++) { 2925 Node* uu = region->fast_out(j); 2926 if (uu->is_Phi() && uu != phi && uu->bottom_type() == Type::MEMORY && _phase->C->get_alias_index(uu->adr_type()) == _alias) { 2927 return false; 2928 } 2929 } 2930 return true; 2931 } 2932 return _phase->C->get_alias_index(phi->adr_type()) == _alias; 2933 } 2934 2935 void MemoryGraphFixer::fix_memory_uses(Node* mem, Node* replacement, Node* rep_proj, Node* rep_ctrl) const { 2936 uint last = _phase-> C->unique(); 2937 MergeMemNode* mm = NULL; 2938 assert(mem->bottom_type() == Type::MEMORY, ""); 2939 for (DUIterator i = mem->outs(); mem->has_out(i); i++) { 2940 Node* u = mem->out(i); 2941 if (u != replacement && u->_idx < last) { 2942 if (u->is_MergeMem()) { 2943 MergeMemNode* u_mm = u->as_MergeMem(); 2944 if (u_mm->memory_at(_alias) == mem) { 2945 MergeMemNode* newmm = NULL; 2946 for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) { 2947 Node* uu = u->fast_out(j); 2948 assert(!uu->is_MergeMem(), "chain of MergeMems?"); 2949 if (uu->is_Phi()) { 2950 if (should_process_phi(uu)) { 2951 Node* region = uu->in(0); 2952 int nb = 0; 2953 for (uint k = 1; k < uu->req(); k++) { 2954 if (uu->in(k) == u && _phase->is_dominator(rep_ctrl, region->in(k))) { 2955 if (newmm == NULL) { 2956 newmm = clone_merge_mem(u, mem, rep_proj, rep_ctrl, i); 2957 } 2958 if (newmm != u) { 2959 _phase->igvn().replace_input_of(uu, k, newmm); 2960 nb++; 2961 --jmax; 2962 } 2963 } 2964 } 2965 if (nb > 0) { 2966 --j; 2967 } 2968 } 2969 } else { 2970 if (rep_ctrl != uu && ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(uu), 
replacement, uu, _phase)) { 2971 if (newmm == NULL) { 2972 newmm = clone_merge_mem(u, mem, rep_proj, rep_ctrl, i); 2973 } 2974 if (newmm != u) { 2975 _phase->igvn().replace_input_of(uu, uu->find_edge(u), newmm); 2976 --j, --jmax; 2977 } 2978 } 2979 } 2980 } 2981 } 2982 } else if (u->is_Phi()) { 2983 assert(u->bottom_type() == Type::MEMORY, "what else?"); 2984 Node* region = u->in(0); 2985 if (should_process_phi(u)) { 2986 bool replaced = false; 2987 for (uint j = 1; j < u->req(); j++) { 2988 if (u->in(j) == mem && _phase->is_dominator(rep_ctrl, region->in(j))) { 2989 Node* nnew = rep_proj; 2990 if (u->adr_type() == TypePtr::BOTTOM) { 2991 if (mm == NULL) { 2992 mm = allocate_merge_mem(mem, rep_proj, rep_ctrl); 2993 } 2994 nnew = mm; 2995 } 2996 _phase->igvn().replace_input_of(u, j, nnew); 2997 replaced = true; 2998 } 2999 } 3000 if (replaced) { 3001 --i; 3002 } 3003 3004 } 3005 } else if ((u->adr_type() == TypePtr::BOTTOM && u->Opcode() != Op_StrInflatedCopy) || 3006 u->adr_type() == NULL) { 3007 assert(u->adr_type() != NULL || 3008 u->Opcode() == Op_Rethrow || 3009 u->Opcode() == Op_Return || 3010 u->Opcode() == Op_SafePoint || 3011 u->Opcode() == Op_StoreIConditional || 3012 u->Opcode() == Op_StoreLConditional || 3013 (u->is_CallStaticJava() && u->as_CallStaticJava()->uncommon_trap_request() != 0) || 3014 (u->is_CallStaticJava() && u->as_CallStaticJava()->_entry_point == OptoRuntime::rethrow_stub()) || 3015 u->Opcode() == Op_CallLeaf, "%s", u->Name()); 3016 if (ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) { 3017 if (mm == NULL) { 3018 mm = allocate_merge_mem(mem, rep_proj, rep_ctrl); 3019 } 3020 _phase->igvn().replace_input_of(u, u->find_edge(mem), mm); 3021 --i; 3022 } 3023 } else if (_phase->C->get_alias_index(u->adr_type()) == _alias) { 3024 if (ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) { 3025 _phase->igvn().replace_input_of(u, u->find_edge(mem), 
rep_proj); 3026 --i; 3027 } 3028 } 3029 } 3030 } 3031 } 3032 3033 ShenandoahLoadReferenceBarrierNode::ShenandoahLoadReferenceBarrierNode(Node* ctrl, Node* obj) 3034 : Node(ctrl, obj) { 3035 ShenandoahBarrierSetC2::bsc2()->state()->add_load_reference_barrier(this); 3036 } 3037 3038 const Type* ShenandoahLoadReferenceBarrierNode::bottom_type() const { 3039 if (in(ValueIn) == NULL || in(ValueIn)->is_top()) { 3040 return Type::TOP; 3041 } 3042 const Type* t = in(ValueIn)->bottom_type(); 3043 if (t == TypePtr::NULL_PTR) { 3044 return t; 3045 } 3046 return t->is_oopptr(); 3047 } 3048 3049 const Type* ShenandoahLoadReferenceBarrierNode::Value(PhaseGVN* phase) const { 3050 // Either input is TOP ==> the result is TOP 3051 const Type *t2 = phase->type(in(ValueIn)); 3052 if( t2 == Type::TOP ) return Type::TOP; 3053 3054 if (t2 == TypePtr::NULL_PTR) { 3055 return t2; 3056 } 3057 3058 const Type* type = t2->is_oopptr()/*->cast_to_nonconst()*/; 3059 return type; 3060 } 3061 3062 Node* ShenandoahLoadReferenceBarrierNode::Identity(PhaseGVN* phase) { 3063 Node* value = in(ValueIn); 3064 if (!needs_barrier(phase, value)) { 3065 return value; 3066 } 3067 return this; 3068 } 3069 3070 bool ShenandoahLoadReferenceBarrierNode::needs_barrier(PhaseGVN* phase, Node* n) { 3071 Unique_Node_List visited; 3072 return needs_barrier_impl(phase, n, visited); 3073 } 3074 3075 bool ShenandoahLoadReferenceBarrierNode::needs_barrier_impl(PhaseGVN* phase, Node* n, Unique_Node_List &visited) { 3076 if (n == NULL) return false; 3077 if (visited.member(n)) { 3078 return false; // Been there. 
3079 } 3080 visited.push(n); 3081 3082 if (n->is_Allocate()) { 3083 // tty->print_cr("optimize barrier on alloc"); 3084 return false; 3085 } 3086 if (n->is_Call()) { 3087 // tty->print_cr("optimize barrier on call"); 3088 return false; 3089 } 3090 3091 const Type* type = phase->type(n); 3092 if (type == Type::TOP) { 3093 return false; 3094 } 3095 if (type->make_ptr()->higher_equal(TypePtr::NULL_PTR)) { 3096 // tty->print_cr("optimize barrier on null"); 3097 return false; 3098 } 3099 if (type->make_oopptr() && type->make_oopptr()->const_oop() != NULL) { 3100 // tty->print_cr("optimize barrier on constant"); 3101 return false; 3102 } 3103 3104 switch (n->Opcode()) { 3105 case Op_AddP: 3106 return true; // TODO: Can refine? 3107 case Op_LoadP: 3108 case Op_ShenandoahCompareAndExchangeN: 3109 case Op_ShenandoahCompareAndExchangeP: 3110 case Op_CompareAndExchangeN: 3111 case Op_CompareAndExchangeP: 3112 case Op_GetAndSetN: 3113 case Op_GetAndSetP: 3114 return true; 3115 case Op_Phi: { 3116 for (uint i = 1; i < n->req(); i++) { 3117 if (needs_barrier_impl(phase, n->in(i), visited)) return true; 3118 } 3119 return false; 3120 } 3121 case Op_CheckCastPP: 3122 case Op_CastPP: 3123 return needs_barrier_impl(phase, n->in(1), visited); 3124 case Op_Proj: 3125 return needs_barrier_impl(phase, n->in(0), visited); 3126 case Op_ShenandoahLoadReferenceBarrier: 3127 // tty->print_cr("optimize barrier on barrier"); 3128 return false; 3129 case Op_Parm: 3130 // tty->print_cr("optimize barrier on input arg"); 3131 return false; 3132 case Op_DecodeN: 3133 case Op_EncodeP: 3134 return needs_barrier_impl(phase, n->in(1), visited); 3135 case Op_LoadN: 3136 return true; 3137 case Op_CMoveP: 3138 return needs_barrier_impl(phase, n->in(2), visited) || 3139 needs_barrier_impl(phase, n->in(3), visited); 3140 case Op_ShenandoahEnqueueBarrier: 3141 return needs_barrier_impl(phase, n->in(1), visited); 3142 default: 3143 break; 3144 } 3145 #ifdef ASSERT 3146 tty->print("need barrier on?: "); 3147 
tty->print_cr("ins:"); 3148 n->dump(2); 3149 tty->print_cr("outs:"); 3150 n->dump(-2); 3151 ShouldNotReachHere(); 3152 #endif 3153 return true; 3154 } 3155 3156 ShenandoahLoadReferenceBarrierNode::Strength ShenandoahLoadReferenceBarrierNode::get_barrier_strength() { 3157 Unique_Node_List visited; 3158 Node_Stack stack(0); 3159 stack.push(this, 0); 3160 Strength strength = NONE; 3161 while (strength != STRONG && stack.size() > 0) { 3162 Node* n = stack.node(); 3163 if (visited.member(n)) { 3164 stack.pop(); 3165 continue; 3166 } 3167 visited.push(n); 3168 bool visit_users = false; 3169 switch (n->Opcode()) { 3170 case Op_StoreN: 3171 case Op_StoreP: { 3172 strength = STRONG; 3173 break; 3174 } 3175 case Op_CmpP: { 3176 if (!n->in(1)->bottom_type()->higher_equal(TypePtr::NULL_PTR) && 3177 !n->in(2)->bottom_type()->higher_equal(TypePtr::NULL_PTR)) { 3178 strength = STRONG; 3179 } 3180 break; 3181 } 3182 case Op_CallStaticJava: { 3183 strength = STRONG; 3184 break; 3185 } 3186 case Op_CallDynamicJava: 3187 case Op_CallLeaf: 3188 case Op_CallLeafNoFP: 3189 case Op_CompareAndSwapL: 3190 case Op_CompareAndSwapI: 3191 case Op_CompareAndSwapB: 3192 case Op_CompareAndSwapS: 3193 case Op_CompareAndSwapN: 3194 case Op_CompareAndSwapP: 3195 case Op_CompareAndExchangeL: 3196 case Op_CompareAndExchangeI: 3197 case Op_CompareAndExchangeB: 3198 case Op_CompareAndExchangeS: 3199 case Op_CompareAndExchangeN: 3200 case Op_CompareAndExchangeP: 3201 case Op_WeakCompareAndSwapL: 3202 case Op_WeakCompareAndSwapI: 3203 case Op_WeakCompareAndSwapB: 3204 case Op_WeakCompareAndSwapS: 3205 case Op_WeakCompareAndSwapN: 3206 case Op_WeakCompareAndSwapP: 3207 case Op_ShenandoahCompareAndSwapN: 3208 case Op_ShenandoahCompareAndSwapP: 3209 case Op_ShenandoahWeakCompareAndSwapN: 3210 case Op_ShenandoahWeakCompareAndSwapP: 3211 case Op_ShenandoahCompareAndExchangeN: 3212 case Op_ShenandoahCompareAndExchangeP: 3213 case Op_GetAndSetL: 3214 case Op_GetAndSetI: 3215 case Op_GetAndSetB: 3216 case 
Op_GetAndSetS: 3217 case Op_GetAndSetP: 3218 case Op_GetAndSetN: 3219 case Op_GetAndAddL: 3220 case Op_GetAndAddI: 3221 case Op_GetAndAddB: 3222 case Op_GetAndAddS: 3223 case Op_ShenandoahEnqueueBarrier: 3224 case Op_FastLock: 3225 case Op_FastUnlock: 3226 case Op_Rethrow: 3227 case Op_Return: 3228 case Op_StoreB: 3229 case Op_StoreC: 3230 case Op_StoreD: 3231 case Op_StoreF: 3232 case Op_StoreL: 3233 case Op_StoreLConditional: 3234 case Op_StoreI: 3235 case Op_StoreIConditional: 3236 case Op_StoreVector: 3237 case Op_StrInflatedCopy: 3238 case Op_StrCompressedCopy: 3239 case Op_EncodeP: 3240 case Op_CastP2X: 3241 case Op_SafePoint: 3242 case Op_EncodeISOArray: 3243 strength = STRONG; 3244 break; 3245 case Op_LoadB: 3246 case Op_LoadUB: 3247 case Op_LoadUS: 3248 case Op_LoadD: 3249 case Op_LoadF: 3250 case Op_LoadL: 3251 case Op_LoadI: 3252 case Op_LoadS: 3253 case Op_LoadN: 3254 case Op_LoadP: 3255 case Op_LoadVector: { 3256 const TypePtr* adr_type = n->adr_type(); 3257 int alias_idx = Compile::current()->get_alias_index(adr_type); 3258 Compile::AliasType* alias_type = Compile::current()->alias_type(alias_idx); 3259 ciField* field = alias_type->field(); 3260 bool is_static = field != NULL && field->is_static(); 3261 bool is_final = field != NULL && field->is_final(); 3262 bool is_stable = field != NULL && field->is_stable(); 3263 if (ShenandoahOptimizeStaticFinals && is_static && is_final) { 3264 // Leave strength as is. 3265 } else if (ShenandoahOptimizeInstanceFinals && !is_static && is_final) { 3266 // Leave strength as is. 3267 } else if (ShenandoahOptimizeStableFinals && (is_stable || (adr_type->isa_aryptr() && adr_type->isa_aryptr()->is_stable()))) { 3268 // Leave strength as is. 
3269 } else { 3270 strength = WEAK; 3271 } 3272 break; 3273 } 3274 case Op_AryEq: { 3275 Node* n1 = n->in(2); 3276 Node* n2 = n->in(3); 3277 if (!ShenandoahOptimizeStableFinals || 3278 !n1->bottom_type()->isa_aryptr() || !n1->bottom_type()->isa_aryptr()->is_stable() || 3279 !n2->bottom_type()->isa_aryptr() || !n2->bottom_type()->isa_aryptr()->is_stable()) { 3280 strength = WEAK; 3281 } 3282 break; 3283 } 3284 case Op_StrEquals: 3285 case Op_StrComp: 3286 case Op_StrIndexOf: 3287 case Op_StrIndexOfChar: 3288 if (!ShenandoahOptimizeStableFinals) { 3289 strength = WEAK; 3290 } 3291 break; 3292 case Op_Conv2B: 3293 case Op_LoadRange: 3294 case Op_LoadKlass: 3295 case Op_LoadNKlass: 3296 // NONE, i.e. leave current strength as is 3297 break; 3298 case Op_AddP: 3299 case Op_CheckCastPP: 3300 case Op_CastPP: 3301 case Op_CMoveP: 3302 case Op_Phi: 3303 case Op_ShenandoahLoadReferenceBarrier: 3304 visit_users = true; 3305 break; 3306 default: { 3307 #ifdef ASSERT 3308 tty->print_cr("Unknown node in get_barrier_strength:"); 3309 n->dump(1); 3310 ShouldNotReachHere(); 3311 #else 3312 strength = STRONG; 3313 #endif 3314 } 3315 } 3316 #ifdef ASSERT 3317 /* 3318 if (strength == STRONG) { 3319 tty->print("strengthening node: "); 3320 n->dump(); 3321 } 3322 */ 3323 #endif 3324 stack.pop(); 3325 if (visit_users) { 3326 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 3327 Node* user = n->fast_out(i); 3328 if (user != NULL) { 3329 stack.push(user, 0); 3330 } 3331 } 3332 } 3333 } 3334 return strength; 3335 } 3336 3337 CallStaticJavaNode* ShenandoahLoadReferenceBarrierNode::pin_and_expand_null_check(PhaseIterGVN& igvn) { 3338 Node* val = in(ValueIn); 3339 3340 const Type* val_t = igvn.type(val); 3341 3342 if (val_t->meet(TypePtr::NULL_PTR) != val_t && 3343 val->Opcode() == Op_CastPP && 3344 val->in(0) != NULL && 3345 val->in(0)->Opcode() == Op_IfTrue && 3346 val->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) && 3347 
val->in(0)->in(0)->is_If() && 3348 val->in(0)->in(0)->in(1)->Opcode() == Op_Bool && 3349 val->in(0)->in(0)->in(1)->as_Bool()->_test._test == BoolTest::ne && 3350 val->in(0)->in(0)->in(1)->in(1)->Opcode() == Op_CmpP && 3351 val->in(0)->in(0)->in(1)->in(1)->in(1) == val->in(1) && 3352 val->in(0)->in(0)->in(1)->in(1)->in(2)->bottom_type() == TypePtr::NULL_PTR) { 3353 assert(val->in(0)->in(0)->in(1)->in(1)->in(1) == val->in(1), ""); 3354 CallStaticJavaNode* unc = val->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none); 3355 return unc; 3356 } 3357 return NULL; 3358 }