1 /*
2 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
574 ex_obj = env()->ArithmeticException_instance();
575 break;
576 case Deoptimization::Reason_range_check:
577 ex_obj = env()->ArrayIndexOutOfBoundsException_instance();
578 break;
579 case Deoptimization::Reason_class_check:
580 if (java_bc() == Bytecodes::_aastore) {
581 ex_obj = env()->ArrayStoreException_instance();
582 } else {
583 ex_obj = env()->ClassCastException_instance();
584 }
585 break;
586 }
587 if (failing()) { stop(); return; } // exception allocation might fail
588 if (ex_obj != NULL) {
589 // Cheat with a preallocated exception object.
590 if (C->log() != NULL)
591 C->log()->elem("hot_throw preallocated='1' reason='%s'",
592 Deoptimization::trap_reason_name(reason));
593 const TypeInstPtr* ex_con = TypeInstPtr::make(ex_obj);
594 Node* ex_node = _gvn.transform( ConNode::make(C, ex_con) );
595
596 // Clear the detail message of the preallocated exception object.
597 // Weblogic sometimes mutates the detail message of exceptions
598 // using reflection.
599 int offset = java_lang_Throwable::get_detailMessage_offset();
600 const TypePtr* adr_typ = ex_con->add_offset(offset);
601
602 Node *adr = basic_plus_adr(ex_node, ex_node, offset);
603 const TypeOopPtr* val_type = TypeOopPtr::make_from_klass(env()->String_klass());
604 // Conservatively release stores of object references.
605 Node *store = store_oop_to_object(control(), ex_node, adr, adr_typ, null(), val_type, T_OBJECT, MemNode::release);
606
607 add_exception_state(make_exception_state(ex_node));
608 return;
609 }
610 }
611
612 // %%% Maybe add entry to OptoRuntime which directly throws the exc.?
613 // It won't be much cheaper than bailing to the interp., since we'll
614 // have to pass up all the debug-info, and the runtime will have to
689 _sp = kit->sp();
690 _reexecute = kit->jvms()->_reexecute;
691 }
692 PreserveReexecuteState::~PreserveReexecuteState() {
693 if (_kit->stopped()) return;
694 _kit->jvms()->_reexecute = _reexecute;
695 _kit->set_sp(_sp);
696 }
697
698 //------------------------------clone_map--------------------------------------
699 // Implementation of PreserveJVMState
700 //
701 // Only clone_map(...) here. If this function is used only by the
702 // PreserveJVMState class, we may eventually want to get rid of this
703 // extra function and do it all there.
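//
// Rough usage sketch (hypothetical call site; assumes the usual
// PreserveJVMState pattern of "save the map, install a clone, restore"):
//
//   { PreserveJVMState pjvms(this);   // constructor snapshots the current map
//     // ... build a speculative subgraph on the cloned JVM state ...
//   }                                 // destructor restores the saved map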
704
705 SafePointNode* GraphKit::clone_map() {
706 if (map() == NULL) return NULL;
707
708 // Clone the memory edge first
709 Node* mem = MergeMemNode::make(C, map()->memory());
710 gvn().set_type_bottom(mem);
711
712 SafePointNode *clonemap = (SafePointNode*)map()->clone();
713 JVMState* jvms = this->jvms();
714 JVMState* clonejvms = jvms->clone_shallow(C);
715 clonemap->set_memory(mem);
716 clonemap->set_jvms(clonejvms);
717 clonejvms->set_map(clonemap);
718 record_for_igvn(clonemap);
719 gvn().set_type_bottom(clonemap);
720 return clonemap;
721 }
722
723
724 //-----------------------------set_map_clone-----------------------------------
725 void GraphKit::set_map_clone(SafePointNode* m) {
726 _map = m;
727 _map = clone_map();
728 _map->set_next_exception(NULL);
729 debug_only(verify_map());
1118 // short-circuit a common case
1119 if (offset == intcon(0)) return ptr;
1120 return _gvn.transform( new AddPNode(base, ptr, offset) );
1121 }
1122
1123 Node* GraphKit::ConvI2L(Node* offset) {
1124 // short-circuit a common case
1125 jint offset_con = find_int_con(offset, Type::OffsetBot);
1126 if (offset_con != Type::OffsetBot) {
1127 return longcon((jlong) offset_con);
1128 }
1129 return _gvn.transform( new ConvI2LNode(offset));
1130 }
1131
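// Convert an int to an unsigned long: widen with ConvI2L, then mask off the
// upper 32 bits so a negative int is zero-extended rather than sign-extended.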
1132 Node* GraphKit::ConvI2UL(Node* offset) {
1133 juint offset_con = (juint) find_int_con(offset, Type::OffsetBot);
1134 if (offset_con != (juint) Type::OffsetBot) {
1135 return longcon((julong) offset_con);
1136 }
1137 Node* conv = _gvn.transform( new ConvI2LNode(offset));
1138 Node* mask = _gvn.transform( ConLNode::make(C, (julong) max_juint) );
1139 return _gvn.transform( new AndLNode(conv, mask) );
1140 }
1141
1142 Node* GraphKit::ConvL2I(Node* offset) {
1143 // short-circuit a common case
1144 jlong offset_con = find_long_con(offset, (jlong)Type::OffsetBot);
1145 if (offset_con != (jlong)Type::OffsetBot) {
1146 return intcon((int) offset_con);
1147 }
1148 return _gvn.transform( new ConvL2INode(offset));
1149 }
1150
1151 //-------------------------load_object_klass-----------------------------------
1152 Node* GraphKit::load_object_klass(Node* obj) {
1153 // Special-case a fresh allocation to avoid building nodes:
1154 Node* akls = AllocateNode::Ideal_klass(obj, &_gvn);
1155 if (akls != NULL) return akls;
1156 Node* k_adr = basic_plus_adr(obj, oopDesc::klass_offset_in_bytes());
1157 return _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), k_adr, TypeInstPtr::KLASS) );
1158 }
1418
1419 //=============================================================================
1420 //--------------------------------memory---------------------------------------
1421 Node* GraphKit::memory(uint alias_idx) {
1422 MergeMemNode* mem = merged_memory();
1423 Node* p = mem->memory_at(alias_idx);
1424 _gvn.set_type(p, Type::MEMORY); // must be mapped
1425 return p;
1426 }
1427
1428 //-----------------------------reset_memory------------------------------------
1429 Node* GraphKit::reset_memory() {
1430 Node* mem = map()->memory();
1431 // do not use this node for any more parsing!
1432 debug_only( map()->set_memory((Node*)NULL) );
1433 return _gvn.transform( mem );
1434 }
1435
1436 //------------------------------set_all_memory---------------------------------
1437 void GraphKit::set_all_memory(Node* newmem) {
1438 Node* mergemem = MergeMemNode::make(C, newmem);
1439 gvn().set_type_bottom(mergemem);
1440 map()->set_memory(mergemem);
1441 }
1442
1443 //------------------------------set_all_memory_call----------------------------
1444 void GraphKit::set_all_memory_call(Node* call, bool separate_io_proj) {
1445 Node* newmem = _gvn.transform( new ProjNode(call, TypeFunc::Memory, separate_io_proj) );
1446 set_all_memory(newmem);
1447 }
1448
1449 //=============================================================================
1450 //
1451 // parser factory methods for MemNodes
1452 //
1453 // These are layered on top of the factory methods in LoadNode and StoreNode,
1454 // and integrate with the parser's memory state and _gvn engine.
1455 //
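//
// Rough usage sketch (hypothetical 'obj', 'field' and 'field_offset' names):
// load an int field of 'obj' and store zero back through its alias index:
//
//   Node* adr  = basic_plus_adr(obj, obj, field_offset);
//   int   aidx = C->alias_type(field)->index();
//   Node* val  = make_load(control(), adr, TypeInt::INT, T_INT, aidx,
//                          MemNode::unordered);
//   store_to_memory(control(), adr, intcon(0), T_INT, aidx, MemNode::unordered);
//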
1456
1457 // factory methods keyed by an alias index ("int adr_idx")
1458 Node* GraphKit::make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
1459 int adr_idx,
1460 MemNode::MemOrd mo, bool require_atomic_access) {
1461 assert(adr_idx != Compile::AliasIdxTop, "use other make_load factory" );
1462 const TypePtr* adr_type = NULL; // debug-mode-only argument
1463 debug_only(adr_type = C->get_adr_type(adr_idx));
1464 Node* mem = memory(adr_idx);
1465 Node* ld;
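  // For T_LONG/T_DOUBLE, require_atomic_access asks for a single indivisible
  // 64-bit load (e.g. for Java volatile semantics on 32-bit platforms).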
1466 if (require_atomic_access && bt == T_LONG) {
1467 ld = LoadLNode::make_atomic(C, ctl, mem, adr, adr_type, t, mo);
1468 } else if (require_atomic_access && bt == T_DOUBLE) {
1469 ld = LoadDNode::make_atomic(C, ctl, mem, adr, adr_type, t, mo);
1470 } else {
1471 ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo);
1472 }
1473 ld = _gvn.transform(ld);
1474 if (((bt == T_OBJECT) && C->do_escape_analysis()) || C->eliminate_boxing()) {
1475 // Improve graph before escape analysis and boxing elimination.
1476 record_for_igvn(ld);
1477 }
1478 return ld;
1479 }
1480
1481 Node* GraphKit::store_to_memory(Node* ctl, Node* adr, Node *val, BasicType bt,
1482 int adr_idx,
1483 MemNode::MemOrd mo,
1484 bool require_atomic_access) {
1485 assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );
1486 const TypePtr* adr_type = NULL;
1487 debug_only(adr_type = C->get_adr_type(adr_idx));
1488 Node *mem = memory(adr_idx);
1489 Node* st;
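  // As in make_load above, require_atomic_access requests a single
  // indivisible 64-bit store for T_LONG/T_DOUBLE values.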
1490 if (require_atomic_access && bt == T_LONG) {
1491 st = StoreLNode::make_atomic(C, ctl, mem, adr, adr_type, val, mo);
1492 } else if (require_atomic_access && bt == T_DOUBLE) {
1493 st = StoreDNode::make_atomic(C, ctl, mem, adr, adr_type, val, mo);
1494 } else {
1495 st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt, mo);
1496 }
1497 st = _gvn.transform(st);
1498 set_memory(st, adr_idx);
1499 // Back-to-back stores can eliminate the intermediate store only with DU
1500 // (def-use) info, so push the store on the worklist for the optimizer.
1501 if (mem->req() > MemNode::Address && adr == mem->in(MemNode::Address))
1502 record_for_igvn(st);
1503
1504 return st;
1505 }
1506
1507
1508 void GraphKit::pre_barrier(bool do_load,
1509 Node* ctl,
1510 Node* obj,
1511 Node* adr,
1512 uint adr_idx,
1513 Node* val,
3347 // create a memory projection as for the normal control path
3348 Node* malloc = _gvn.transform(new ProjNode(allocx, TypeFunc::Memory));
3349 set_memory(malloc, rawidx);
3350
3351 // a normal slow-call doesn't change i_o, but an allocation does;
3352 // we create a separate i_o projection for the normal control path
3353 set_i_o(_gvn.transform( new ProjNode(allocx, TypeFunc::I_O, false) ) );
3354 Node* rawoop = _gvn.transform( new ProjNode(allocx, TypeFunc::Parms) );
3355
3356 // put in an initialization barrier
3357 InitializeNode* init = insert_mem_bar_volatile(Op_Initialize, rawidx,
3358 rawoop)->as_Initialize();
3359 assert(alloc->initialization() == init, "2-way macro link must work");
3360 assert(init ->allocation() == alloc, "2-way macro link must work");
3361 {
3362 // Extract memory strands which may participate in the new object's
3363 // initialization, and source them from the new InitializeNode.
3364 // This will allow us to observe initializations when they occur,
3365 // and link them properly (as a group) to the InitializeNode.
3366 assert(init->in(InitializeNode::Memory) == malloc, "");
3367 MergeMemNode* minit_in = MergeMemNode::make(C, malloc);
3368 init->set_req(InitializeNode::Memory, minit_in);
3369 record_for_igvn(minit_in); // fold it up later, if possible
3370 Node* minit_out = memory(rawidx);
3371 assert(minit_out->is_Proj() && minit_out->in(0) == init, "");
3372 if (oop_type->isa_aryptr()) {
3373 const TypePtr* telemref = oop_type->add_offset(Type::OffsetBot);
3374 int elemidx = C->get_alias_index(telemref);
3375 hook_memory_on_init(*this, elemidx, minit_in, minit_out);
3376 } else if (oop_type->isa_instptr()) {
3377 ciInstanceKlass* ik = oop_type->klass()->as_instance_klass();
3378 for (int i = 0, len = ik->nof_nonstatic_fields(); i < len; i++) {
3379 ciField* field = ik->nonstatic_field_at(i);
3380 if (field->offset() >= TrackedInitializationLimit * HeapWordSize)
3381 continue; // do not bother to track really large numbers of fields
3382 // Find (or create) the alias category for this field:
3383 int fieldidx = C->alias_type(field)->index();
3384 hook_memory_on_init(*this, fieldidx, minit_in, minit_out);
3385 }
3386 }
3387 }
1 /*
2 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
574 ex_obj = env()->ArithmeticException_instance();
575 break;
576 case Deoptimization::Reason_range_check:
577 ex_obj = env()->ArrayIndexOutOfBoundsException_instance();
578 break;
579 case Deoptimization::Reason_class_check:
580 if (java_bc() == Bytecodes::_aastore) {
581 ex_obj = env()->ArrayStoreException_instance();
582 } else {
583 ex_obj = env()->ClassCastException_instance();
584 }
585 break;
586 }
587 if (failing()) { stop(); return; } // exception allocation might fail
588 if (ex_obj != NULL) {
589 // Cheat with a preallocated exception object.
590 if (C->log() != NULL)
591 C->log()->elem("hot_throw preallocated='1' reason='%s'",
592 Deoptimization::trap_reason_name(reason));
593 const TypeInstPtr* ex_con = TypeInstPtr::make(ex_obj);
594 Node* ex_node = _gvn.transform( ConNode::make(ex_con) );
595
596 // Clear the detail message of the preallocated exception object.
597 // Weblogic sometimes mutates the detail message of exceptions
598 // using reflection.
599 int offset = java_lang_Throwable::get_detailMessage_offset();
600 const TypePtr* adr_typ = ex_con->add_offset(offset);
601
602 Node *adr = basic_plus_adr(ex_node, ex_node, offset);
603 const TypeOopPtr* val_type = TypeOopPtr::make_from_klass(env()->String_klass());
604 // Conservatively release stores of object references.
605 Node *store = store_oop_to_object(control(), ex_node, adr, adr_typ, null(), val_type, T_OBJECT, MemNode::release);
606
607 add_exception_state(make_exception_state(ex_node));
608 return;
609 }
610 }
611
612 // %%% Maybe add entry to OptoRuntime which directly throws the exc.?
613 // It won't be much cheaper than bailing to the interp., since we'll
614 // have to pass up all the debug-info, and the runtime will have to
689 _sp = kit->sp();
690 _reexecute = kit->jvms()->_reexecute;
691 }
692 PreserveReexecuteState::~PreserveReexecuteState() {
693 if (_kit->stopped()) return;
694 _kit->jvms()->_reexecute = _reexecute;
695 _kit->set_sp(_sp);
696 }
697
698 //------------------------------clone_map--------------------------------------
699 // Implementation of PreserveJVMState
700 //
701 // Only clone_map(...) here. If this function is used only by the
702 // PreserveJVMState class, we may eventually want to get rid of this
703 // extra function and do it all there.
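//
// Rough usage sketch (hypothetical call site; assumes the usual
// PreserveJVMState pattern of "save the map, install a clone, restore"):
//
//   { PreserveJVMState pjvms(this);   // constructor snapshots the current map
//     // ... build a speculative subgraph on the cloned JVM state ...
//   }                                 // destructor restores the saved map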
704
705 SafePointNode* GraphKit::clone_map() {
706 if (map() == NULL) return NULL;
707
708 // Clone the memory edge first
709 Node* mem = MergeMemNode::make(map()->memory());
710 gvn().set_type_bottom(mem);
711
712 SafePointNode *clonemap = (SafePointNode*)map()->clone();
713 JVMState* jvms = this->jvms();
714 JVMState* clonejvms = jvms->clone_shallow(C);
715 clonemap->set_memory(mem);
716 clonemap->set_jvms(clonejvms);
717 clonejvms->set_map(clonemap);
718 record_for_igvn(clonemap);
719 gvn().set_type_bottom(clonemap);
720 return clonemap;
721 }
722
723
724 //-----------------------------set_map_clone-----------------------------------
725 void GraphKit::set_map_clone(SafePointNode* m) {
726 _map = m;
727 _map = clone_map();
728 _map->set_next_exception(NULL);
729 debug_only(verify_map());
1118 // short-circuit a common case
1119 if (offset == intcon(0)) return ptr;
1120 return _gvn.transform( new AddPNode(base, ptr, offset) );
1121 }
1122
1123 Node* GraphKit::ConvI2L(Node* offset) {
1124 // short-circuit a common case
1125 jint offset_con = find_int_con(offset, Type::OffsetBot);
1126 if (offset_con != Type::OffsetBot) {
1127 return longcon((jlong) offset_con);
1128 }
1129 return _gvn.transform( new ConvI2LNode(offset));
1130 }
1131
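// Convert an int to an unsigned long: widen with ConvI2L, then mask off the
// upper 32 bits so a negative int is zero-extended rather than sign-extended.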
1132 Node* GraphKit::ConvI2UL(Node* offset) {
1133 juint offset_con = (juint) find_int_con(offset, Type::OffsetBot);
1134 if (offset_con != (juint) Type::OffsetBot) {
1135 return longcon((julong) offset_con);
1136 }
1137 Node* conv = _gvn.transform( new ConvI2LNode(offset));
1138 Node* mask = _gvn.transform(ConLNode::make((julong) max_juint));
1139 return _gvn.transform( new AndLNode(conv, mask) );
1140 }
1141
1142 Node* GraphKit::ConvL2I(Node* offset) {
1143 // short-circuit a common case
1144 jlong offset_con = find_long_con(offset, (jlong)Type::OffsetBot);
1145 if (offset_con != (jlong)Type::OffsetBot) {
1146 return intcon((int) offset_con);
1147 }
1148 return _gvn.transform( new ConvL2INode(offset));
1149 }
1150
1151 //-------------------------load_object_klass-----------------------------------
1152 Node* GraphKit::load_object_klass(Node* obj) {
1153 // Special-case a fresh allocation to avoid building nodes:
1154 Node* akls = AllocateNode::Ideal_klass(obj, &_gvn);
1155 if (akls != NULL) return akls;
1156 Node* k_adr = basic_plus_adr(obj, oopDesc::klass_offset_in_bytes());
1157 return _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), k_adr, TypeInstPtr::KLASS) );
1158 }
1418
1419 //=============================================================================
1420 //--------------------------------memory---------------------------------------
1421 Node* GraphKit::memory(uint alias_idx) {
1422 MergeMemNode* mem = merged_memory();
1423 Node* p = mem->memory_at(alias_idx);
1424 _gvn.set_type(p, Type::MEMORY); // must be mapped
1425 return p;
1426 }
1427
1428 //-----------------------------reset_memory------------------------------------
1429 Node* GraphKit::reset_memory() {
1430 Node* mem = map()->memory();
1431 // do not use this node for any more parsing!
1432 debug_only( map()->set_memory((Node*)NULL) );
1433 return _gvn.transform( mem );
1434 }
1435
1436 //------------------------------set_all_memory---------------------------------
1437 void GraphKit::set_all_memory(Node* newmem) {
1438 Node* mergemem = MergeMemNode::make(newmem);
1439 gvn().set_type_bottom(mergemem);
1440 map()->set_memory(mergemem);
1441 }
1442
1443 //------------------------------set_all_memory_call----------------------------
1444 void GraphKit::set_all_memory_call(Node* call, bool separate_io_proj) {
1445 Node* newmem = _gvn.transform( new ProjNode(call, TypeFunc::Memory, separate_io_proj) );
1446 set_all_memory(newmem);
1447 }
1448
1449 //=============================================================================
1450 //
1451 // parser factory methods for MemNodes
1452 //
1453 // These are layered on top of the factory methods in LoadNode and StoreNode,
1454 // and integrate with the parser's memory state and _gvn engine.
1455 //
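//
// Rough usage sketch (hypothetical 'obj', 'field' and 'field_offset' names):
// load an int field of 'obj' and store zero back through its alias index:
//
//   Node* adr  = basic_plus_adr(obj, obj, field_offset);
//   int   aidx = C->alias_type(field)->index();
//   Node* val  = make_load(control(), adr, TypeInt::INT, T_INT, aidx,
//                          MemNode::unordered);
//   store_to_memory(control(), adr, intcon(0), T_INT, aidx, MemNode::unordered);
//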
1456
1457 // factory methods keyed by an alias index ("int adr_idx")
1458 Node* GraphKit::make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
1459 int adr_idx,
1460 MemNode::MemOrd mo, bool require_atomic_access) {
1461 assert(adr_idx != Compile::AliasIdxTop, "use other make_load factory" );
1462 const TypePtr* adr_type = NULL; // debug-mode-only argument
1463 debug_only(adr_type = C->get_adr_type(adr_idx));
1464 Node* mem = memory(adr_idx);
1465 Node* ld;
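  // For T_LONG/T_DOUBLE, require_atomic_access asks for a single indivisible
  // 64-bit load (e.g. for Java volatile semantics on 32-bit platforms).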
1466 if (require_atomic_access && bt == T_LONG) {
1467 ld = LoadLNode::make_atomic(ctl, mem, adr, adr_type, t, mo);
1468 } else if (require_atomic_access && bt == T_DOUBLE) {
1469 ld = LoadDNode::make_atomic(ctl, mem, adr, adr_type, t, mo);
1470 } else {
1471 ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo);
1472 }
1473 ld = _gvn.transform(ld);
1474 if (((bt == T_OBJECT) && C->do_escape_analysis()) || C->eliminate_boxing()) {
1475 // Improve graph before escape analysis and boxing elimination.
1476 record_for_igvn(ld);
1477 }
1478 return ld;
1479 }
1480
1481 Node* GraphKit::store_to_memory(Node* ctl, Node* adr, Node *val, BasicType bt,
1482 int adr_idx,
1483 MemNode::MemOrd mo,
1484 bool require_atomic_access) {
1485 assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );
1486 const TypePtr* adr_type = NULL;
1487 debug_only(adr_type = C->get_adr_type(adr_idx));
1488 Node *mem = memory(adr_idx);
1489 Node* st;
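  // As in make_load above, require_atomic_access requests a single
  // indivisible 64-bit store for T_LONG/T_DOUBLE values.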
1490 if (require_atomic_access && bt == T_LONG) {
1491 st = StoreLNode::make_atomic(ctl, mem, adr, adr_type, val, mo);
1492 } else if (require_atomic_access && bt == T_DOUBLE) {
1493 st = StoreDNode::make_atomic(ctl, mem, adr, adr_type, val, mo);
1494 } else {
1495 st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt, mo);
1496 }
1497 st = _gvn.transform(st);
1498 set_memory(st, adr_idx);
1499 // Back-to-back stores can eliminate the intermediate store only with DU
1500 // (def-use) info, so push the store on the worklist for the optimizer.
1501 if (mem->req() > MemNode::Address && adr == mem->in(MemNode::Address))
1502 record_for_igvn(st);
1503
1504 return st;
1505 }
1506
1507
1508 void GraphKit::pre_barrier(bool do_load,
1509 Node* ctl,
1510 Node* obj,
1511 Node* adr,
1512 uint adr_idx,
1513 Node* val,
3347 // create a memory projection as for the normal control path
3348 Node* malloc = _gvn.transform(new ProjNode(allocx, TypeFunc::Memory));
3349 set_memory(malloc, rawidx);
3350
3351 // a normal slow-call doesn't change i_o, but an allocation does;
3352 // we create a separate i_o projection for the normal control path
3353 set_i_o(_gvn.transform( new ProjNode(allocx, TypeFunc::I_O, false) ) );
3354 Node* rawoop = _gvn.transform( new ProjNode(allocx, TypeFunc::Parms) );
3355
3356 // put in an initialization barrier
3357 InitializeNode* init = insert_mem_bar_volatile(Op_Initialize, rawidx,
3358 rawoop)->as_Initialize();
3359 assert(alloc->initialization() == init, "2-way macro link must work");
3360 assert(init ->allocation() == alloc, "2-way macro link must work");
3361 {
3362 // Extract memory strands which may participate in the new object's
3363 // initialization, and source them from the new InitializeNode.
3364 // This will allow us to observe initializations when they occur,
3365 // and link them properly (as a group) to the InitializeNode.
3366 assert(init->in(InitializeNode::Memory) == malloc, "");
3367 MergeMemNode* minit_in = MergeMemNode::make(malloc);
3368 init->set_req(InitializeNode::Memory, minit_in);
3369 record_for_igvn(minit_in); // fold it up later, if possible
3370 Node* minit_out = memory(rawidx);
3371 assert(minit_out->is_Proj() && minit_out->in(0) == init, "");
3372 if (oop_type->isa_aryptr()) {
3373 const TypePtr* telemref = oop_type->add_offset(Type::OffsetBot);
3374 int elemidx = C->get_alias_index(telemref);
3375 hook_memory_on_init(*this, elemidx, minit_in, minit_out);
3376 } else if (oop_type->isa_instptr()) {
3377 ciInstanceKlass* ik = oop_type->klass()->as_instance_klass();
3378 for (int i = 0, len = ik->nof_nonstatic_fields(); i < len; i++) {
3379 ciField* field = ik->nonstatic_field_at(i);
3380 if (field->offset() >= TrackedInitializationLimit * HeapWordSize)
3381 continue; // do not bother to track really large numbers of fields
3382 // Find (or create) the alias category for this field:
3383 int fieldidx = C->alias_type(field)->index();
3384 hook_memory_on_init(*this, fieldidx, minit_in, minit_out);
3385 }
3386 }
3387 }