src/hotspot/share/opto/graphKit.cpp

1614   if (val->is_ValueType()) {
1615     // Allocate value type and get oop
1616     val = val->as_ValueType()->allocate(this, deoptimize_on_exception, safe_for_replace)->get_oop();
1617   }
1618 
1619   C2AccessValuePtr addr(adr, adr_type);
1620   C2AccessValue value(val, val_type);
1621   C2ParseAccess access(this, decorators | C2_WRITE_ACCESS, bt, obj, addr);
1622   if (access.is_raw()) {
1623     return _barrier_set->BarrierSetC2::store_at(access, value);
1624   } else {
1625     return _barrier_set->store_at(access, value);
1626   }
1627 }
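
Both the store and load helpers dispatch on access.is_raw(): a raw access invokes the base-class implementation through a qualified call (_barrier_set->BarrierSetC2::store_at(...)), which is resolved statically and so bypasses any GC-specific barrier overrides. A minimal self-contained C++ model of that qualified-call pattern (class names and behavior are illustrative only):

#include <iostream>

// Stand-ins for the BarrierSetC2 hierarchy used above; the derived
// class models a GC-specific barrier set such as G1's.
struct BarrierSetC2 {
  virtual ~BarrierSetC2() {}
  virtual const char* load_at() { return "plain access, no barriers"; }
};
struct GCBarrierSetC2 : BarrierSetC2 {
  const char* load_at() override { return "access with GC barriers"; }
};

int main() {
  GCBarrierSetC2 gc;
  BarrierSetC2* _barrier_set = &gc;
  bool is_raw = true;
  // Qualified call: resolved at compile time, skips the override.
  std::cout << (is_raw ? _barrier_set->BarrierSetC2::load_at()
                       : _barrier_set->load_at()) << "\n";
  return 0;
}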
1628 
1629 Node* GraphKit::access_load_at(Node* obj,   // containing obj
1630                                Node* adr,   // actual address to load val from
1631                                const TypePtr* adr_type,
1632                                const Type* val_type,
1633                                BasicType bt,
1634                                DecoratorSet decorators) {
1635   if (stopped()) {
1636     return top(); // Dead path?
1637   }
1638 
1639   C2AccessValuePtr addr(adr, adr_type);
1640   C2ParseAccess access(this, decorators | C2_READ_ACCESS, bt, obj, addr);
1641   if (access.is_raw()) {
1642     return _barrier_set->BarrierSetC2::load_at(access, val_type);
1643   } else {
1644     return _barrier_set->load_at(access, val_type);
1645   }
1646 }
1647 
1648 Node* GraphKit::access_load(Node* adr,   // actual address to load val from
1649                             const Type* val_type,
1650                             BasicType bt,
1651                             DecoratorSet decorators) {
1652   if (stopped()) {
1653     return top(); // Dead path?
1654   }
1655 
1656   C2AccessValuePtr addr(adr, NULL);
1657   C2ParseAccess access(this, decorators | C2_READ_ACCESS, bt, NULL, addr);
1658   if (access.is_raw()) {
1659     return _barrier_set->BarrierSetC2::load_at(access, val_type);
1660   } else {


3387   { BuildCutout unless(this, bol, PROB_MAX);
3388     inc_sp(nargs);
3389     uncommon_trap(Deoptimization::Reason_class_check,
3390                   Deoptimization::Action_none);
3391   }
3392 }
3393 
3394 // Deoptimize if 'ary' is a null-free value type array and 'val' is null
3395 void GraphKit::gen_value_array_null_guard(Node* ary, Node* val, int nargs) {
3396   assert(EnableValhalla, "should only be used if value types are enabled");
3397   const Type* val_t = _gvn.type(val);
3398   if (val->is_ValueType() || !TypePtr::NULL_PTR->higher_equal(val_t)) {
3399     return; // Never null
3400   }
3401   RegionNode* region = new RegionNode(3);
3402   Node* null_ctl = top();
3403   null_check_oop(val, &null_ctl);
3404   if (null_ctl != top()) {
3405     PreserveJVMState pjvms(this);
3406     set_control(null_ctl);
3407     // Get array element mirror and corresponding value mirror
3408     Node* array_type_mirror = load_mirror_from_klass(load_object_klass(ary));
3409     Node* elem_mirror_adr = basic_plus_adr(array_type_mirror, java_lang_Class::component_mirror_offset_in_bytes());
3410     Node* elem_mirror = access_load_at(array_type_mirror, elem_mirror_adr, _gvn.type(elem_mirror_adr)->is_ptr(), TypeInstPtr::MIRROR, T_OBJECT, IN_HEAP);
3411     Node* inline_mirror_adr = basic_plus_adr(elem_mirror, java_lang_Class::inline_mirror_offset_in_bytes());
3412     Node* inline_mirror = access_load_at(elem_mirror, inline_mirror_adr, _gvn.type(inline_mirror_adr)->is_ptr(), TypeInstPtr::MIRROR->cast_to_ptr_type(TypePtr::BotPTR), T_OBJECT, IN_HEAP);
3413     // Deoptimize if elem_mirror == inline_mirror => null-free array
3414     Node* cmp = _gvn.transform(new CmpPNode(elem_mirror, inline_mirror));
3415     Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::ne));
3416     { BuildCutout unless(this, bol, PROB_MAX);
3417       inc_sp(nargs);
3418       uncommon_trap(Deoptimization::Reason_null_check,
3419                     Deoptimization::Action_none);
3420     }
3421     region->init_req(1, control());
3422   }
3423   region->init_req(2, control());
3424   set_control(_gvn.transform(region));
3425   record_for_igvn(region);
3426 }
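
In this version of gen_value_array_null_guard, the guard loads the array's element mirror and the element class's inline mirror, and deoptimizes when the two are the same object: for a null-free (flattened) value-type array, the component mirror is the inline mirror. A toy model of that decision, using made-up mirror identities:

#include <cassert>

// Toy stand-in for a java.lang.Class mirror; only identity matters here.
struct Mirror {};

// Trap (deoptimize) when the array's element mirror IS the inline
// mirror, i.e. the array is null-free and a null store must not proceed.
bool null_store_must_trap(const Mirror* elem_mirror,
                          const Mirror* inline_mirror) {
  return elem_mirror == inline_mirror;
}

int main() {
  Mirror value_mirror, nullable_mirror;
  assert(null_store_must_trap(&value_mirror, &value_mirror));     // null-free
  assert(!null_store_must_trap(&nullable_mirror, &value_mirror)); // nullable
  return 0;
}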
3427 
3428 Node* GraphKit::load_lh_array_tag(Node* kls) {
3429   Node* lhp = basic_plus_adr(kls, in_bytes(Klass::layout_helper_offset()));
3430   Node* layout_val = make_load(NULL, lhp, TypeInt::INT, T_INT, MemNode::unordered);
3431   return _gvn.transform(new RShiftINode(layout_val, intcon(Klass::_lh_array_tag_shift)));
3432 }
3433 
3434 
3435 Node* GraphKit::gen_lh_array_test(Node* kls, unsigned int lh_value) {
3436   Node* layout_val = load_lh_array_tag(kls);
3437   Node* cmp = _gvn.transform(new CmpINode(layout_val, intcon(lh_value)));
3438   return cmp;
3439 }
3440 
3441 
3442 //------------------------------next_monitor-----------------------------------
3443 // What number should be given to the next monitor?
3444 int GraphKit::next_monitor() {
3445   int current = jvms()->monitor_depth() * C->sync_stack_slots();
3446   int next = current + C->sync_stack_slots();
3447   // Keep the toplevel high water mark current:
3448   if (C->fixed_slots() < next)  C->set_fixed_slots(next);
3449   return current;
3450 }


3688   assert(init ->allocation()     == alloc, "2-way macro link must work");
3689   {
3690     // Extract memory strands which may participate in the new object's
3691     // initialization, and source them from the new InitializeNode.
3692     // This will allow us to observe initializations when they occur,
3693     // and link them properly (as a group) to the InitializeNode.
3694     assert(init->in(InitializeNode::Memory) == malloc, "");
3695     MergeMemNode* minit_in = MergeMemNode::make(malloc);
3696     init->set_req(InitializeNode::Memory, minit_in);
3697     record_for_igvn(minit_in); // fold it up later, if possible
3698     _gvn.set_type(minit_in, Type::MEMORY);
3699     Node* minit_out = memory(rawidx);
3700     assert(minit_out->is_Proj() && minit_out->in(0) == init, "");
3701     // Add an edge in the MergeMem for the header fields so an access
3702     // to one of those has correct memory state
3703     set_memory(minit_out, C->get_alias_index(oop_type->add_offset(oopDesc::mark_offset_in_bytes())));
3704     set_memory(minit_out, C->get_alias_index(oop_type->add_offset(oopDesc::klass_offset_in_bytes())));
3705     if (oop_type->isa_aryptr()) {
3706       const TypeAryPtr* arytype = oop_type->is_aryptr();
3707       if (arytype->klass()->is_value_array_klass()) {
3708         ciValueArrayKlass* vak = arytype->klass()->as_value_array_klass();
3709         ciValueKlass* vk = vak->element_klass()->as_value_klass();
3710         for (int i = 0, len = vk->nof_nonstatic_fields(); i < len; i++) {
3711           ciField* field = vk->nonstatic_field_at(i);
3712           if (field->offset() >= TrackedInitializationLimit * HeapWordSize)
3713             continue;  // do not bother to track really large numbers of fields
3714           int off_in_vt = field->offset() - vk->first_field_offset();
3715           const TypePtr* adr_type = arytype->with_field_offset(off_in_vt)->add_offset(Type::OffsetBot);
3716           int fieldidx = C->get_alias_index(adr_type);
3717           hook_memory_on_init(*this, fieldidx, minit_in, minit_out);
3718         }
3719       } else {
3720         const TypePtr* telemref = oop_type->add_offset(Type::OffsetBot);
3721         int            elemidx  = C->get_alias_index(telemref);
3722         hook_memory_on_init(*this, elemidx, minit_in, minit_out);
3723       }
3724     } else if (oop_type->isa_instptr()) {
3725       set_memory(minit_out, C->get_alias_index(oop_type)); // mark word
3726       ciInstanceKlass* ik = oop_type->klass()->as_instance_klass();
3727       for (int i = 0, len = ik->nof_nonstatic_fields(); i < len; i++) {
3728         ciField* field = ik->nonstatic_field_at(i);
3729         if (field->offset() >= TrackedInitializationLimit * HeapWordSize)
3730           continue;  // do not bother to track really large numbers of fields
3731         // Find (or create) the alias category for this field:
3732         int fieldidx = C->alias_type(field)->index();
3733         hook_memory_on_init(*this, fieldidx, minit_in, minit_out);
3734       }
3735     }
3736   }
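
Both the array and the instance branch skip fields whose offset is at or beyond TrackedInitializationLimit heap words, so very large objects do not get a memory slice per field. A tiny sketch of that cutoff; the two constants are assumed values for illustration:

#include <cassert>

const int TrackedInitializationLimit = 50;  // assumed flag value
const int HeapWordSize = 8;                 // 64-bit heap word

// A field is tracked only if it starts within the first
// TrackedInitializationLimit heap words of the object.
bool tracked(int field_offset_in_bytes) {
  return field_offset_in_bytes < TrackedInitializationLimit * HeapWordSize;
}

int main() {
  assert(tracked(16));     // near the header: gets its own slice
  assert(!tracked(4096));  // deep inside a huge object: skipped
  return 0;
}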
3737 
3738   // Cast raw oop to the real thing...




1614   if (val->is_ValueType()) {
1615     // Allocate value type and get oop
1616     val = val->as_ValueType()->allocate(this, deoptimize_on_exception, safe_for_replace)->get_oop();
1617   }
1618 
1619   C2AccessValuePtr addr(adr, adr_type);
1620   C2AccessValue value(val, val_type);
1621   C2ParseAccess access(this, decorators | C2_WRITE_ACCESS, bt, obj, addr);
1622   if (access.is_raw()) {
1623     return _barrier_set->BarrierSetC2::store_at(access, value);
1624   } else {
1625     return _barrier_set->store_at(access, value);
1626   }
1627 }
1628 
1629 Node* GraphKit::access_load_at(Node* obj,   // containing obj
1630                                Node* adr,   // actual address to load val from
1631                                const TypePtr* adr_type,
1632                                const Type* val_type,
1633                                BasicType bt,
1634                                DecoratorSet decorators,
1635                                Node* ctl) {
1636   if (stopped()) {
1637     return top(); // Dead path?
1638   }
1639 
1640   C2AccessValuePtr addr(adr, adr_type);
1641   C2ParseAccess access(this, decorators | C2_READ_ACCESS, bt, obj, addr, ctl);
1642   if (access.is_raw()) {
1643     return _barrier_set->BarrierSetC2::load_at(access, val_type);
1644   } else {
1645     return _barrier_set->load_at(access, val_type);
1646   }
1647 }
1648 
1649 Node* GraphKit::access_load(Node* adr,   // actual address to load val from
1650                             const Type* val_type,
1651                             BasicType bt,
1652                             DecoratorSet decorators) {
1653   if (stopped()) {
1654     return top(); // Dead path?
1655   }
1656 
1657   C2AccessValuePtr addr(adr, NULL);
1658   C2ParseAccess access(this, decorators | C2_READ_ACCESS, bt, NULL, addr);
1659   if (access.is_raw()) {
1660     return _barrier_set->BarrierSetC2::load_at(access, val_type);
1661   } else {


3388   { BuildCutout unless(this, bol, PROB_MAX);
3389     inc_sp(nargs);
3390     uncommon_trap(Deoptimization::Reason_class_check,
3391                   Deoptimization::Action_none);
3392   }
3393 }
3394 
3395 // Deoptimize if 'ary' is a null-free value type array and 'val' is null
3396 void GraphKit::gen_value_array_null_guard(Node* ary, Node* val, int nargs) {
3397   assert(EnableValhalla, "should only be used if value types are enabled");
3398   const Type* val_t = _gvn.type(val);
3399   if (val->is_ValueType() || !TypePtr::NULL_PTR->higher_equal(val_t)) {
3400     return; // Never null
3401   }
3402   RegionNode* region = new RegionNode(3);
3403   Node* null_ctl = top();
3404   null_check_oop(val, &null_ctl);
3405   if (null_ctl != top()) {
3406     PreserveJVMState pjvms(this);
3407     set_control(null_ctl);
3408     // Extract the null-free property from the klass pointer
3409     Node* k_adr = basic_plus_adr(ary, oopDesc::klass_offset_in_bytes());
3410     const TypePtr* k_adr_type = k_adr->bottom_type()->isa_ptr();
3411     Node* klass = NULL;
3412     if (k_adr_type->is_ptr_to_narrowklass()) {
3413       klass = _gvn.transform(new LoadNKlassNode(NULL, immutable_memory(), k_adr, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT->make_narrowklass(), MemNode::unordered));
3414     } else {
3415       klass = _gvn.transform(new LoadKlassNode(NULL, immutable_memory(), k_adr, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT, MemNode::unordered));
3416     }
3417
3418     Node* null_free = _gvn.transform(new GetNullFreePropertyNode(klass));
3419     // Deoptimize if null-free array
3420     Node* cmp = NULL;
3421     if (_gvn.type(klass)->isa_klassptr()) {
3422       cmp = new CmpLNode(null_free, zerocon(T_LONG));
3423     } else {
3424       cmp = new CmpINode(null_free, zerocon(T_INT));
3425     }
3426     cmp = _gvn.transform(cmp);
3427     Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::eq));
3428     { BuildCutout unless(this, bol, PROB_MAX);
3429       inc_sp(nargs);
3430       uncommon_trap(Deoptimization::Reason_null_check,
3431                     Deoptimization::Action_none);
3432     }
3433     region->init_req(1, control());
3434   }
3435   region->init_req(2, control());
3436   set_control(_gvn.transform(region));
3437   record_for_igvn(region);
3438 }
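
The rewritten guard no longer compares mirrors; it extracts a null-free property from the array's klass pointer via GetNullFreePropertyNode and compares it against zero, using CmpL for a full klass pointer and CmpI for a compressed one. A minimal model of the resulting decision; reading a nonzero property as null-free is inferred from the BoolTest::eq-against-zero cutout above:

#include <cassert>
#include <cstdint>

// Nonzero property word => null-free array => a null store must trap.
bool trap_narrow(int32_t null_free_prop) { return null_free_prop != 0; }
bool trap_wide(int64_t null_free_prop)   { return null_free_prop != 0; }

int main() {
  assert(trap_narrow(1) && trap_wide(1));    // null-free: deoptimize
  assert(!trap_narrow(0) && !trap_wide(0));  // nullable: store proceeds
  return 0;
}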
3439 
3440 Node* GraphKit::load_lh_array_tag(Node* kls) {
3441   Node* lhp = basic_plus_adr(kls, in_bytes(Klass::layout_helper_offset()));
3442   Node* layout_val = _gvn.transform(LoadNode::make(_gvn, NULL, immutable_memory(), lhp, lhp->bottom_type()->is_ptr(), TypeInt::INT, T_INT, MemNode::unordered));
3443 
3444   return _gvn.transform(new RShiftINode(layout_val, intcon(Klass::_lh_array_tag_shift)));
3445 }
3446 
3447 
3448 Node* GraphKit::gen_lh_array_test(Node* kls, unsigned int lh_value) {
3449   Node* layout_val = load_lh_array_tag(kls);
3450   Node* cmp = _gvn.transform(new CmpINode(layout_val, intcon(lh_value)));
3451   return cmp;
3452 }
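
load_lh_array_tag reads the klass's 32-bit layout helper and arithmetic-shifts the array tag out of its top bits; gen_lh_array_test then compares the tag against an expected value. A self-contained sketch of the extraction, with an assumed shift constant:

#include <cassert>
#include <cstdint>

// Assumed tag position: in HotSpot the array tag sits in the top bits
// of the 32-bit layout helper (the exact constant is illustrative).
const int lh_array_tag_shift = 30;

// Mirrors the RShiftI above: the arithmetic shift preserves the sign,
// so array layout helpers (negative) yield negative tags.
int32_t array_tag(int32_t layout_helper) {
  return layout_helper >> lh_array_tag_shift;
}

int main() {
  int32_t array_lh = (int32_t)0xC0000018;  // hypothetical array helper
  assert(array_tag(array_lh) == -1);       // top two bits set => tag -1
  assert(array_tag(0x00000018) == 0);      // instance-like helper => tag 0
  return 0;
}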
3453 
3454 
3455 //------------------------------next_monitor-----------------------------------
3456 // What number should be given to the next monitor?
3457 int GraphKit::next_monitor() {
3458   int current = jvms()->monitor_depth() * C->sync_stack_slots();
3459   int next = current + C->sync_stack_slots();
3460   // Keep the toplevel high water mark current:
3461   if (C->fixed_slots() < next)  C->set_fixed_slots(next);
3462   return current;
3463 }
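
next_monitor numbers monitors by their stack-slot offset: each monitor occupies sync_stack_slots() slots, and the compilation's fixed-slot high-water mark is bumped to cover the newly numbered monitor. A self-contained model with an assumed slot count:

#include <cassert>

const int sync_stack_slots = 2;  // assumed; the real value is platform dependent
int fixed_slots = 0;             // stands in for C->fixed_slots()

// Returns the slot offset of the next monitor and keeps the top-level
// high-water mark current, mirroring next_monitor() above.
int next_monitor(int monitor_depth) {
  int current = monitor_depth * sync_stack_slots;
  int next = current + sync_stack_slots;
  if (fixed_slots < next) fixed_slots = next;
  return current;
}

int main() {
  assert(next_monitor(0) == 0 && fixed_slots == 2);  // first monitor
  assert(next_monitor(1) == 2 && fixed_slots == 4);  // one level deeper
  return 0;
}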


3701   assert(init ->allocation()     == alloc, "2-way macro link must work");
3702   {
3703     // Extract memory strands which may participate in the new object's
3704     // initialization, and source them from the new InitializeNode.
3705     // This will allow us to observe initializations when they occur,
3706     // and link them properly (as a group) to the InitializeNode.
3707     assert(init->in(InitializeNode::Memory) == malloc, "");
3708     MergeMemNode* minit_in = MergeMemNode::make(malloc);
3709     init->set_req(InitializeNode::Memory, minit_in);
3710     record_for_igvn(minit_in); // fold it up later, if possible
3711     _gvn.set_type(minit_in, Type::MEMORY);
3712     Node* minit_out = memory(rawidx);
3713     assert(minit_out->is_Proj() && minit_out->in(0) == init, "");
3714     // Add an edge in the MergeMem for the header fields so an access
3715     // to one of those has correct memory state
3716     set_memory(minit_out, C->get_alias_index(oop_type->add_offset(oopDesc::mark_offset_in_bytes())));
3717     set_memory(minit_out, C->get_alias_index(oop_type->add_offset(oopDesc::klass_offset_in_bytes())));
3718     if (oop_type->isa_aryptr()) {
3719       const TypeAryPtr* arytype = oop_type->is_aryptr();
3720       if (arytype->klass()->is_value_array_klass()) {
3721         // Initially all flattened array accesses share a single slice
3722         // but that changes after parsing. Prepare the memory graph so
3723         // it can optimize flattened array accesses properly once they
3724         // don't share a single slice.
3725         assert(C->flattened_accesses_share_alias(), "should be set at parse time");
3726         C->set_flattened_accesses_share_alias(false);
3727         ciValueArrayKlass* vak = arytype->klass()->as_value_array_klass();
3728         ciValueKlass* vk = vak->element_klass()->as_value_klass();
3729         for (int i = 0, len = vk->nof_nonstatic_fields(); i < len; i++) {
3730           ciField* field = vk->nonstatic_field_at(i);
3731           if (field->offset() >= TrackedInitializationLimit * HeapWordSize)
3732             continue;  // do not bother to track really large numbers of fields
3733           int off_in_vt = field->offset() - vk->first_field_offset();
3734           const TypePtr* adr_type = arytype->with_field_offset(off_in_vt)->add_offset(Type::OffsetBot);
3735           int fieldidx = C->get_alias_index(adr_type, true);
3736           hook_memory_on_init(*this, fieldidx, minit_in, minit_out);
3737         }
3738         C->set_flattened_accesses_share_alias(true);
3739         hook_memory_on_init(*this, C->get_alias_index(TypeAryPtr::VALUES), minit_in, minit_out);
3740       } else {
3741         const TypePtr* telemref = oop_type->add_offset(Type::OffsetBot);
3742         int            elemidx  = C->get_alias_index(telemref);
3743         hook_memory_on_init(*this, elemidx, minit_in, minit_out);
3744       }
3745     } else if (oop_type->isa_instptr()) {
3746       set_memory(minit_out, C->get_alias_index(oop_type)); // mark word
3747       ciInstanceKlass* ik = oop_type->klass()->as_instance_klass();
3748       for (int i = 0, len = ik->nof_nonstatic_fields(); i < len; i++) {
3749         ciField* field = ik->nonstatic_field_at(i);
3750         if (field->offset() >= TrackedInitializationLimit * HeapWordSize)
3751           continue;  // do not bother to track really large numbers of fields
3752         // Find (or create) the alias category for this field:
3753         int fieldidx = C->alias_type(field)->index();
3754         hook_memory_on_init(*this, fieldidx, minit_in, minit_out);
3755       }
3756     }
3757   }
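
The flattened-array branch temporarily clears flattened_accesses_share_alias so that each value-type field offset is assigned its own alias index (memory slice), then restores the shared slice that parsing still relies on. A toy model of shared-versus-split slicing; the slice table and indices are illustrative:

#include <cassert>
#include <map>

// Slice 0 models the single shared flattened-array slice; per-field
// slices are handed out on demand once sharing is turned off.
std::map<int, int> field_slices;

int alias_index(int field_offset, bool share_alias) {
  if (share_alias) return 0;  // parse time: all flattened accesses alias
  auto it = field_slices.find(field_offset);
  if (it != field_slices.end()) return it->second;
  int idx = (int)field_slices.size() + 1;  // fresh slice per field offset
  field_slices[field_offset] = idx;
  return idx;
}

int main() {
  // Shared: two distinct fields land on the same slice.
  assert(alias_index(0, true) == alias_index(8, true));
  // Split: each field offset now gets its own slice.
  assert(alias_index(0, false) != alias_index(8, false));
  return 0;
}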
3758 
3759   // Cast raw oop to the real thing...