
src/share/vm/opto/macro.cpp  (old version)




 554         assert(false, "unknown node on this path");
 555 #endif
 556         return NULL;  // unknown node on this path
 557       }
 558     }
 559   }
 560   // Set Phi's inputs
 561   for (uint j = 1; j < length; j++) {
 562     if (values.at(j) == mem) {
 563       phi->init_req(j, phi);
 564     } else {
 565       phi->init_req(j, values.at(j));
 566     }
 567   }
 568   return phi;
 569 }
 570 
 571 // Search for the last value stored into the object's field.
 572 Node *PhaseMacroExpand::value_from_mem(Node *sfpt_mem, Node *sfpt_ctl, BasicType ft, const Type *ftype, const TypeOopPtr *adr_t, AllocateNode *alloc) {
 573   assert(adr_t->is_known_instance_field(), "instance required");
 574   assert(ft != T_VALUETYPE, "should not be used for value type fields");
 575   int instance_id = adr_t->instance_id();
 576   assert((uint)instance_id == alloc->_idx, "wrong allocation");
 577 
 578   int alias_idx = C->get_alias_index(adr_t);
 579   int offset = adr_t->offset();
 580   Node *start_mem = C->start()->proj_out(TypeFunc::Memory);
 581   Node *alloc_ctrl = alloc->in(TypeFunc::Control);
 582   Node *alloc_mem = alloc->in(TypeFunc::Memory);
 583   Arena *a = Thread::current()->resource_area();
 584   VectorSet visited(a);
 585 
 586   bool done = sfpt_mem == alloc_mem;
 587   Node *mem = sfpt_mem;
 588   while (!done) {
 589     if (visited.test_set(mem->_idx)) {
 590       return NULL;  // found a loop, give up
 591     }
 592     mem = scan_mem_chain(mem, alias_idx, offset, start_mem, alloc, &_igvn);
 593     if (mem == start_mem || mem == alloc_mem) {
 594       done = true;  // hit a sentinel, return appropriate 0 value


 658       Node* ctl = mem->in(0);
 659       Node* m = mem->in(TypeFunc::Memory);
 660       if (sfpt_ctl->is_Proj() && sfpt_ctl->as_Proj()->is_uncommon_trap_proj(Deoptimization::Reason_none)) {
 661         // pin the loads in the uncommon trap path
 662         ctl = sfpt_ctl;
 663         m = sfpt_mem;
 664       }
 665       return make_arraycopy_load(mem->as_ArrayCopy(), offset, ctl, m, ft, ftype, alloc);
 666     }
 667   }
 668   // Something went wrong.
 669   return NULL;
 670 }
 671 
 672 // Search for the last value stored into each of the value type's fields.
 673 Node* PhaseMacroExpand::value_type_from_mem(Node* mem, Node* ctl, ciValueKlass* vk, const TypeAryPtr* adr_type, int offset, AllocateNode* alloc) {
 674   // Subtract the offset of the first field to account for the missing oop header
 675   offset -= vk->first_field_offset();
 676   // Create a new ValueTypeNode and retrieve the field values from memory
 677   ValueTypeNode* vt = ValueTypeNode::make(_igvn, vk)->as_ValueType();
 678   for (int i = 0; i < vk->field_count(); ++i) {
 679     ciType* field_type = vt->field_type(i);
 680     int field_offset = offset + vt->field_offset(i);
 681     // Each value type field has its own memory slice
 682     adr_type = adr_type->with_field_offset(field_offset);
 683     Node* value = NULL;
 684     if (field_type->basic_type() == T_VALUETYPE) {
 685       value = value_type_from_mem(mem, ctl, field_type->as_value_klass(), adr_type, field_offset, alloc);
 686     } else {
 687       value = value_from_mem(mem, ctl, field_type->basic_type(), Type::get_const_type(field_type), adr_type, alloc);
 688     }
 689     vt->set_field_value(i, value);
 690   }
 691   return vt;
 692 }
 693 
 694 // Check the possibility of scalar replacement.
 695 bool PhaseMacroExpand::can_eliminate_allocation(AllocateNode *alloc, GrowableArray <SafePointNode *>& safepoints) {
 696   //  Scan the uses of the allocation to check for anything that would
 697   //  prevent us from eliminating it.
 698   NOT_PRODUCT( const char* fail_eliminate = NULL; )
 699   DEBUG_ONLY( Node* disq_node = NULL; )
 700   bool  can_eliminate = true;
 701 
 702   Node* res = alloc->result_cast();
 703   const TypeOopPtr* res_type = NULL;
 704   if (res == NULL) {
 705     // All users were eliminated.
 706   } else if (!res->is_CheckCastPP()) {
 707     NOT_PRODUCT(fail_eliminate = "Allocation does not have unique CheckCastPP";)


 868     // of regular debuginfo at the last (youngest) JVMS.
 869     // Record relative start index.
 870     uint first_ind = (sfpt->req() - sfpt->jvms()->scloff());
 871     SafePointScalarObjectNode* sobj = new SafePointScalarObjectNode(res_type,
 872 #ifdef ASSERT
 873                                                  alloc,
 874 #endif
 875                                                  first_ind, nfields);
 876     sobj->init_req(0, C->root());
 877     transform_later(sobj);
 878 
 879     // Scan object's fields adding an input to the safepoint for each field.
 880     for (int j = 0; j < nfields; j++) {
 881       intptr_t offset;
 882       ciField* field = NULL;
 883       if (iklass != NULL) {
 884         field = iklass->nonstatic_field_at(j);
 885         offset = field->offset();
 886         elem_type = field->type();
 887         basic_elem_type = field->layout_type();
 888         // Value type fields should not have safepoint uses
 889         assert(basic_elem_type != T_VALUETYPE, "value type fields are flattened");
 890       } else {
 891         offset = array_base + j * (intptr_t)element_size;
 892       }
 893 
 894       const Type *field_type;
 895       // The following code is taken from Parse::do_get_xxx().
 896       if (basic_elem_type == T_OBJECT || basic_elem_type == T_ARRAY) {
 897         if (!elem_type->is_loaded()) {
 898           field_type = TypeInstPtr::BOTTOM;
 899         } else if (field != NULL && field->is_static_constant()) {
 900           // This can happen if the constant oop is non-perm.
 901           ciObject* con = field->constant_value().as_object();
 902           // Do not "join" in the previous type; it doesn't add value,
 903           // and may yield a vacuous result if the field is of interface type.
 904           field_type = TypeOopPtr::make_from_constant(con)->isa_oopptr();
 905           assert(field_type != NULL, "field singleton type must be consistent");
 906         } else {
 907           field_type = TypeOopPtr::make_from_klass(elem_type->as_klass());
 908         }
 909         if (UseCompressedOops) {


2633 
2634   Node *memproj = transform_later(new ProjNode(call, TypeFunc::Memory) );
2635   mem_phi->init_req(1, memproj );
2636   mem_phi->init_req(2, mem);
2637   transform_later(mem_phi);
2638   _igvn.replace_node(_memproj_fallthrough, mem_phi);
2639 }
2640 
2641 // A value type is returned from the call but we don't know its
2642 // type. Either we get a buffered value (and nothing needs to be done)
2643 // or one of the values being returned is the klass of the value type
2644 // and we need to allocate a value type instance of that type and
2645 // initialize it with other values being returned. In that case, we
2646 // first try a fast-path allocation and initialize the value with the
2647 // value klass's pack handler, or we fall back to a runtime call.
2648 void PhaseMacroExpand::expand_mh_intrinsic_return(CallStaticJavaNode* call) {
2649   Node* ret = call->proj_out(TypeFunc::Parms);
2650   if (ret == NULL) {
2651     return;
2652   }
2653   assert(ret->bottom_type()->is_valuetypeptr()->klass() == C->env()->___Value_klass(), "unexpected return type from MH intrinsic");
2654   const TypeFunc* tf = call->_tf;
2655   const TypeTuple* domain = OptoRuntime::store_value_type_fields_Type()->domain_cc();
2656   const TypeFunc* new_tf = TypeFunc::make(tf->domain_sig(), tf->domain_cc(), tf->range_sig(), domain);
2657   call->_tf = new_tf;
2658   // Make sure the change of type is applied before projections are
2659   // processed by igvn
2660   _igvn.set_type(call, call->Value(&_igvn));
2661   _igvn.set_type(ret, ret->Value(&_igvn));
2662 
2663   // Before any new projection is added:
2664   CallProjections projs;
2665   call->extract_projections(&projs, true, true);
2666 
2667   Node* ctl = new Node(1);
2668   Node* mem = new Node(1);
2669   Node* io = new Node(1);
2670   Node* ex_ctl = new Node(1);
2671   Node* ex_mem = new Node(1);
2672   Node* ex_io = new Node(1);
2673   Node* res = new Node(1);


2750   ex_mem_phi->init_req(2, ex_mem);
2751   ex_io_phi->init_req(2, ex_io);
2752 
2753   transform_later(ex_r);
2754   transform_later(ex_mem_phi);
2755   transform_later(ex_io_phi);
2756 
2757   Node* slowpath_false = new IfFalseNode(slowpath_iff);
2758   transform_later(slowpath_false);
2759   Node* rawmem = new StorePNode(slowpath_false, mem, top_adr, TypeRawPtr::BOTTOM, new_top, MemNode::unordered);
2760   transform_later(rawmem);
2761   Node* mark_node = NULL;
2762   // For now only enable fast locking for non-array types
2763   if (UseBiasedLocking) {
2764     mark_node = make_load(slowpath_false, rawmem, klass_node, in_bytes(Klass::prototype_header_offset()), TypeRawPtr::BOTTOM, T_ADDRESS);
2765   } else {
2766     mark_node = makecon(TypeRawPtr::make((address)markOopDesc::prototype()));
2767   }
2768   rawmem = make_store(slowpath_false, rawmem, old_top, oopDesc::mark_offset_in_bytes(), mark_node, T_ADDRESS);
2769   rawmem = make_store(slowpath_false, rawmem, old_top, oopDesc::klass_offset_in_bytes(), klass_node, T_METADATA);
2770   rawmem = make_store(slowpath_false, rawmem, old_top, oopDesc::klass_gap_offset_in_bytes(), intcon(0), T_INT);
2771   Node* pack_handler = make_load(slowpath_false, rawmem, klass_node, in_bytes(ValueKlass::pack_handler_offset()), TypeRawPtr::BOTTOM, T_ADDRESS);
2772 
2773   CallLeafNoFPNode* handler_call = new CallLeafNoFPNode(OptoRuntime::pack_value_type_Type(),
2774                                                         NULL,
2775                                                         "pack handler",
2776                                                         TypeRawPtr::BOTTOM);
2777   handler_call->init_req(TypeFunc::Control, slowpath_false);
2778   handler_call->init_req(TypeFunc::Memory, rawmem);
2779   handler_call->init_req(TypeFunc::I_O, top());
2780   handler_call->init_req(TypeFunc::FramePtr, call->in(TypeFunc::FramePtr));
2781   handler_call->init_req(TypeFunc::ReturnAdr, top());
2782   handler_call->init_req(TypeFunc::Parms, pack_handler);
2783   handler_call->init_req(TypeFunc::Parms+1, old_top);
2784 
2785   // We don't know how many values are returned. This assumes the
2786   // worst case, that all available registers are used.
2787   for (uint i = TypeFunc::Parms+1; i < domain->cnt(); i++) {
2788     if (domain->field_at(i) == Type::HALF) {
2789       slow_call->init_req(i, top());
2790       handler_call->init_req(i+1, top());

src/share/vm/opto/macro.cpp  (new version)

 554         assert(false, "unknown node on this path");
 555 #endif
 556         return NULL;  // unknown node on this path
 557       }
 558     }
 559   }
 560   // Set Phi's inputs
 561   for (uint j = 1; j < length; j++) {
 562     if (values.at(j) == mem) {
 563       phi->init_req(j, phi);
 564     } else {
 565       phi->init_req(j, values.at(j));
 566     }
 567   }
 568   return phi;
 569 }
 570 
 571 // Search for the last value stored into the object's field.
 572 Node *PhaseMacroExpand::value_from_mem(Node *sfpt_mem, Node *sfpt_ctl, BasicType ft, const Type *ftype, const TypeOopPtr *adr_t, AllocateNode *alloc) {
 573   assert(adr_t->is_known_instance_field(), "instance required");
 574   int instance_id = adr_t->instance_id();
 575   assert((uint)instance_id == alloc->_idx, "wrong allocation");
 576 
 577   int alias_idx = C->get_alias_index(adr_t);
 578   int offset = adr_t->offset();
 579   Node *start_mem = C->start()->proj_out(TypeFunc::Memory);
 580   Node *alloc_ctrl = alloc->in(TypeFunc::Control);
 581   Node *alloc_mem = alloc->in(TypeFunc::Memory);
 582   Arena *a = Thread::current()->resource_area();
 583   VectorSet visited(a);
 584 
 585   bool done = sfpt_mem == alloc_mem;
 586   Node *mem = sfpt_mem;
 587   while (!done) {
 588     if (visited.test_set(mem->_idx)) {
 589       return NULL;  // found a loop, give up
 590     }
 591     mem = scan_mem_chain(mem, alias_idx, offset, start_mem, alloc, &_igvn);
 592     if (mem == start_mem || mem == alloc_mem) {
 593       done = true;  // hit a sentinel, return appropriate 0 value


 657       Node* ctl = mem->in(0);
 658       Node* m = mem->in(TypeFunc::Memory);
 659       if (sfpt_ctl->is_Proj() && sfpt_ctl->as_Proj()->is_uncommon_trap_proj(Deoptimization::Reason_none)) {
 660         // pin the loads in the uncommon trap path
 661         ctl = sfpt_ctl;
 662         m = sfpt_mem;
 663       }
 664       return make_arraycopy_load(mem->as_ArrayCopy(), offset, ctl, m, ft, ftype, alloc);
 665     }
 666   }
 667   // Something went wrong.
 668   return NULL;
 669 }
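
The loop above walks the memory graph backward until it reaches one of the two sentinels (start_mem or alloc_mem), recording each visited node index so that a cycle aborts the search instead of hanging. A minimal sketch of the same pattern, with a standard container standing in for HotSpot's VectorSet and all names purely illustrative:

    #include <unordered_set>

    struct MemNode { int _idx; MemNode* prev; };     // stand-in for C2's Node

    // Returns the sentinel reached, or nullptr if the chain loops.
    MemNode* walk_mem_chain(MemNode* start, MemNode* start_mem, MemNode* alloc_mem) {
      std::unordered_set<int> visited;
      MemNode* mem = start;
      while (mem != start_mem && mem != alloc_mem) {
        if (!visited.insert(mem->_idx).second) {
          return nullptr;                            // found a loop, give up
        }
        mem = mem->prev;                             // scan_mem_chain() would also
      }                                              // skip unrelated stores here
      return mem;
    }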
 670 
 671 // Search for the last value stored into each of the value type's fields.
 672 Node* PhaseMacroExpand::value_type_from_mem(Node* mem, Node* ctl, ciValueKlass* vk, const TypeAryPtr* adr_type, int offset, AllocateNode* alloc) {
 673   // Subtract the offset of the first field to account for the missing oop header
 674   offset -= vk->first_field_offset();
 675   // Create a new ValueTypeNode and retrieve the field values from memory
 676   ValueTypeNode* vt = ValueTypeNode::make(_igvn, vk)->as_ValueType();
 677   for (int i = 0; i < vk->nof_declared_nonstatic_fields(); ++i) {
 678     ciType* field_type = vt->field_type(i);
 679     int field_offset = offset + vt->field_offset(i);
 680     // Each value type field has its own memory slice
 681     adr_type = adr_type->with_field_offset(field_offset);
 682     Node* value = NULL;
 683     if (field_type->is_valuetype() && vt->field_is_flattened(i)) {
 684       value = value_type_from_mem(mem, ctl, field_type->as_value_klass(), adr_type, field_offset, alloc);
 685     } else {
 686       const Type* ft = Type::get_const_type(field_type);
 687       BasicType bt = field_type->basic_type();
 688       if (UseCompressedOops && !is_java_primitive(bt)) {
 689         ft = ft->make_narrowoop();
 690         bt = T_NARROWOOP;
 691       }
 692       value = value_from_mem(mem, ctl, bt, ft, adr_type, alloc);
 693       if (ft->isa_narrowoop()) {
 694         assert(UseCompressedOops, "unexpected narrow oop");
 695         value = transform_later(new DecodeNNode(value, value->get_ptr_type()));
 696       }
 697     }
 698     vt->set_field_value(i, value);
 699   }
 700   return vt;
 701 }
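
To see what the first_field_offset() adjustment does, consider a hypothetical value klass whose buffered (standalone) layout places the first field at byte 12; the numbers are illustrative only:

    //  buffered layout:    header [0..11] | field a @ 12 | field b @ 16
    //  flattened at off:                    field a @ off | field b @ off+4
    //
    //  offset -= vk->first_field_offset();          // offset = off - 12
    //  field a: (off - 12) + field_offset(a) = off - 12 + 12 = off
    //  field b: (off - 12) + field_offset(b) = off - 12 + 16 = off + 4

Since field_offset(i) is a buffered-layout offset that already includes the header, subtracting the header size once up front makes the per-field addresses line up with the flattened layout.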
 702 
 703 // Check the possibility of scalar replacement.
 704 bool PhaseMacroExpand::can_eliminate_allocation(AllocateNode *alloc, GrowableArray <SafePointNode *>& safepoints) {
 705   //  Scan the uses of the allocation to check for anything that would
 706   //  prevent us from eliminating it.
 707   NOT_PRODUCT( const char* fail_eliminate = NULL; )
 708   DEBUG_ONLY( Node* disq_node = NULL; )
 709   bool  can_eliminate = true;
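
The NOT_PRODUCT and DEBUG_ONLY wrappers compile to nothing in builds without the corresponding diagnostics, so fail_eliminate and disq_node only exist where they can be reported. Simplified versions of the definitions (see HotSpot's utilities/macros.hpp):

    #ifdef PRODUCT
    #define NOT_PRODUCT(code)
    #else
    #define NOT_PRODUCT(code) code
    #endif

    #ifdef ASSERT
    #define DEBUG_ONLY(code) code
    #else
    #define DEBUG_ONLY(code)
    #endif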
 710 
 711   Node* res = alloc->result_cast();
 712   const TypeOopPtr* res_type = NULL;
 713   if (res == NULL) {
 714     // All users were eliminated.
 715   } else if (!res->is_CheckCastPP()) {
 716     NOT_PRODUCT(fail_eliminate = "Allocation does not have unique CheckCastPP";)


 877     // of regular debuginfo at the last (youngest) JVMS.
 878     // Record relative start index.
 879     uint first_ind = (sfpt->req() - sfpt->jvms()->scloff());
 880     SafePointScalarObjectNode* sobj = new SafePointScalarObjectNode(res_type,
 881 #ifdef ASSERT
 882                                                  alloc,
 883 #endif
 884                                                  first_ind, nfields);
 885     sobj->init_req(0, C->root());
 886     transform_later(sobj);
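
With illustrative numbers, the relative index works out as follows: if the safepoint currently has req() == 30 inputs and its JVMS records scloff() == 24, then first_ind == 6, so this object's field values occupy inputs 24 + 6 == 30 onward, exactly where the loop below appends them.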
 887 
 888     // Scan object's fields adding an input to the safepoint for each field.
 889     for (int j = 0; j < nfields; j++) {
 890       intptr_t offset;
 891       ciField* field = NULL;
 892       if (iklass != NULL) {
 893         field = iklass->nonstatic_field_at(j);
 894         offset = field->offset();
 895         elem_type = field->type();
 896         basic_elem_type = field->layout_type();
 897         assert(!field->is_flattened(), "flattened value type fields should not have safepoint uses");
 898       } else {
 899         offset = array_base + j * (intptr_t)element_size;
 900       }
 901 
 902       const Type *field_type;
 903       // The following code is taken from Parse::do_get_xxx().
 904       if (basic_elem_type == T_OBJECT || basic_elem_type == T_ARRAY) {
 905         if (!elem_type->is_loaded()) {
 906           field_type = TypeInstPtr::BOTTOM;
 907         } else if (field != NULL && field->is_static_constant()) {
 908           // This can happen if the constant oop is non-perm.
 909           ciObject* con = field->constant_value().as_object();
 910           // Do not "join" in the previous type; it doesn't add value,
 911           // and may yield a vacuous result if the field is of interface type.
 912           field_type = TypeOopPtr::make_from_constant(con)->isa_oopptr();
 913           assert(field_type != NULL, "field singleton type must be consistent");
 914         } else {
 915           field_type = TypeOopPtr::make_from_klass(elem_type->as_klass());
 916         }
 917         if (UseCompressedOops) {


2641 
2642   Node *memproj = transform_later(new ProjNode(call, TypeFunc::Memory) );
2643   mem_phi->init_req(1, memproj );
2644   mem_phi->init_req(2, mem);
2645   transform_later(mem_phi);
2646   _igvn.replace_node(_memproj_fallthrough, mem_phi);
2647 }
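
The mem_phi wiring above is C2's standard Region/Phi idiom: input i of each Phi carries the value flowing in along input i of the controlling Region, and slot 0 of both is reserved for the node itself. A generic sketch (fast_ctl, slow_ctl and the other names are illustrative):

    RegionNode* r   = new RegionNode(3);             // slots: 0 = self, 1, 2
    Node*       phi = PhiNode::make(r, init_mem, Type::MEMORY, adr_type);
    r->init_req(1, fast_ctl);   phi->init_req(1, fast_mem);
    r->init_req(2, slow_ctl);   phi->init_req(2, slow_mem);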
2648 
2649 // A value type is returned from the call but we don't know its
2650 // type. Either we get a buffered value (and nothing needs to be done)
2651 // or one of the values being returned is the klass of the value type
2652 // and we need to allocate a value type instance of that type and
2653 // initialize it with other values being returned. In that case, we
2654 // first try a fast-path allocation and initialize the value with the
2655 // value klass's pack handler, or we fall back to a runtime call.
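
Informally, the expansion described above produces the following shape (pseudocode; all names illustrative):

    //  res = call();                        // returns an oop or a ValueKlass*
    //  if (res is a buffered oop) {
    //    // nothing to do
    //  } else {                             // res is the value type's klass
    //    if (fast TLAB allocation succeeds) {
    //      initialize mark word, klass, klass gap;
    //      klass->pack_handler(obj);        // copy returned fields into obj
    //    } else {
    //      runtime call;                    // allocate and pack in the VM
    //    }
    //  }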
2656 void PhaseMacroExpand::expand_mh_intrinsic_return(CallStaticJavaNode* call) {
2657   Node* ret = call->proj_out(TypeFunc::Parms);
2658   if (ret == NULL) {
2659     return;
2660   }
2661   assert(ret->bottom_type()->is_valuetypeptr()->is__Value(), "unexpected return type from MH intrinsic");
2662   const TypeFunc* tf = call->_tf;
2663   const TypeTuple* domain = OptoRuntime::store_value_type_fields_Type()->domain_cc();
2664   const TypeFunc* new_tf = TypeFunc::make(tf->domain_sig(), tf->domain_cc(), tf->range_sig(), domain);
2665   call->_tf = new_tf;
2666   // Make sure the change of type is applied before projections are
2667   // processed by igvn
2668   _igvn.set_type(call, call->Value(&_igvn));
2669   _igvn.set_type(ret, ret->Value(&_igvn));
2670 
2671   // Before any new projection is added:
2672   CallProjections projs;
2673   call->extract_projections(&projs, true, true);
2674 
2675   Node* ctl = new Node(1);
2676   Node* mem = new Node(1);
2677   Node* io = new Node(1);
2678   Node* ex_ctl = new Node(1);
2679   Node* ex_mem = new Node(1);
2680   Node* ex_io = new Node(1);
2681   Node* res = new Node(1);
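
These seven single-input Nodes appear to be temporary placeholders: the new fast/slow-path subgraph is wired to them while it is built, standing in for the real control, memory, i/o and result flows collected by extract_projections() above until the projections can be rewired.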


2758   ex_mem_phi->init_req(2, ex_mem);
2759   ex_io_phi->init_req(2, ex_io);
2760 
2761   transform_later(ex_r);
2762   transform_later(ex_mem_phi);
2763   transform_later(ex_io_phi);
2764 
2765   Node* slowpath_false = new IfFalseNode(slowpath_iff);
2766   transform_later(slowpath_false);
2767   Node* rawmem = new StorePNode(slowpath_false, mem, top_adr, TypeRawPtr::BOTTOM, new_top, MemNode::unordered);
2768   transform_later(rawmem);
2769   Node* mark_node = NULL;
2770   // For now only enable fast locking for non-array types
2771   if (UseBiasedLocking) {
2772     mark_node = make_load(slowpath_false, rawmem, klass_node, in_bytes(Klass::prototype_header_offset()), TypeRawPtr::BOTTOM, T_ADDRESS);
2773   } else {
2774     mark_node = makecon(TypeRawPtr::make((address)markOopDesc::prototype()));
2775   }
2776   rawmem = make_store(slowpath_false, rawmem, old_top, oopDesc::mark_offset_in_bytes(), mark_node, T_ADDRESS);
2777   rawmem = make_store(slowpath_false, rawmem, old_top, oopDesc::klass_offset_in_bytes(), klass_node, T_METADATA);
2778   if (UseCompressedClassPointers) {
2779     rawmem = make_store(slowpath_false, rawmem, old_top, oopDesc::klass_gap_offset_in_bytes(), intcon(0), T_INT);
2780   }
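
The stores above fill in the object header. On a 64-bit VM with compressed class pointers the layout being initialized is, with the usual offsets shown for illustration:

    //  offset  0: mark word     (8 bytes)  <- mark_node
    //  offset  8: narrow klass  (4 bytes)  <- klass_node
    //  offset 12: klass gap     (4 bytes)  <- intcon(0), keeps 8-byte alignment
    //  offset 16: first field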
2781   Node* pack_handler = make_load(slowpath_false, rawmem, klass_node, in_bytes(ValueKlass::pack_handler_offset()), TypeRawPtr::BOTTOM, T_ADDRESS);
2782 
2783   CallLeafNoFPNode* handler_call = new CallLeafNoFPNode(OptoRuntime::pack_value_type_Type(),
2784                                                         NULL,
2785                                                         "pack handler",
2786                                                         TypeRawPtr::BOTTOM);
2787   handler_call->init_req(TypeFunc::Control, slowpath_false);
2788   handler_call->init_req(TypeFunc::Memory, rawmem);
2789   handler_call->init_req(TypeFunc::I_O, top());
2790   handler_call->init_req(TypeFunc::FramePtr, call->in(TypeFunc::FramePtr));
2791   handler_call->init_req(TypeFunc::ReturnAdr, top());
2792   handler_call->init_req(TypeFunc::Parms, pack_handler);
2793   handler_call->init_req(TypeFunc::Parms+1, old_top);
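
The fixed input slots wired above are the standard C2 call-node inputs, in declaration order:

    //  TypeFunc::Control     control predecessor (here: slowpath_false)
    //  TypeFunc::I_O         i/o state (top(): a leaf call has no i/o effects)
    //  TypeFunc::Memory      incoming memory state (here: rawmem)
    //  TypeFunc::FramePtr    caller's frame pointer
    //  TypeFunc::ReturnAdr   return address slot
    //  TypeFunc::Parms       first real argument (here: the pack handler)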
2794 
2795   // We don't know how many values are returned. This assumes the
2796   // worst case, that all available registers are used.
2797   for (uint i = TypeFunc::Parms+1; i < domain->cnt(); i++) {
2798     if (domain->field_at(i) == Type::HALF) {
2799       slow_call->init_req(i, top());
2800       handler_call->init_req(i+1, top());
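      // Type::HALF marks the second (dummy) slot of a two-slot value
      // (long/double); no value lives there, so both calls receive top().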

