src/share/vm/opto/library_call.cpp
rev 8008 : 8073866: Fix for 8064703 is not sufficient
Summary: side effects between allocation and arraycopy can be re-executed, and an unreachable uninitialized array can be seen by GCs
Reviewed-by:
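
For context, here is a minimal Java sketch of the shape this change is concerned with (the class and method names are illustrative, not part of the change): the destination array is a tightly coupled allocation, so C2 can let the arraycopy itself initialize it, and the guards emitted for the copy end up between the allocation and the copy.

    class TightlyCoupledCopy {
        static Object[] copyOf(Object[] src, int n) {
            Object[] dst = new Object[n];          // tightly coupled allocation, left uninitialized by C2
            System.arraycopy(src, 0, dst, 0, n);   // intrinsified copy performs the initialization
            return dst;
        }
    }

If one of those guards deoptimizes, execution must not leave the allocated-but-uninitialized dst behind for a GC to scan, and no side effect sitting between the allocation and the copy may be re-executed; those are the two failure modes named in the summary.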


 245   bool inline_native_time_funcs(address method, const char* funcName);
 246   bool inline_native_isInterrupted();
 247   bool inline_native_Class_query(vmIntrinsics::ID id);
 248   bool inline_native_subtype_check();
 249 
 250   bool inline_native_newArray();
 251   bool inline_native_getLength();
 252   bool inline_array_copyOf(bool is_copyOfRange);
 253   bool inline_array_equals();
 254   void copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array, bool card_mark);
 255   bool inline_native_clone(bool is_virtual);
 256   bool inline_native_Reflection_getCallerClass();
 257   // Helper function for inlining native object hash method
 258   bool inline_native_hashcode(bool is_virtual, bool is_static);
 259   bool inline_native_getClass();
 260 
 261   // Helper functions for inlining arraycopy
 262   bool inline_arraycopy();
 263   AllocateArrayNode* tightly_coupled_allocation(Node* ptr,
 264                                                 RegionNode* slow_region);



 265   typedef enum { LS_xadd, LS_xchg, LS_cmpxchg } LoadStoreKind;
 266   bool inline_unsafe_load_store(BasicType type,  LoadStoreKind kind);
 267   bool inline_unsafe_ordered_store(BasicType type);
 268   bool inline_unsafe_fence(vmIntrinsics::ID id);
 269   bool inline_fp_conversions(vmIntrinsics::ID id);
 270   bool inline_number_methods(vmIntrinsics::ID id);
 271   bool inline_reference_get();
 272   bool inline_Class_cast();
 273   bool inline_aescrypt_Block(vmIntrinsics::ID id);
 274   bool inline_cipherBlockChaining_AESCrypt(vmIntrinsics::ID id);
 275   Node* inline_cipherBlockChaining_AESCrypt_predicate(bool decrypting);
 276   Node* get_key_start_from_aescrypt_object(Node* aescrypt_object);
 277   Node* get_original_key_start_from_aescrypt_object(Node* aescrypt_object);
 278   bool inline_sha_implCompress(vmIntrinsics::ID id);
 279   bool inline_digestBase_implCompressMB(int predicate);
 280   bool inline_sha_implCompressMB(Node* digestBaseObj, ciInstanceKlass* instklass_SHA,
 281                                  bool long_state, address stubAddr, const char *stubName,
 282                                  Node* src_start, Node* ofs, Node* limit);
 283   Node* get_state_from_sha_object(Node *sha_object);
 284   Node* get_state_from_sha5_object(Node *sha_object);


4657       PreserveJVMState pjvms(this);
4658       CallJavaNode* slow_call = generate_method_call(vmIntrinsics::_clone, is_virtual);
4659       Node* slow_result = set_results_for_java_call(slow_call);
4660       // this->control() comes from set_results_for_java_call
4661       result_reg->init_req(_slow_path, control());
4662       result_val->init_req(_slow_path, slow_result);
4663       result_i_o ->set_req(_slow_path, i_o());
4664       result_mem ->set_req(_slow_path, reset_memory());
4665     }
4666 
4667     // Return the combined state.
4668     set_control(    _gvn.transform(result_reg));
4669     set_i_o(        _gvn.transform(result_i_o));
4670     set_all_memory( _gvn.transform(result_mem));
4671   } // original reexecute is set back here
4672 
4673   set_result(_gvn.transform(result_val));
4674   return true;
4675 }
4676
4677 //------------------------------inline_arraycopy-----------------------
4678 // public static native void java.lang.System.arraycopy(Object src,  int  srcPos,
4679 //                                                      Object dest, int destPos,
4680 //                                                      int length);
4681 bool LibraryCallKit::inline_arraycopy() {
4682   // Get the arguments.
4683   Node* src         = argument(0);  // type: oop
4684   Node* src_offset  = argument(1);  // type: int
4685   Node* dest        = argument(2);  // type: oop
4686   Node* dest_offset = argument(3);  // type: int
4687   Node* length      = argument(4);  // type: int
4688
4689   // The following tests must be performed
4690   // (1) src and dest are arrays.
4691   // (2) src and dest arrays must have elements of the same BasicType
4692   // (3) src and dest must not be null.
4693   // (4) src_offset must not be negative.
4694   // (5) dest_offset must not be negative.
4695   // (6) length must not be negative.
4696   // (7) src_offset + length must not exceed length of src.
4697   // (8) dest_offset + length must not exceed length of dest.
4698   // (9) each element of an oop array must be assignable
4699 
4700   // (3) src and dest must not be null.
4701   // always do this here because we need the JVM state for uncommon traps
4702   src  = null_check(src,  T_ARRAY);


4703   dest = null_check(dest, T_ARRAY);
4704 
4705   // Check for allocation before we add nodes that would confuse
4706   // tightly_coupled_allocation()
4707   AllocateArrayNode* alloc = tightly_coupled_allocation(dest, NULL);
4708 
4709   ciMethod* trap_method = method();
4710   int trap_bci = bci();
4711   SafePointNode* sfpt = NULL;
4712   if (alloc != NULL) {
4713     // The JVM state for uncommon traps between the allocation and
4714     // arraycopy is set to the state before the allocation: if the
4715     // initialization is performed by the array copy, we don't want to
4716     // go back to the interpreter with an uninitialized array.
4717     JVMState* old_jvms = alloc->jvms();
4718     JVMState* jvms = old_jvms->clone_shallow(C);
4719     uint size = alloc->req();
4720     sfpt = new SafePointNode(size, jvms);
4721     jvms->set_map(sfpt);
4722     for (uint i = 0; i < size; i++) {
4723       sfpt->init_req(i, alloc->in(i));
4724     }
4725     // re-push array length for deoptimization
4726     sfpt->ins_req(jvms->stkoff() + jvms->sp(), alloc->in(AllocateNode::ALength));
4727     jvms->set_sp(jvms->sp()+1);
4728     jvms->set_monoff(jvms->monoff()+1);
4729     jvms->set_scloff(jvms->scloff()+1);
4730     jvms->set_endoff(jvms->endoff()+1);
4731     jvms->set_should_reexecute(true);
4732 
4733     sfpt->set_i_o(map()->i_o());
4734     sfpt->set_memory(map()->memory());
4735 
4736     trap_method = jvms->method();
4737     trap_bci = jvms->bci();
4738   }
4739 
4740   bool validated = false;
4741 
4742   const Type* src_type  = _gvn.type(src);
4743   const Type* dest_type = _gvn.type(dest);
4744   const TypeAryPtr* top_src  = src_type->isa_aryptr();
4745   const TypeAryPtr* top_dest = dest_type->isa_aryptr();
4746 
4747   // Do we have the type of src?
4748   bool has_src = (top_src != NULL && top_src->klass() != NULL);
4749   // Do we have the type of dest?
4750   bool has_dest = (top_dest != NULL && top_dest->klass() != NULL);
4751   // Is the type for src from speculation?
4752   bool src_spec = false;
4753   // Is the type for dest from speculation?
4754   bool dest_spec = false;
4755 
4756   if (!has_src || !has_dest) {
4757     // We don't have sufficient type information, let's see if
4758     // speculative types can help. We need to have types for both src
4759     // and dest so that it pays off.
4760 
4761     // Do we already have or could we have type information for src
4762     bool could_have_src = has_src;
4763     // Do we already have or could we have type information for dest
4764     bool could_have_dest = has_dest;
4765 
4766     ciKlass* src_k = NULL;
4767     if (!has_src) {
4768       src_k = src_type->speculative_type_not_null();
4769       if (src_k != NULL && src_k->is_array_klass()) {
4770         could_have_src = true;
4771       }
4772     }
4773 
4774     ciKlass* dest_k = NULL;
4775     if (!has_dest) {
4776       dest_k = dest_type->speculative_type_not_null();
4777       if (dest_k != NULL && dest_k->is_array_klass()) {
4778         could_have_dest = true;
4779       }
4780     }
4781 
4782     if (could_have_src && could_have_dest) {
4783       // This is going to pay off so emit the required guards
4784       if (!has_src) {
4785         src = maybe_cast_profiled_obj(src, src_k, true, sfpt);
4786         src_type  = _gvn.type(src);
4787         top_src  = src_type->isa_aryptr();
4788         has_src = (top_src != NULL && top_src->klass() != NULL);
4789         src_spec = true;
4790       }
4791       if (!has_dest) {
4792         dest = maybe_cast_profiled_obj(dest, dest_k, true);
4793         dest_type  = _gvn.type(dest);
4794         top_dest  = dest_type->isa_aryptr();
4795         has_dest = (top_dest != NULL && top_dest->klass() != NULL);
4796         dest_spec = true;
4797       }
4798     }
4799   }
4800 
4801   if (has_src && has_dest) {
4802     BasicType src_elem  = top_src->klass()->as_array_klass()->element_type()->basic_type();
4803     BasicType dest_elem = top_dest->klass()->as_array_klass()->element_type()->basic_type();
4804     if (src_elem  == T_ARRAY)  src_elem  = T_OBJECT;
4805     if (dest_elem == T_ARRAY)  dest_elem = T_OBJECT;
4806 
4807     if (src_elem == dest_elem && src_elem == T_OBJECT) {
4808       // If both arrays are object arrays then having the exact types
4809       // for both will remove the need for a subtype check at runtime
4810       // before the call and may make it possible to pick a faster copy
4811       // routine (without a subtype check on every element)
4812       // Do we have the exact type of src?
4813       bool could_have_src = src_spec;
4814       // Do we have the exact type of dest?
4815       bool could_have_dest = dest_spec;
4816       ciKlass* src_k = top_src->klass();
4817       ciKlass* dest_k = top_dest->klass();
4818       if (!src_spec) {
4819         src_k = src_type->speculative_type_not_null();
4820         if (src_k != NULL && src_k->is_array_klass()) {
4821           could_have_src = true;
4822         }
4823       }
4824       if (!dest_spec) {
4825         dest_k = dest_type->speculative_type_not_null();
4826         if (dest_k != NULL && dest_k->is_array_klass()) {
4827           could_have_dest = true;
4828         }
4829       }
4830       if (could_have_src && could_have_dest) {
4831         // If we can have both exact types, emit the missing guards
4832         if (could_have_src && !src_spec) {
4833           src = maybe_cast_profiled_obj(src, src_k, true, sfpt);
4834         }
4835         if (could_have_dest && !dest_spec) {
4836           dest = maybe_cast_profiled_obj(dest, dest_k, true);
4837         }
4838       }
4839     }
4840   }
4841 
4842   if (!C->too_many_traps(trap_method, trap_bci, Deoptimization::Reason_intrinsic) && !src->is_top() && !dest->is_top()) {
4843     // validate arguments: enables transformation of the ArrayCopyNode
4844     validated = true;
4845 
4846     RegionNode* slow_region = new RegionNode(1);
4847     record_for_igvn(slow_region);
4848 
4849     // (1) src and dest are arrays.
4850     generate_non_array_guard(load_object_klass(src), slow_region);
4851     generate_non_array_guard(load_object_klass(dest), slow_region);
4852 
4853     // (2) src and dest arrays must have elements of the same BasicType
4854     // done at macro expansion or at Ideal transformation time
4855 
4856     // (4) src_offset must not be negative.
4857     generate_negative_guard(src_offset, slow_region);
4858 
4859     // (5) dest_offset must not be negative.
4860     generate_negative_guard(dest_offset, slow_region);
4861 
4862     // (7) src_offset + length must not exceed length of src.
4863     generate_limit_guard(src_offset, length,
4864                          load_array_length(src),
4865                          slow_region);
4866 
4867     // (8) dest_offset + length must not exceed length of dest.
4868     generate_limit_guard(dest_offset, length,
4869                          load_array_length(dest),
4870                          slow_region);
4871 
4872     // (9) each element of an oop array must be assignable
4873     Node* src_klass  = load_object_klass(src);
4874     Node* dest_klass = load_object_klass(dest);
4875     Node* not_subtype_ctrl = gen_subtype_check(src_klass, dest_klass);
4876 
4877     if (not_subtype_ctrl != top()) {
4878       if (sfpt != NULL) {
4879         GraphKit kit(sfpt->jvms());
4880         PreserveJVMState pjvms(&kit);
4881         kit.set_control(not_subtype_ctrl);
4882         kit.uncommon_trap(Deoptimization::Reason_intrinsic,
4883                           Deoptimization::Action_make_not_entrant);
4884         assert(kit.stopped(), "Should be stopped");
4885       } else {
4886         PreserveJVMState pjvms(this);
4887         set_control(not_subtype_ctrl);
4888         uncommon_trap(Deoptimization::Reason_intrinsic,
4889                       Deoptimization::Action_make_not_entrant);
4890         assert(stopped(), "Should be stopped");
4891       }
4892     }
4893     if (sfpt != NULL) {
4894       GraphKit kit(sfpt->jvms());
4895       kit.set_control(_gvn.transform(slow_region));
4896       kit.uncommon_trap(Deoptimization::Reason_intrinsic,
4897                         Deoptimization::Action_make_not_entrant);
4898       assert(kit.stopped(), "Should be stopped");
4899     } else {
4900       PreserveJVMState pjvms(this);
4901       set_control(_gvn.transform(slow_region));
4902       uncommon_trap(Deoptimization::Reason_intrinsic,
4903                     Deoptimization::Action_make_not_entrant);
4904       assert(stopped(), "Should be stopped");
4905     }
4906   }


4907 
4908   if (stopped()) {
4909     return true;
4910   }
4911 
4912   ArrayCopyNode* ac = ArrayCopyNode::make(this, true, src, src_offset, dest, dest_offset, length, alloc != NULL,
4913                                           // Create LoadRange and LoadKlass nodes for use during macro expansion here
4914                                           // so the compiler has a chance to eliminate them: during macro expansion,
4915                                           // we have to set their control (CastPP nodes are eliminated).
4916                                           load_object_klass(src), load_object_klass(dest),
4917                                           load_array_length(src), load_array_length(dest));
4918 
4919   ac->set_arraycopy(validated);
4920 
4921   Node* n = _gvn.transform(ac);
4922   if (n == ac) {
4923     ac->connect_outputs(this);
4924   } else {
4925     assert(validated, "shouldn't transform if all arguments not validated");
4926     set_all_memory(n);




 245   bool inline_native_time_funcs(address method, const char* funcName);
 246   bool inline_native_isInterrupted();
 247   bool inline_native_Class_query(vmIntrinsics::ID id);
 248   bool inline_native_subtype_check();
 249 
 250   bool inline_native_newArray();
 251   bool inline_native_getLength();
 252   bool inline_array_copyOf(bool is_copyOfRange);
 253   bool inline_array_equals();
 254   void copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array, bool card_mark);
 255   bool inline_native_clone(bool is_virtual);
 256   bool inline_native_Reflection_getCallerClass();
 257   // Helper function for inlining native object hash method
 258   bool inline_native_hashcode(bool is_virtual, bool is_static);
 259   bool inline_native_getClass();
 260 
 261   // Helper functions for inlining arraycopy
 262   bool inline_arraycopy();
 263   AllocateArrayNode* tightly_coupled_allocation(Node* ptr,
 264                                                 RegionNode* slow_region);
 265   JVMState* arraycopy_restore_alloc_state(AllocateArrayNode* alloc, int& saved_reexecute_sp);
 266   void arraycopy_move_allocation_here(AllocateArrayNode* alloc, Node* dest, JVMState* saved_jvms, int saved_reexecute_sp);
 267 
 268   typedef enum { LS_xadd, LS_xchg, LS_cmpxchg } LoadStoreKind;
 269   bool inline_unsafe_load_store(BasicType type,  LoadStoreKind kind);
 270   bool inline_unsafe_ordered_store(BasicType type);
 271   bool inline_unsafe_fence(vmIntrinsics::ID id);
 272   bool inline_fp_conversions(vmIntrinsics::ID id);
 273   bool inline_number_methods(vmIntrinsics::ID id);
 274   bool inline_reference_get();
 275   bool inline_Class_cast();
 276   bool inline_aescrypt_Block(vmIntrinsics::ID id);
 277   bool inline_cipherBlockChaining_AESCrypt(vmIntrinsics::ID id);
 278   Node* inline_cipherBlockChaining_AESCrypt_predicate(bool decrypting);
 279   Node* get_key_start_from_aescrypt_object(Node* aescrypt_object);
 280   Node* get_original_key_start_from_aescrypt_object(Node* aescrypt_object);
 281   bool inline_sha_implCompress(vmIntrinsics::ID id);
 282   bool inline_digestBase_implCompressMB(int predicate);
 283   bool inline_sha_implCompressMB(Node* digestBaseObj, ciInstanceKlass* instklass_SHA,
 284                                  bool long_state, address stubAddr, const char *stubName,
 285                                  Node* src_start, Node* ofs, Node* limit);
 286   Node* get_state_from_sha_object(Node *sha_object);
 287   Node* get_state_from_sha5_object(Node *sha_object);


4660       PreserveJVMState pjvms(this);
4661       CallJavaNode* slow_call = generate_method_call(vmIntrinsics::_clone, is_virtual);
4662       Node* slow_result = set_results_for_java_call(slow_call);
4663       // this->control() comes from set_results_for_java_call
4664       result_reg->init_req(_slow_path, control());
4665       result_val->init_req(_slow_path, slow_result);
4666       result_i_o ->set_req(_slow_path, i_o());
4667       result_mem ->set_req(_slow_path, reset_memory());
4668     }
4669 
4670     // Return the combined state.
4671     set_control(    _gvn.transform(result_reg));
4672     set_i_o(        _gvn.transform(result_i_o));
4673     set_all_memory( _gvn.transform(result_mem));
4674   } // original reexecute is set back here
4675 
4676   set_result(_gvn.transform(result_val));
4677   return true;
4678 }
4679 
4680 // If we have a tightly coupled allocation, the arraycopy may take care
4681 // of the array initialization. If one of the guards we insert between
4682 // the allocation and the arraycopy causes a deoptimization, an
4683 // uninitialized array will escape the compiled method. To prevent that,
4684 // we set the JVM state for uncommon traps between the allocation and
4685 // the arraycopy to the state before the allocation so, in case of
4686 // deoptimization, we'll reexecute the allocation and the
4687 // initialization.
4688 JVMState* LibraryCallKit::arraycopy_restore_alloc_state(AllocateArrayNode* alloc, int& saved_reexecute_sp) {
4689   if (alloc != NULL) {
4690     ciMethod* trap_method = alloc->jvms()->method();
4691     int trap_bci = alloc->jvms()->bci();
4692 
4693     if (!C->too_many_traps(trap_method, trap_bci, Deoptimization::Reason_intrinsic) &&
4694         !C->too_many_traps(trap_method, trap_bci, Deoptimization::Reason_null_check)) {
4695       // Make sure there's no store between the allocation and the
4696       // arraycopy, otherwise visible side effects could be re-executed
4697       // in case of deoptimization and cause incorrect execution.
4698       bool no_interfering_store = true;
4699       Node* mem = alloc->in(TypeFunc::Memory);
4700       if (mem->is_MergeMem()) {
4701         for (MergeMemStream mms(merged_memory(), mem->as_MergeMem()); mms.next_non_empty2(); ) {
4702           Node* n = mms.memory();
4703           if (n != mms.memory2() && !(n->is_Proj() && n->in(0) == alloc->initialization())) {
4704             assert(n->is_Store(), "what else?");
4705             no_interfering_store = false;
4706             break;
4707           }
4708         }
4709       } else {
4710         for (MergeMemStream mms(merged_memory()); mms.next_non_empty(); ) {
4711           Node* n = mms.memory();
4712           if (n != mem && !(n->is_Proj() && n->in(0) == alloc->initialization())) {
4713             assert(n->is_Store(), "what else?");
4714             no_interfering_store = false;
4715             break;
4716           }
4717         }
4718       }
4719 
4720       if (no_interfering_store) {
4721         JVMState* old_jvms = alloc->jvms()->clone_shallow(C);
4722         uint size = alloc->req();
4723         SafePointNode* sfpt = new SafePointNode(size, old_jvms);
4724         old_jvms->set_map(sfpt);
4725         for (uint i = 0; i < size; i++) {
4726           sfpt->init_req(i, alloc->in(i));
4727         }
4728         // re-push array length for deoptimization
4729         sfpt->ins_req(old_jvms->stkoff() + old_jvms->sp(), alloc->in(AllocateNode::ALength));
4730         old_jvms->set_sp(old_jvms->sp()+1);
4731         old_jvms->set_monoff(old_jvms->monoff()+1);
4732         old_jvms->set_scloff(old_jvms->scloff()+1);
4733         old_jvms->set_endoff(old_jvms->endoff()+1);
4734         old_jvms->set_should_reexecute(true);
4735
4736         sfpt->set_i_o(map()->i_o());
4737         sfpt->set_memory(map()->memory());
4738         sfpt->set_control(map()->control());
4739 
4740         JVMState* saved_jvms = jvms();
4741         saved_reexecute_sp = _reexecute_sp;
4742
4743         set_jvms(sfpt->jvms());
4744         _reexecute_sp = jvms()->sp();
4745
4746         return saved_jvms;
4747       }
4748     }
4749   }
4750   return NULL;
4751 }
4752 
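
As a hedged illustration of the store check above (class and field names are hypothetical): the increment below is a visible side effect between the allocation and the copy, so re-executing from the allocation after a deoptimization would perform it twice. For this shape arraycopy_restore_alloc_state() detects the interfering store and returns NULL, leaving the normal trap state in place.

    class InterferingStore {
        static int copyCount;                      // hypothetical counter

        static Object[] copyAndCount(Object[] src, int n) {
            Object[] dst = new Object[n];          // tightly coupled allocation
            copyCount++;                           // interfering store: disqualifies the re-execute state
            System.arraycopy(src, 0, dst, 0, n);
            return dst;
        }
    }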
4753 // In case of a deoptimization, we restart execution at the
4754 // allocation, allocating a new array. We would leave an uninitialized
4755 // array in the heap that GCs wouldn't expect. Move the allocation
4756 // after the traps so we don't allocate the array if we
4757 // deoptimize. This is possible because tightly_coupled_allocation()
4758 // guarantees there's no observer of the allocated array at this point
4759 // and the control flow is simple enough.
4760 void LibraryCallKit::arraycopy_move_allocation_here(AllocateArrayNode* alloc, Node* dest, JVMState* saved_jvms, int saved_reexecute_sp) {
4761   if (saved_jvms != NULL) {
4762     // restore JVM state to the state at the arraycopy
4763     saved_jvms->map()->set_control(map()->control());
4764     assert(saved_jvms->map()->memory() == map()->memory(), "memory state changed?");
4765     assert(saved_jvms->map()->i_o() == map()->i_o(), "IO state changed?");
4766     // If we've improved the types of some nodes (null check) while
4767     // emitting the guards, propagate them to the current state
4768     map()->replaced_nodes().apply(saved_jvms->map());
4769     set_jvms(saved_jvms);
4770     _reexecute_sp = saved_reexecute_sp;
4771 
4772     // Remove the allocation from above the guards
4773     CallProjections callprojs;
4774     alloc->extract_projections(&callprojs, true);
4775     InitializeNode* init = alloc->initialization();
4776     Node* alloc_mem = alloc->in(TypeFunc::Memory);
4777     C->gvn_replace_by(callprojs.fallthrough_ioproj, alloc->in(TypeFunc::I_O));
4778     C->gvn_replace_by(init->proj_out(TypeFunc::Memory), alloc_mem);
4779     C->gvn_replace_by(init->proj_out(TypeFunc::Control), alloc->in(0));
4780 
4781     // move the allocation here (after the guards)
4782     _gvn.hash_delete(alloc);
4783     alloc->set_req(TypeFunc::Control, control());
4784     alloc->set_req(TypeFunc::I_O, i_o());
4785     Node *mem = reset_memory();
4786     set_all_memory(mem);
4787     alloc->set_req(TypeFunc::Memory, mem);
4788     set_control(init->proj_out(TypeFunc::Control));
4789     set_i_o(callprojs.fallthrough_ioproj);
4790 
4791     // Update memory as done in GraphKit::set_output_for_allocation()
4792     const TypeInt* length_type = _gvn.find_int_type(alloc->in(AllocateNode::ALength));
4793     const TypeOopPtr* ary_type = _gvn.type(alloc->in(AllocateNode::KlassNode))->is_klassptr()->as_instance_type();
4794     if (ary_type->isa_aryptr() && length_type != NULL) {
4795       ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
4796     }
4797     const TypePtr* telemref = ary_type->add_offset(Type::OffsetBot);
4798     int            elemidx  = C->get_alias_index(telemref);
4799     set_memory(init->proj_out(TypeFunc::Memory), Compile::AliasIdxRaw);
4800     set_memory(init->proj_out(TypeFunc::Memory), elemidx);
4801
4802     Node* allocx = _gvn.transform(alloc);
4803     assert(allocx == alloc, "where has the allocation gone?");
4804     assert(dest->is_CheckCastPP(), "not an allocation result?");
4805 
4806     _gvn.hash_delete(dest);
4807     dest->set_req(0, control());
4808     Node* destx = _gvn.transform(dest);
4809     assert(destx == dest, "where has the allocation result gone?");
4810   }
4811 }
4812 
4813 
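Before the intrinsic itself, a short reminder (plain Java, not HotSpot code) of the behaviour the argument checks in inline_arraycopy() below must preserve: when validation fails, the slow path or uncommon trap must still surface the exceptions the interpreter would throw.

    class ArrayCopyChecks {
        public static void main(String[] args) {
            Object[] src = new String[] { "a", "b" };
            Object[] dst = new Integer[2];
            try {
                System.arraycopy(src, 0, dst, 0, 2);   // check (9): element not assignable
            } catch (ArrayStoreException expected) { }
            try {
                System.arraycopy(src, -1, dst, 0, 1);  // check (4): negative src_offset
            } catch (IndexOutOfBoundsException expected) { }
            try {
                System.arraycopy(null, 0, dst, 0, 0);  // check (3): null src
            } catch (NullPointerException expected) { }
        }
    }
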
4814 //------------------------------inline_arraycopy-----------------------
4815 // public static native void java.lang.System.arraycopy(Object src,  int  srcPos,
4816 //                                                      Object dest, int destPos,
4817 //                                                      int length);
4818 bool LibraryCallKit::inline_arraycopy() {
4819   // Get the arguments.
4820   Node* src         = argument(0);  // type: oop
4821   Node* src_offset  = argument(1);  // type: int
4822   Node* dest        = argument(2);  // type: oop
4823   Node* dest_offset = argument(3);  // type: int
4824   Node* length      = argument(4);  // type: int
4825 
4826 
4827   // Check for allocation before we add nodes that would confuse
4828   // tightly_coupled_allocation()
4829   AllocateArrayNode* alloc = tightly_coupled_allocation(dest, NULL);
4830 
4831   int saved_reexecute_sp = -1;
4832   JVMState* saved_jvms = arraycopy_restore_alloc_state(alloc, saved_reexecute_sp);
4833 
4834   // The following tests must be performed
4835   // (1) src and dest are arrays.
4836   // (2) src and dest arrays must have elements of the same BasicType
4837   // (3) src and dest must not be null.
4838   // (4) src_offset must not be negative.
4839   // (5) dest_offset must not be negative.
4840   // (6) length must not be negative.
4841   // (7) src_offset + length must not exceed length of src.
4842   // (8) dest_offset + length must not exceed length of dest.
4843   // (9) each element of an oop array must be assignable
4844 
4845   // (3) src and dest must not be null.
4846   // always do this here because we need the JVM state for uncommon traps
4847   Node* null_ctl = top();
4848   src  = saved_jvms != NULL ? null_check_oop(src, &null_ctl, true, true) : null_check(src,  T_ARRAY);
4849   assert(null_ctl->is_top(), "no null control here");
4850   dest = null_check(dest, T_ARRAY);
4851 
4852   if (saved_jvms == NULL && alloc != NULL) {
4853     // We're not emitting the guards, see if we have a tightly
4854     // coupled allocation now that we've done the null check
4855     alloc = tightly_coupled_allocation(dest, NULL);
4856   }
4857 
4858   bool validated = false;
4859 
4860   const Type* src_type  = _gvn.type(src);
4861   const Type* dest_type = _gvn.type(dest);
4862   const TypeAryPtr* top_src  = src_type->isa_aryptr();
4863   const TypeAryPtr* top_dest = dest_type->isa_aryptr();
4864 
4865   // Do we have the type of src?
4866   bool has_src = (top_src != NULL && top_src->klass() != NULL);
4867   // Do we have the type of dest?
4868   bool has_dest = (top_dest != NULL && top_dest->klass() != NULL);
4869   // Is the type for src from speculation?
4870   bool src_spec = false;
4871   // Is the type for dest from speculation?
4872   bool dest_spec = false;
4873 
4874   if ((!has_src || !has_dest) && (alloc == NULL || saved_jvms != NULL)) {
4875     // We don't have sufficient type information, let's see if
4876     // speculative types can help. We need to have types for both src
4877     // and dest so that it pays off.
4878 
4879     // Do we already have or could we have type information for src
4880     bool could_have_src = has_src;
4881     // Do we already have or could we have type information for dest
4882     bool could_have_dest = has_dest;
4883 
4884     ciKlass* src_k = NULL;
4885     if (!has_src) {
4886       src_k = src_type->speculative_type_not_null();
4887       if (src_k != NULL && src_k->is_array_klass()) {
4888         could_have_src = true;
4889       }
4890     }
4891 
4892     ciKlass* dest_k = NULL;
4893     if (!has_dest) {
4894       dest_k = dest_type->speculative_type_not_null();
4895       if (dest_k != NULL && dest_k->is_array_klass()) {
4896         could_have_dest = true;
4897       }
4898     }
4899 
4900     if (could_have_src && could_have_dest) {
4901       // This is going to pay off so emit the required guards
4902       if (!has_src) {
4903         src = maybe_cast_profiled_obj(src, src_k, true);
4904         src_type  = _gvn.type(src);
4905         top_src  = src_type->isa_aryptr();
4906         has_src = (top_src != NULL && top_src->klass() != NULL);
4907         src_spec = true;
4908       }
4909       if (!has_dest) {
4910         dest = maybe_cast_profiled_obj(dest, dest_k, true);
4911         dest_type  = _gvn.type(dest);
4912         top_dest  = dest_type->isa_aryptr();
4913         has_dest = (top_dest != NULL && top_dest->klass() != NULL);
4914         dest_spec = true;
4915       }
4916     }
4917   }
4918 
4919   if (has_src && has_dest && (alloc == NULL || saved_jvms != NULL)) {
4920     BasicType src_elem  = top_src->klass()->as_array_klass()->element_type()->basic_type();
4921     BasicType dest_elem = top_dest->klass()->as_array_klass()->element_type()->basic_type();
4922     if (src_elem  == T_ARRAY)  src_elem  = T_OBJECT;
4923     if (dest_elem == T_ARRAY)  dest_elem = T_OBJECT;
4924 
4925     if (src_elem == dest_elem && src_elem == T_OBJECT) {
4926       // If both arrays are object arrays then having the exact types
4927       // for both will remove the need for a subtype check at runtime
4928       // before the call and may make it possible to pick a faster copy
4929       // routine (without a subtype check on every element)
4930       // Do we have the exact type of src?
4931       bool could_have_src = src_spec;
4932       // Do we have the exact type of dest?
4933       bool could_have_dest = dest_spec;
4934       ciKlass* src_k = top_src->klass();
4935       ciKlass* dest_k = top_dest->klass();
4936       if (!src_spec) {
4937         src_k = src_type->speculative_type_not_null();
4938         if (src_k != NULL && src_k->is_array_klass()) {
4939           could_have_src = true;
4940         }
4941       }
4942       if (!dest_spec) {
4943         dest_k = dest_type->speculative_type_not_null();
4944         if (dest_k != NULL && dest_k->is_array_klass()) {
4945           could_have_dest = true;
4946         }
4947       }
4948       if (could_have_src && could_have_dest) {
4949         // If we can have both exact types, emit the missing guards
4950         if (could_have_src && !src_spec) {
4951           src = maybe_cast_profiled_obj(src, src_k, true);
4952         }
4953         if (could_have_dest && !dest_spec) {
4954           dest = maybe_cast_profiled_obj(dest, dest_k, true);
4955         }
4956       }
4957     }
4958   }
4959 
4960   ciMethod* trap_method = method();
4961   int trap_bci = bci();
4962   if (saved_jvms != NULL) {
4963     trap_method = alloc->jvms()->method();
4964     trap_bci = alloc->jvms()->bci();
4965   }
4966 
4967   if (!C->too_many_traps(trap_method, trap_bci, Deoptimization::Reason_intrinsic) &&
4968       (alloc == NULL || saved_jvms != NULL) &&
4969       !src->is_top() && !dest->is_top()) {
4970     // validate arguments: enables transformation of the ArrayCopyNode
4971     validated = true;
4972 
4973     RegionNode* slow_region = new RegionNode(1);
4974     record_for_igvn(slow_region);
4975 
4976     // (1) src and dest are arrays.
4977     generate_non_array_guard(load_object_klass(src), slow_region);
4978     generate_non_array_guard(load_object_klass(dest), slow_region);
4979 
4980     // (2) src and dest arrays must have elements of the same BasicType
4981     // done at macro expansion or at Ideal transformation time
4982 
4983     // (4) src_offset must not be negative.
4984     generate_negative_guard(src_offset, slow_region);
4985 
4986     // (5) dest_offset must not be negative.
4987     generate_negative_guard(dest_offset, slow_region);
4988 
4989     // (7) src_offset + length must not exceed length of src.
4990     generate_limit_guard(src_offset, length,
4991                          load_array_length(src),
4992                          slow_region);
4993 
4994     // (8) dest_offset + length must not exceed length of dest.
4995     generate_limit_guard(dest_offset, length,
4996                          load_array_length(dest),
4997                          slow_region);
4998 
4999     // (9) each element of an oop array must be assignable
5000     Node* src_klass  = load_object_klass(src);
5001     Node* dest_klass = load_object_klass(dest);
5002     Node* not_subtype_ctrl = gen_subtype_check(src_klass, dest_klass);
5003 
5004     if (not_subtype_ctrl != top()) {
5005       PreserveJVMState pjvms(this);
5006       set_control(not_subtype_ctrl);
5007       uncommon_trap(Deoptimization::Reason_intrinsic,
5008                     Deoptimization::Action_make_not_entrant);
5009       assert(stopped(), "Should be stopped");
5010     }
5011     {
5012       PreserveJVMState pjvms(this);
5013       set_control(_gvn.transform(slow_region));
5014       uncommon_trap(Deoptimization::Reason_intrinsic,
5015                     Deoptimization::Action_make_not_entrant);
5016       assert(stopped(), "Should be stopped");
5017     }
5018   }
5019 
5020   arraycopy_move_allocation_here(alloc, dest, saved_jvms, saved_reexecute_sp);
5021 
5022   if (stopped()) {
5023     return true;
5024   }
5025 
5026   ArrayCopyNode* ac = ArrayCopyNode::make(this, true, src, src_offset, dest, dest_offset, length, alloc != NULL,
5027                                           // Create LoadRange and LoadKlass nodes for use during macro expansion here
5028                                           // so the compiler has a chance to eliminate them: during macro expansion,
5029                                           // we have to set their control (CastPP nodes are eliminated).
5030                                           load_object_klass(src), load_object_klass(dest),
5031                                           load_array_length(src), load_array_length(dest));
5032 
5033   ac->set_arraycopy(validated);
5034 
5035   Node* n = _gvn.transform(ac);
5036   if (n == ac) {
5037     ac->connect_outputs(this);
5038   } else {
5039     assert(validated, "shouldn't transform if all arguments not validated");
5040     set_all_memory(n);

