// [... elided ...]

      base_off += sizeof(int);
    } else {
      // Include the klass so we copy in whole 8-byte words.
      base_off = instanceOopDesc::klass_offset_in_bytes();
    }
    assert(base_off % BytesPerLong == 0, "expect 8-byte alignment");
  }
  src  = basic_plus_adr(src,  base_off);
  dest = basic_plus_adr(dest, base_off);

  // Compute the length also, if needed:
  Node* countx = size;
  countx = _gvn.transform(new SubXNode(countx, MakeConX(base_off)));
  countx = _gvn.transform(new URShiftXNode(countx, intcon(LogBytesPerLong)));
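  // The two transforms above compute countx = (size - base_off) >> LogBytesPerLong,
  // i.e. the number of 8-byte words to copy once the first base_off header
  // bytes have been skipped. Worked example (illustrative numbers only):
  // size == 24 and base_off == 8 give countx == (24 - 8) >> 3 == 2 words.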

  const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;

  ArrayCopyNode* ac = ArrayCopyNode::make(this, false, src, NULL, dest, NULL, countx, false);
  ac->set_clonebasic();
  Node* n = _gvn.transform(ac);
  if (n == ac) {
    set_predefined_output_for_runtime_call(ac, ac->in(TypeFunc::Memory), raw_adr_type);
  } else {
    set_all_memory(n);
  }

  // If necessary, emit some card marks afterwards.  (Non-arrays only.)
  if (card_mark) {
    assert(!is_array, "");
    // Put in store barrier for any and all oops we are sticking
    // into this object.  (We could avoid this if we could prove
    // that the object type contains no oop fields at all.)
    Node* no_particular_value = NULL;
    Node* no_particular_field = NULL;
    int raw_adr_idx = Compile::AliasIdxRaw;
    post_barrier(control(),
                 memory(raw_adr_type),
                 alloc_obj,
                 no_particular_field,
                 raw_adr_idx,
                 no_particular_value,
                 T_OBJECT,
                 false);
  }
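  // Note: with no particular field and no particular value, post_barrier
  // conservatively dirties cards for the whole cloned object, so a single
  // barrier covers every oop field copied above (the exact behavior depends
  // on the collector's barrier implementation).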

// [... elided ...]

// Allocation has two cases, and uses GraphKit::new_instance or new_array.
//
// Copying also has two cases, oop arrays and everything else.
// Oop arrays use arrayof_oop_arraycopy (same as System.arraycopy).
// Everything else uses the tight inline loop supplied by CopyArrayNode.
//
// These steps fold up nicely if and when the cloned object's klass
// can be sharply typed as an object array, a type array, or an instance.
//
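// As a rough illustration (not normative), shapes like the following Java
// fragments can reach this intrinsic ('Point' is a hypothetical class
// assumed to expose a public clone()):
//
//   int[]    b = a.clone();           // type array path, long-word copy
//   Object[] q = p.clone();           // oop array path, oop arraycopy
//   Point    t = (Point) s.clone();   // instance path, long-word copy
//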
bool LibraryCallKit::inline_native_clone(bool is_virtual) {
  PhiNode* result_val;

  // Set the reexecute bit for the interpreter to reexecute
  // the bytecode that invokes Object.clone if deoptimization happens.
  { PreserveReexecuteState preexecs(this);
    jvms()->set_should_reexecute(true);

    Node* obj = null_check_receiver();
    if (stopped())  return true;

    const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();

    // If we are going to clone an instance, we need its exact type to
    // know the number and types of fields to convert the clone to
    // loads/stores. Maybe a speculative type can help us.
    if (!obj_type->klass_is_exact() &&
        obj_type->speculative_type() != NULL &&
        obj_type->speculative_type()->is_instance_klass()) {
      ciInstanceKlass* spec_ik = obj_type->speculative_type()->as_instance_klass();
      if (spec_ik->nof_nonstatic_fields() <= ArrayCopyLoadStoreMaxElem &&
          !spec_ik->has_injected_fields()) {
        ciKlass* k = obj_type->klass();
        if (!k->is_instance_klass() ||
            k->as_instance_klass()->is_interface() ||
            k->as_instance_klass()->has_subklass()) {
          obj = maybe_cast_profiled_obj(obj, obj_type->speculative_type(), false);
        }
      }
    }
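    // Illustrative scenario: if profiling shows the receiver here is
    // always, say, java.lang.Integer, the cast above pins that exact
    // type, and the clone can later be expanded into a small fixed
    // sequence of field loads/stores instead of a bulk word copy.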

    Node* obj_klass = load_object_klass(obj);
    const TypeKlassPtr* tklass = _gvn.type(obj_klass)->isa_klassptr();
    const TypeOopPtr*   toop   = ((tklass != NULL)
                                ? tklass->as_instance_type()
                                : TypeInstPtr::NOTNULL);

    // Conservatively insert a memory barrier on all memory slices.
    // Do not let writes into the original float below the clone.
    insert_mem_bar(Op_MemBarCPUOrder);
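    // (Informal rationale:) without this barrier, a store such as
    // 'obj.f = x' issued before the clone could be scheduled after the
    // copy, and the cloned object would then contain the stale value.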

    // paths into result_reg:
    enum {
      _slow_path = 1,     // out-of-line call to clone method (virtual or not)
      _objArray_path,     // plain array allocation, plus arrayof_oop_arraycopy
      _array_path,        // plain array allocation, plus arrayof_long_arraycopy
      _instance_path,     // plain instance allocation, plus arrayof_long_arraycopy
      PATH_LIMIT
    };
    RegionNode* result_reg = new RegionNode(PATH_LIMIT);
    result_val             = new PhiNode(result_reg, TypeInstPtr::NOTNULL);
// [... elided ...]

  // Check for allocation before we add nodes that would confuse
  // tightly_coupled_allocation()
  AllocateArrayNode* alloc = tightly_coupled_allocation(dest, NULL);

  // The following tests must be performed:
  // (1) src and dest are arrays.
  // (2) src and dest arrays must have elements of the same BasicType
  // (3) src and dest must not be null.
  // (4) src_offset must not be negative.
  // (5) dest_offset must not be negative.
  // (6) length must not be negative.
  // (7) src_offset + length must not exceed length of src.
  // (8) dest_offset + length must not exceed length of dest.
  // (9) each element of an oop array must be assignable
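  // For example (illustrative only): System.arraycopy(intArr, 0, objArr, 0, n)
  // violates (2), and System.arraycopy(arr, -1, arr2, 0, n) violates (4);
  // such calls must end up in the slow path or an uncommon trap.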

  // (3) src and dest must not be null.
  // Always do this here because we need the JVM state for uncommon traps.
  src  = null_check(src,  T_ARRAY);
  dest = null_check(dest, T_ARRAY);

  bool validated = false;

  const Type* src_type  = _gvn.type(src);
  const Type* dest_type = _gvn.type(dest);
  const TypeAryPtr* top_src  = src_type->isa_aryptr();
  const TypeAryPtr* top_dest = dest_type->isa_aryptr();

  // Do we have the type of src?
  bool has_src = (top_src != NULL && top_src->klass() != NULL);
  // Do we have the type of dest?
  bool has_dest = (top_dest != NULL && top_dest->klass() != NULL);
  // Is the type for src from speculation?
  bool src_spec = false;
  // Is the type for dest from speculation?
  bool dest_spec = false;

  if (!has_src || !has_dest) {
    // We don't have sufficient type information; let's see if
    // speculative types can help. We need types for both src
    // and dest for speculation to pay off.
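    // For instance (an assumed profile, purely illustrative): the static
    // types may be plain Object while profiling shows src and dest are
    // always byte[]; speculative casts then give the guards below exact
    // array types to work with.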

      // [... elided ...]
      if (!dest_spec) {
        dest_k = dest_type->speculative_type_not_null();
        if (dest_k != NULL && dest_k->is_array_klass()) {
          could_have_dest = true;
        }
      }
      if (could_have_src && could_have_dest) {
        // If we can have both exact types, emit the missing guards.
        if (could_have_src && !src_spec) {
          src = maybe_cast_profiled_obj(src, src_k);
        }
        if (could_have_dest && !dest_spec) {
          dest = maybe_cast_profiled_obj(dest, dest_k);
        }
      }
    }
  }

  if (!too_many_traps(Deoptimization::Reason_intrinsic) && !src->is_top() && !dest->is_top()) {
    // Validate arguments: this enables transformation of the ArrayCopyNode.
    validated = true;

    RegionNode* slow_region = new RegionNode(1);
    record_for_igvn(slow_region);

    // (1) src and dest are arrays.
    generate_non_array_guard(load_object_klass(src), slow_region);
    generate_non_array_guard(load_object_klass(dest), slow_region);

    // (2) src and dest arrays must have elements of the same BasicType
    // done at macro expansion or at Ideal transformation time

    // (4) src_offset must not be negative.
    generate_negative_guard(src_offset, slow_region);

    // (5) dest_offset must not be negative.
    generate_negative_guard(dest_offset, slow_region);

    // (7) src_offset + length must not exceed length of src.
    generate_limit_guard(src_offset, length,
                         load_array_length(src),
    // [... elided ...]

    {
      PreserveJVMState pjvms(this);
      set_control(_gvn.transform(slow_region));
      uncommon_trap(Deoptimization::Reason_intrinsic,
                    Deoptimization::Action_make_not_entrant);
      assert(stopped(), "Should be stopped");
    }
  }

  if (stopped()) {
    return true;
  }

  ArrayCopyNode* ac = ArrayCopyNode::make(this, true, src, src_offset, dest, dest_offset, length, alloc != NULL,
                                          // Create LoadRange and LoadKlass nodes for use during macro expansion
                                          // here, so the compiler has a chance to eliminate them: during macro
                                          // expansion we would have to set their control anyway, because the
                                          // CastPP nodes are eliminated by then.
                                          load_object_klass(src), load_object_klass(dest),
                                          load_array_length(src), load_array_length(dest));

  ac->set_arraycopy(validated);

  Node* n = _gvn.transform(ac);
  if (n == ac) {
    ac->connect_outputs(this);
  } else {
    assert(validated, "shouldn't transform if arguments are not validated");
    set_all_memory(n);
  }

  return true;
}


// Helper function which determines if an arraycopy immediately follows
// an allocation, with no intervening tests or other escapes for the object.
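// A typical (illustrative) Java shape that satisfies this test:
//
//   byte[] copy = new byte[len];              // the tightly coupled allocation
//   System.arraycopy(src, 0, copy, 0, len);   // immediately consumes it
//
// Any intervening test of, or store to, 'copy' breaks the coupling and
// disqualifies the allocation.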
AllocateArrayNode*
LibraryCallKit::tightly_coupled_allocation(Node* ptr,
                                           RegionNode* slow_region) {
  if (stopped())             return NULL;  // no fast path
  if (C->AliasLevel() == 0)  return NULL;  // no MergeMems around

  AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(ptr, &_gvn);
  if (alloc == NULL)  return NULL;

  Node* rawmem = memory(Compile::AliasIdxRaw);
  // Is the allocation's memory state untouched?
  if (!(rawmem->is_Proj() && rawmem->in(0)->is_Initialize())) {
    // Bail out if there have been raw-memory effects since the allocation.
    // [... elided ...]