        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
      }
      break;

    case new_instance_id:
    case fast_new_instance_id:
    case fast_new_instance_init_check_id:
      {
        Register klass = r3; // Incoming
        Register obj = r0;   // Result

        if (id == new_instance_id) {
          __ set_info("new_instance", dont_gc_arguments);
        } else if (id == fast_new_instance_id) {
          __ set_info("fast new_instance", dont_gc_arguments);
        } else {
          assert(id == fast_new_instance_init_check_id, "bad StubID");
          __ set_info("fast new_instance init check", dont_gc_arguments);
        }

        // If TLAB is disabled, see if there is support for inlining contiguous
        // allocations.
        // Otherwise, just go to the slow path.
        if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) &&
            !UseTLAB && Universe::heap()->supports_inline_contig_alloc()) {
          Label slow_path;
          Register obj_size = r2;
          Register t1 = r19;
          Register t2 = r4;
          assert_different_registers(klass, obj, obj_size, t1, t2);

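          // r19 is callee-saved and is used as a temp below, so preserve it here;
          // zr is stored alongside it to keep sp 16-byte aligned.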
          __ stp(r19, zr, Address(__ pre(sp, -2 * wordSize)));

          if (id == fast_new_instance_init_check_id) {
            // make sure the klass is initialized
            __ ldrb(rscratch1, Address(klass, InstanceKlass::init_state_offset()));
            __ cmpw(rscratch1, InstanceKlass::fully_initialized);
            __ br(Assembler::NE, slow_path);
          }

#ifdef ASSERT
          // assert object can be fast path allocated
          {
            Label ok, not_ok;
            __ ldrw(obj_size, Address(klass, Klass::layout_helper_offset()));
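            // obj_size now holds the layout helper: for an instance klass this is the
            // object size in bytes, with _lh_instance_slow_path_bit set when the
            // allocation has to take the slow path.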
            // ...

#ifdef ASSERT
        // assert object type is really an array of the proper kind
        {
          Label ok;
          Register t0 = obj;
          __ ldrw(t0, Address(klass, Klass::layout_helper_offset()));
          __ asrw(t0, t0, Klass::_lh_array_tag_shift);
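          // The tag in the top bits of the layout helper says whether this is a
          // primitive (type) array or an object array; it must match the stub id.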
          int tag = ((id == new_type_array_id)
                     ? Klass::_lh_array_tag_type_value
                     : Klass::_lh_array_tag_obj_value);
          __ mov(rscratch1, tag);
          __ cmpw(t0, rscratch1);
          __ br(Assembler::EQ, ok);
          __ stop("assert(is an array klass)");
          __ should_not_reach_here();
          __ bind(ok);
        }
#endif // ASSERT

        // If TLAB is disabled, see if there is support for inlining contiguous
        // allocations.
        // Otherwise, just go to the slow path.
        if (!UseTLAB && Universe::heap()->supports_inline_contig_alloc()) {
          Register arr_size = r4;
          Register t1 = r2;
          Register t2 = r5;
          Label slow_path;
          assert_different_registers(length, klass, obj, arr_size, t1, t2);

          // check that array length is small enough for fast path.
          __ mov(rscratch1, C1_MacroAssembler::max_array_allocation_length);
          __ cmpw(length, rscratch1);
          __ br(Assembler::HI, slow_path);

          // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
          // since size is positive ldrw does right thing on 64bit
          __ ldrw(t1, Address(klass, Klass::layout_helper_offset()));
          // since size is positive movw does right thing on 64bit
          __ movw(arr_size, length);
          __ lslvw(arr_size, length, t1);
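          // lslvw only uses the low 5 bits of t1 as the shift amount, and those bits
          // of the layout helper are the log2 element size, so arr_size is now the
          // payload size in bytes.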
          __ ubfx(t1, t1, Klass::_lh_header_size_shift,
                  exact_log2(Klass::_lh_header_size_mask + 1));
          __ add(arr_size, arr_size, t1);
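          // arr_size = header size + payload size in bytes; it still has to be
          // rounded up to the object alignment (the round_up in the comment above).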