#ifdef ASSERT
// assert object can be fast path allocated
{
  Label ok, not_ok;
  __ ldrw(obj_size, Address(klass, Klass::layout_helper_offset()));
  __ cmp(obj_size, 0u);
  __ br(Assembler::LE, not_ok);  // make sure it's an instance (LH > 0)
  __ tstw(obj_size, Klass::_lh_instance_slow_path_bit);
  __ br(Assembler::EQ, ok);
  __ bind(not_ok);
  __ stop("assert(can be fast path allocated)");
  __ should_not_reach_here();
  __ bind(ok);
}
#endif // ASSERT

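// For instance klasses the layout helper is the instance size in bytes
// (a positive value), with the low _lh_instance_slow_path_bit set when
// instances must be allocated on the slow path (e.g. the class has a
// finalizer).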
// get the instance size (size is positive so ldrw does the right thing on 64-bit)
__ ldrw(obj_size, Address(klass, Klass::layout_helper_offset()));

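// eden_allocate attempts a bump-pointer allocation of obj_size bytes in
// eden, branching to slow_path if the heap-top update fails or eden is
// exhausted; incr_allocated_bytes then credits the current thread's
// allocated-bytes counter used for per-thread allocation accounting.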
__ eden_allocate(obj, obj_size, 0, t1, slow_path);
__ incr_allocated_bytes(rthread, obj_size, 0, rscratch1);

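// initialize_object installs the mark word and klass pointer and clears
// the instance fields; is_tlab_allocated is false because this is the
// eden path, so zeroing is never skipped (ZeroTLAB only lets TLAB
// allocations elide it).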
__ initialize_object(obj, klass, obj_size, 0, t1, t2, /* is_tlab_allocated */ false);
__ verify_oop(obj);
__ ldp(r19, zr, Address(__ post(sp, 2 * wordSize)));  // restore r19 (saved on entry to this fast path, not shown here)
__ ret(lr);

__ bind(slow_path);
__ ldp(r19, zr, Address(__ post(sp, 2 * wordSize)));  // restore r19 on the slow path too
}

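// Fast path failed: build a frame, save the live registers and record an
// oop map at the call site so the GC can find them, then let the runtime
// allocate the instance.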
__ enter();
OopMap* map = save_live_registers(sasm);
int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
oop_maps = new OopMapSet();
oop_maps->add_gc_map(call_offset, map);
restore_live_registers_except_r0(sasm);
__ verify_oop(obj);
__ leave();
__ ret(lr);

// ...

assert_different_registers(length, klass, obj, arr_size, t1, t2);

// Check that the array length is small enough for the fast path; the
// unsigned comparison (HI) also sends negative lengths to the slow path.
__ mov(rscratch1, C1_MacroAssembler::max_array_allocation_length);
__ cmpw(length, rscratch1);
__ br(Assembler::HI, slow_path);

// get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
// since size is positive ldrw does the right thing on 64-bit
__ ldrw(t1, Address(klass, Klass::layout_helper_offset()));
// since size is positive movw does the right thing on 64-bit
__ movw(arr_size, length);
// lslvw uses only the low 5 bits of the shift register, which implements
// the (layout_helper & 0x1F) scaling by log2(element size) for free
__ lslvw(arr_size, length, t1);
__ ubfx(t1, t1, Klass::_lh_header_size_shift,
        exact_log2(Klass::_lh_header_size_mask + 1));
__ add(arr_size, arr_size, t1);
__ add(arr_size, arr_size, MinObjAlignmentInBytesMask);  // align up
__ andr(arr_size, arr_size, ~MinObjAlignmentInBytesMask);
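// Worked example (hypothetical numbers): a 10-element int[] with a typical
// 16-byte array header and log2(element size) == 2 gives
// 16 + (10 << 2) = 56 bytes, already a multiple of the 8-byte object
// alignment, so the align-up above leaves it unchanged.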

__ eden_allocate(obj, arr_size, 0, t1, slow_path);  // preserves arr_size
__ incr_allocated_bytes(rthread, arr_size, 0, rscratch1);

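// initialize_header stores the mark word, klass pointer and array length;
// the ldrb below then loads just the header-size byte of the layout
// helper, which works because _lh_header_size_shift is byte-aligned (see
// the asserts) and this port is little-endian.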
__ initialize_header(obj, klass, length, t1, t2);
__ ldrb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte)));
assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
__ andr(t1, t1, Klass::_lh_header_size_mask);
__ sub(arr_size, arr_size, t1);  // body length
__ add(t1, t1, obj);             // body start
__ initialize_body(t1, arr_size, 0, t2);
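// At this point t1 holds the first body address and arr_size the body
// size in bytes; initialize_body zeroes that range so the elements start
// out as default values.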
__ verify_oop(obj);

__ ret(lr);

__ bind(slow_path);
}

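// Slow path: as for instances, call into the runtime under a proper frame
// and oop map; which runtime entry to call depends on the stub id checked
// below.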
__ enter();
OopMap* map = save_live_registers(sasm);
int call_offset;
if (id == new_type_array_id) {