// NOTE(review): this is a line-numbered listing (source lines 529-641) of
// HotSpot C1 runtime-stub generation code for ARM (c1_Runtime1_arm.cpp style,
// inside what appears to be Runtime1::generate_code_for) -- not directly
// compilable text. The enclosing function header and the end of the switch
// lie outside this view, so the fragment is annotated only, not rewritten.
529 const bool dont_gc_arguments = false;
530
531 OopMapSet* oop_maps = NULL;
532 bool save_fpu_registers = HaveVFP;
533
534 switch (id) {
535 case forward_exception_id:
536 {
537 oop_maps = generate_handle_exception(id, sasm);
538 // does not return on ARM
539 }
540 break;
541
542 case new_instance_id:
543 case fast_new_instance_id:
544 case fast_new_instance_init_check_id:
545 {
546 const Register result = R0;
547 const Register klass = R1;
548
// NOTE(review): this earlier copy guards the inline eden allocation with
// `UseTLAB && ...` ("TLAB allocation failed" retry). The second copy of the
// same source lines later in this file uses `!UseTLAB && ...` with an updated
// comment, so this fragment appears to be the pre-fix version of that change
// -- confirm against upstream before relying on this copy.
549 if (UseTLAB && Universe::heap()->supports_inline_contig_alloc() && id != new_instance_id) {
550 // We come here when TLAB allocation failed.
551 // In this case we try to allocate directly from eden.
552 Label slow_case, slow_case_no_pop;
553
554 // Make sure the class is fully initialized
555 if (id == fast_new_instance_init_check_id) {
556 __ ldrb(result, Address(klass, InstanceKlass::init_state_offset()));
557 __ cmp(result, InstanceKlass::fully_initialized);
558 __ b(slow_case_no_pop, ne);
559 }
560
561 // Free some temporary registers
562 const Register obj_size = R4;
563 const Register tmp1 = R5;
564 const Register tmp2 = LR;
565 const Register obj_end = Rtemp;
566
567 __ raw_push(R4, R5, LR);
568
569 __ ldr_u32(obj_size, Address(klass, Klass::layout_helper_offset()));
570 __ eden_allocate(result, obj_end, tmp1, tmp2, obj_size, slow_case); // initializes result and obj_end
571 __ incr_allocated_bytes(obj_size, tmp2);
// NOTE(review): the listing jumps from source line 571 to 599 -- lines
// 572-598 (the rest of this allocation fast path and the start of the case
// that ends with the counter_overflow call below) are elided from this view.
599 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), R1, R2);
600 oop_maps = new OopMapSet();
601 oop_maps->add_gc_map(call_offset, oop_map);
602 restore_live_registers(sasm);
603 }
604 break;
605
606 case new_type_array_id:
607 case new_object_array_id:
608 {
609 if (id == new_type_array_id) {
610 __ set_info("new_type_array", dont_gc_arguments);
611 } else {
612 __ set_info("new_object_array", dont_gc_arguments);
613 }
614
615 const Register result = R0;
616 const Register klass = R1;
617 const Register length = R2;
618
// NOTE(review): same pre-fix `UseTLAB && ...` guard as the instance case
// above; the later copy in this file uses `!UseTLAB && ...` here as well.
619 if (UseTLAB && Universe::heap()->supports_inline_contig_alloc()) {
620 // We come here when TLAB allocation failed.
621 // In this case we try to allocate directly from eden.
622 Label slow_case, slow_case_no_pop;
623
624 #ifdef AARCH64
625 __ mov_slow(Rtemp, C1_MacroAssembler::max_array_allocation_length);
626 __ cmp_32(length, Rtemp);
627 #else
628 __ cmp_32(length, C1_MacroAssembler::max_array_allocation_length);
629 #endif // AARCH64
630 __ b(slow_case_no_pop, hs);
631
632 // Free some temporary registers
633 const Register arr_size = R4;
634 const Register tmp1 = R5;
635 const Register tmp2 = LR;
636 const Register tmp3 = Rtemp;
637 const Register obj_end = tmp3;
638
639 __ raw_push(R4, R5, LR);
640
641 // Get the allocation size: round_up((length << (layout_helper & 0xff)) + header_size)
// NOTE(review): fragment truncated here, mid-case -- the remainder of the
// new_type_array_id / new_object_array_id arm and the rest of the switch are
// not visible in this chunk.
|
// NOTE(review): second line-numbered listing of the same source span
// (lines 529-643) -- apparently the revised version of the fragment above:
// the inline eden-allocation guards are now `!UseTLAB && ...` ("if TLAB is
// disabled, try inline contiguous allocation; otherwise take the slow path"),
// and the old "TLAB allocation failed" comments are replaced. Also not
// directly compilable; the enclosing function lies outside this view.
529 const bool dont_gc_arguments = false;
530
531 OopMapSet* oop_maps = NULL;
532 bool save_fpu_registers = HaveVFP;
533
534 switch (id) {
535 case forward_exception_id:
536 {
537 oop_maps = generate_handle_exception(id, sasm);
538 // does not return on ARM
539 }
540 break;
541
542 case new_instance_id:
543 case fast_new_instance_id:
544 case fast_new_instance_init_check_id:
545 {
546 const Register result = R0;
547 const Register klass = R1;
548
549 // If TLAB is disabled, see if there is support for inlining contiguous
550 // allocations.
551 // Otherwise, just go to the slow path.
552 if (!UseTLAB && Universe::heap()->supports_inline_contig_alloc() && id != new_instance_id) {
553 Label slow_case, slow_case_no_pop;
554
555 // Make sure the class is fully initialized
556 if (id == fast_new_instance_init_check_id) {
557 __ ldrb(result, Address(klass, InstanceKlass::init_state_offset()));
558 __ cmp(result, InstanceKlass::fully_initialized);
559 __ b(slow_case_no_pop, ne);
560 }
561
562 // Free some temporary registers
563 const Register obj_size = R4;
564 const Register tmp1 = R5;
565 const Register tmp2 = LR;
566 const Register obj_end = Rtemp;
567
568 __ raw_push(R4, R5, LR);
569
570 __ ldr_u32(obj_size, Address(klass, Klass::layout_helper_offset()));
571 __ eden_allocate(result, obj_end, tmp1, tmp2, obj_size, slow_case); // initializes result and obj_end
572 __ incr_allocated_bytes(obj_size, tmp2);
// NOTE(review): the listing jumps from source line 572 to 600 -- lines
// 573-599 (the rest of this allocation fast path and the start of the case
// that ends with the counter_overflow call below) are elided from this view.
600 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), R1, R2);
601 oop_maps = new OopMapSet();
602 oop_maps->add_gc_map(call_offset, oop_map);
603 restore_live_registers(sasm);
604 }
605 break;
606
607 case new_type_array_id:
608 case new_object_array_id:
609 {
610 if (id == new_type_array_id) {
611 __ set_info("new_type_array", dont_gc_arguments);
612 } else {
613 __ set_info("new_object_array", dont_gc_arguments);
614 }
615
616 const Register result = R0;
617 const Register klass = R1;
618 const Register length = R2;
619
620 // If TLAB is disabled, see if there is support for inlining contiguous
621 // allocations.
622 // Otherwise, just go to the slow path.
623 if (!UseTLAB && Universe::heap()->supports_inline_contig_alloc()) {
624 Label slow_case, slow_case_no_pop;
625
626 #ifdef AARCH64
627 __ mov_slow(Rtemp, C1_MacroAssembler::max_array_allocation_length);
628 __ cmp_32(length, Rtemp);
629 #else
630 __ cmp_32(length, C1_MacroAssembler::max_array_allocation_length);
631 #endif // AARCH64
632 __ b(slow_case_no_pop, hs);
633
634 // Free some temporary registers
635 const Register arr_size = R4;
636 const Register tmp1 = R5;
637 const Register tmp2 = LR;
638 const Register tmp3 = Rtemp;
639 const Register obj_end = tmp3;
640
641 __ raw_push(R4, R5, LR);
642
643 // Get the allocation size: round_up((length << (layout_helper & 0xff)) + header_size)
// NOTE(review): fragment truncated here, mid-case -- the remainder of the
// new_type_array_id / new_object_array_id arm and the rest of the switch are
// not visible in this chunk.
|