
src/cpu/aarch64/vm/c1_Runtime1_aarch64.cpp

rev 12409 : 8169177: aarch64: SIGSEGV when "-XX:+ZeroTLAB" is specified along with GC options
Summary: Add zero-initialization to C1 for fast TLAB refills
Reviewed-by: aph, drwhite
Contributed-by: kavitha.natarajan@linaro.org
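With -XX:+ZeroTLAB the VM zeroes a TLAB when it is handed to a thread, so the
per-object body clear can be skipped, but only for memory that really came from
such a TLAB. The changed call sites below therefore pass a new is_tlab_allocated
flag to MacroAssembler::initialize_object, while eden allocations keep the
explicit clear. A minimal sketch of the gating predicate on the callee side
(the helper name and standalone form are illustrative, not part of this webrev):

    // Hypothetical helper modelling the zeroing decision.
    static bool must_zero_object_body(bool zero_tlab, bool is_tlab_allocated) {
      // A TLAB allocation may rely on the pre-zeroed TLAB; an eden
      // allocation is never pre-zeroed and must always be cleared.
      return !(zero_tlab && is_tlab_allocated);
    }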


--- old/src/cpu/aarch64/vm/c1_Runtime1_aarch64.cpp

 711             __ br(Assembler::EQ, ok);
 712             __ bind(not_ok);
 713             __ stop("assert(can be fast path allocated)");
 714             __ should_not_reach_here();
 715             __ bind(ok);
 716           }
 717 #endif // ASSERT
 718 
 719           // if we got here then the TLAB allocation failed, so try
 720           // refilling the TLAB or allocating directly from eden.
 721           Label retry_tlab, try_eden;
 722           __ tlab_refill(retry_tlab, try_eden, slow_path); // does not destroy r3 (klass), returns r5
 723 
 724           __ bind(retry_tlab);
 725 
 726           // get the instance size (size is positive so ldrw is fine for 64bit)
 727           __ ldrw(obj_size, Address(klass, Klass::layout_helper_offset()));
 728 
 729           __ tlab_allocate(obj, obj_size, 0, t1, t2, slow_path);
 730 
 731           __ initialize_object(obj, klass, obj_size, 0, t1, t2);
 732           __ verify_oop(obj);
 733           __ ldp(r5, r19, Address(__ post(sp, 2 * wordSize)));
 734           __ ret(lr);
 735 
 736           __ bind(try_eden);
 737           // get the instance size (size is positive so ldrw is fine for 64bit)
 738           __ ldrw(obj_size, Address(klass, Klass::layout_helper_offset()));
 739 
 740           __ eden_allocate(obj, obj_size, 0, t1, slow_path);
 741           __ incr_allocated_bytes(rthread, obj_size, 0, rscratch1);
 742 
 743           __ initialize_object(obj, klass, obj_size, 0, t1, t2);
 744           __ verify_oop(obj);
 745           __ ldp(r5, r19, Address(__ post(sp, 2 * wordSize)));
 746           __ ret(lr);
 747 
 748           __ bind(slow_path);
 749           __ ldp(r5, r19, Address(__ post(sp, 2 * wordSize)));
 750         }
 751 
 752         __ enter();
 753         OopMap* map = save_live_registers(sasm);
 754         int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
 755         oop_maps = new OopMapSet();
 756         oop_maps->add_gc_map(call_offset, map);
 757         restore_live_registers_except_r0(sasm);
 758         __ verify_oop(obj);
 759         __ leave();
 760         __ ret(lr);
 761 
 762         // r0: new instance
 763       }


 836 
 837           // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
 838           // since size is positive ldrw does right thing on 64bit
 839           __ ldrw(t1, Address(klass, Klass::layout_helper_offset()));
 840           __ lslvw(arr_size, length, t1);
 841           __ ubfx(t1, t1, Klass::_lh_header_size_shift,
 842                   exact_log2(Klass::_lh_header_size_mask + 1));
 843           __ add(arr_size, arr_size, t1);
 844           __ add(arr_size, arr_size, MinObjAlignmentInBytesMask); // align up
 845           __ andr(arr_size, arr_size, ~MinObjAlignmentInBytesMask);
 846 
 847           __ tlab_allocate(obj, arr_size, 0, t1, t2, slow_path);  // preserves arr_size
 848 
 849           __ initialize_header(obj, klass, length, t1, t2);
 850           __ ldrb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte)));
 851           assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
 852           assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
 853           __ andr(t1, t1, Klass::_lh_header_size_mask);
 854           __ sub(arr_size, arr_size, t1);  // body length
 855           __ add(t1, t1, obj);       // body start

 856           __ initialize_body(t1, arr_size, 0, t2);

 857           __ verify_oop(obj);
 858 
 859           __ ret(lr);
 860 
 861           __ bind(try_eden);
 862           // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
 863           // since size is positive ldrw does right thing on 64bit
 864           __ ldrw(t1, Address(klass, Klass::layout_helper_offset()));
 865           // since size is positive movw does right thing on 64bit
 866           __ movw(arr_size, length);
 867           __ lslvw(arr_size, length, t1);
 868           __ ubfx(t1, t1, Klass::_lh_header_size_shift,
 869                   exact_log2(Klass::_lh_header_size_mask + 1));
 870           __ add(arr_size, arr_size, t1);
 871           __ add(arr_size, arr_size, MinObjAlignmentInBytesMask); // align up
 872           __ andr(arr_size, arr_size, ~MinObjAlignmentInBytesMask);
 873 
 874           __ eden_allocate(obj, arr_size, 0, t1, slow_path);  // preserves arr_size
 875           __ incr_allocated_bytes(thread, arr_size, 0, rscratch1);
 876 




+++ new/src/cpu/aarch64/vm/c1_Runtime1_aarch64.cpp

 711             __ br(Assembler::EQ, ok);
 712             __ bind(not_ok);
 713             __ stop("assert(can be fast path allocated)");
 714             __ should_not_reach_here();
 715             __ bind(ok);
 716           }
 717 #endif // ASSERT
 718 
 719           // if we got here then the TLAB allocation failed, so try
 720           // refilling the TLAB or allocating directly from eden.
 721           Label retry_tlab, try_eden;
 722           __ tlab_refill(retry_tlab, try_eden, slow_path); // does not destroy r3 (klass), returns r5
 723 
 724           __ bind(retry_tlab);
 725 
 726           // get the instance size (size is positive so ldrw is fine for 64bit)
 727           __ ldrw(obj_size, Address(klass, Klass::layout_helper_offset()));
 728 
 729           __ tlab_allocate(obj, obj_size, 0, t1, t2, slow_path);
 730 
 731           __ initialize_object(obj, klass, obj_size, 0, t1, t2, /* is_tlab_allocated */ true);
 732           __ verify_oop(obj);
 733           __ ldp(r5, r19, Address(__ post(sp, 2 * wordSize)));
 734           __ ret(lr);
 735 
 736           __ bind(try_eden);
 737           // get the instance size (size is positive so ldrw is fine for 64bit)
 738           __ ldrw(obj_size, Address(klass, Klass::layout_helper_offset()));
 739 
 740           __ eden_allocate(obj, obj_size, 0, t1, slow_path);
 741           __ incr_allocated_bytes(rthread, obj_size, 0, rscratch1);
 742 
 743           __ initialize_object(obj, klass, obj_size, 0, t1, t2, /* is_tlab_allocated */ false);
 744           __ verify_oop(obj);
 745           __ ldp(r5, r19, Address(__ post(sp, 2 * wordSize)));
 746           __ ret(lr);
 747 
 748           __ bind(slow_path);
 749           __ ldp(r5, r19, Address(__ post(sp, 2 * wordSize)));
 750         }
 751 
 752         __ enter();
 753         OopMap* map = save_live_registers(sasm);
 754         int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
 755         oop_maps = new OopMapSet();
 756         oop_maps->add_gc_map(call_offset, map);
 757         restore_live_registers_except_r0(sasm);
 758         __ verify_oop(obj);
 759         __ leave();
 760         __ ret(lr);
 761 
 762         // r0: new instance
 763       }
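The "size is positive" comments in the retry_tlab and try_eden paths above rely
on the Klass::layout_helper encoding: for an instance klass that can be
fast-path allocated, layout_helper is simply the positive instance size in
bytes, so a 32-bit ldrw is sufficient. A small standalone model of that check
(a sketch of the general encoding; the helper below is illustrative and not
taken from this webrev):

    // Positive layout_helper == instance size in bytes; a non-positive value
    // (arrays) or the low "slow path" bit forces the runtime call instead.
    static bool fast_path_instance_size(int32_t layout_helper, uint32_t* size_in_bytes) {
      if (layout_helper <= 0 || (layout_helper & 1) != 0) return false;
      *size_in_bytes = (uint32_t) layout_helper;
      return true;
    }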


 836 
 837           // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
 838           // since size is positive ldrw does right thing on 64bit
 839           __ ldrw(t1, Address(klass, Klass::layout_helper_offset()));
 840           __ lslvw(arr_size, length, t1);
 841           __ ubfx(t1, t1, Klass::_lh_header_size_shift,
 842                   exact_log2(Klass::_lh_header_size_mask + 1));
 843           __ add(arr_size, arr_size, t1);
 844           __ add(arr_size, arr_size, MinObjAlignmentInBytesMask); // align up
 845           __ andr(arr_size, arr_size, ~MinObjAlignmentInBytesMask);
 846 
 847           __ tlab_allocate(obj, arr_size, 0, t1, t2, slow_path);  // preserves arr_size
 848 
 849           __ initialize_header(obj, klass, length, t1, t2);
 850           __ ldrb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte)));
 851           assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
 852           assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
 853           __ andr(t1, t1, Klass::_lh_header_size_mask);
 854           __ sub(arr_size, arr_size, t1);  // body length
 855           __ add(t1, t1, obj);       // body start
 856           if (!ZeroTLAB) {
 857            __ initialize_body(t1, arr_size, 0, t2);
 858           }
 859           __ verify_oop(obj);
 860 
 861           __ ret(lr);
 862 
 863           __ bind(try_eden);
 864           // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
 865           // since size is positive ldrw does right thing on 64bit
 866           __ ldrw(t1, Address(klass, Klass::layout_helper_offset()));
 867           // since size is positive movw does right thing on 64bit
 868           __ movw(arr_size, length);
 869           __ lslvw(arr_size, length, t1);
 870           __ ubfx(t1, t1, Klass::_lh_header_size_shift,
 871                   exact_log2(Klass::_lh_header_size_mask + 1));
 872           __ add(arr_size, arr_size, t1);
 873           __ add(arr_size, arr_size, MinObjAlignmentInBytesMask); // align up
 874           __ andr(arr_size, arr_size, ~MinObjAlignmentInBytesMask);
 875 
 876           __ eden_allocate(obj, arr_size, 0, t1, slow_path);  // preserves arr_size
 877           __ incr_allocated_bytes(thread, arr_size, 0, rscratch1);
 878 
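The arr_size computation in both branches above implements
round_up(header_size + (length << log2_element_size), MinObjAlignmentInBytes),
with both fields decoded from Klass::layout_helper. A worked standalone model
(the concrete constants, a 16-byte header, log2 element size 2 and 8-byte
alignment for an int[] on a 64-bit VM, are assumptions for illustration, not
values taken from this webrev):

    #include <cstdint>
    #include <cstdio>

    // Model of the assembly above: size = align_up(hdr + (len << esz), align).
    static uint32_t array_alloc_size(uint32_t length, uint32_t layout_helper,
                                     uint32_t align_mask /* MinObjAlignmentInBytesMask */) {
      uint32_t arr_size = length << (layout_helper & 0x1F);   // lslvw arr_size, length, t1
      uint32_t hdr      = (layout_helper >> 16) & 0xFF;       // ubfx of the header-size field
      arr_size += hdr;                                        // add arr_size, arr_size, t1
      return (arr_size + align_mask) & ~align_mask;           // align up
    }

    int main() {
      // Assumed layout_helper fields: header size 16 at bits [23:16],
      // log2(element size) = 2 in the low 5 bits (an int[] of length 10).
      uint32_t lh = (16u << 16) | 2u;
      printf("%u\n", array_alloc_size(10, lh, 7));            // prints 56
      return 0;
    }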

