
src/cpu/aarch64/vm/c1_Runtime1_aarch64.cpp

rev 12409 : 8169177: aarch64: SIGSEGV when "-XX:+ZeroTLAB" is specified along with GC options
Summary: Add zero-initialization to C1 for fast TLAB refills
Reviewed-by: aph, drwhite
Contributed-by: kavitha.natarajan@linaro.org
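
For context, the invariant this changeset relies on: with -XX:+ZeroTLAB the TLAB is zeroed when it is refilled (which is what the summary says this patch adds to C1's fast refill path), so objects carved out of the TLAB fast path do not need their bodies cleared again, while eden memory carries no such guarantee. The following is a minimal, self-contained C++ model of that contract, not HotSpot code; Tlab, initialize_object, and all parameter names here are hypothetical stand-ins.

    // Hypothetical model of the ZeroTLAB contract (not HotSpot code).
    #include <cassert>
    #include <cstddef>
    #include <cstring>
    #include <vector>

    struct Tlab {
      std::vector<unsigned char> buf;
      std::size_t top = 0;
      bool zero_on_refill;                       // models -XX:+ZeroTLAB

      Tlab(std::size_t cap, bool zero) : buf(cap), zero_on_refill(zero) {
        refill();
      }
      void refill() {
        top = 0;
        if (zero_on_refill)
          std::memset(buf.data(), 0, buf.size()); // TLAB pre-zeroed at refill
      }
      // Bump-pointer fast path; returns nullptr when the TLAB is exhausted.
      void* allocate(std::size_t size) {
        if (top + size > buf.size()) return nullptr;
        void* p = buf.data() + top;
        top += size;
        return p;
      }
    };

    // Plays the role of initialize_object(): clear the body only when the
    // memory is not already guaranteed to be zero.
    void initialize_object(void* obj, std::size_t size,
                           bool is_tlab_allocated, bool zero_tlab) {
      if (!(zero_tlab && is_tlab_allocated))
        std::memset(obj, 0, size);               // explicit clear (eden path)
    }

    int main() {
      Tlab tlab(1024, /*zero=*/true);
      void* obj = tlab.allocate(64);
      initialize_object(obj, 64, /*is_tlab_allocated=*/true, /*zero_tlab=*/true);
      for (std::size_t i = 0; i < 64; i++)       // body must read as zero
        assert(static_cast<unsigned char*>(obj)[i] == 0);
      return 0;
    }

The two hunks below are the two call sites of initialize_object(): the TLAB branch passes true, the eden branch passes false.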

@@ -726,11 +726,11 @@
          // get the instance size (size is positive so movl is fine for 64bit)
           __ ldrw(obj_size, Address(klass, Klass::layout_helper_offset()));
 
           __ tlab_allocate(obj, obj_size, 0, t1, t2, slow_path);
 
-          __ initialize_object(obj, klass, obj_size, 0, t1, t2);
+          __ initialize_object(obj, klass, obj_size, 0, t1, t2, /* is_tlab_allocated */ true);
           __ verify_oop(obj);
           __ ldp(r5, r19, Address(__ post(sp, 2 * wordSize)));
           __ ret(lr);
 
           __ bind(try_eden);

@@ -738,11 +738,11 @@
           __ ldrw(obj_size, Address(klass, Klass::layout_helper_offset()));
 
           __ eden_allocate(obj, obj_size, 0, t1, slow_path);
           __ incr_allocated_bytes(rthread, obj_size, 0, rscratch1);
 
-          __ initialize_object(obj, klass, obj_size, 0, t1, t2);
+          __ initialize_object(obj, klass, obj_size, 0, t1, t2, /* is_tlab_allocated */ false);
           __ verify_oop(obj);
           __ ldp(r5, r19, Address(__ post(sp, 2 * wordSize)));
           __ ret(lr);
 
           __ bind(slow_path);
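
Design note on the two call sites above: the first hunk is the TLAB branch, where the memory may have been pre-zeroed at refill when -XX:+ZeroTLAB is set, so initialize_object() is told is_tlab_allocated = true and may skip clearing the object body. The second hunk is the eden branch; eden memory is never pre-zeroed by ZeroTLAB, so the call passes false and the body is always cleared explicitly.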

@@ -851,11 +851,13 @@
           assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
           assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
           __ andr(t1, t1, Klass::_lh_header_size_mask);
           __ sub(arr_size, arr_size, t1);  // body length
           __ add(t1, t1, obj);       // body start
-          __ initialize_body(t1, arr_size, 0, t2);
+          if (!ZeroTLAB) {
+            __ initialize_body(t1, arr_size, 0, t2);
+          }
           __ verify_oop(obj);
 
           __ ret(lr);
 
           __ bind(try_eden);
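
This last hunk is the array fast path inside the TLAB branch (it sits before the try_eden label), so the allocation necessarily came from the TLAB and the refill-time zeroing already covers the body; the explicit initialize_body() call is therefore skipped outright when ZeroTLAB is on. A hedged way to exercise these C1 fast paths (the flags are real HotSpot options; the class name is a placeholder):

    java -XX:+UseTLAB -XX:+ZeroTLAB -XX:TieredStopAtLevel=1 AllocationStress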