
src/hotspot/cpu/aarch64/templateTable_aarch64.cpp

rev 53735 : AArch64 support for ValueTypes

@@ -168,10 +168,11 @@
 {
   if (!RewriteBytecodes)  return;
   Label L_patch_done;
 
   switch (bc) {
+  case Bytecodes::_fast_qputfield:
   case Bytecodes::_fast_aputfield:
   case Bytecodes::_fast_bputfield:
   case Bytecodes::_fast_zputfield:
   case Bytecodes::_fast_cputfield:
   case Bytecodes::_fast_dputfield:

@@ -806,15 +807,25 @@
   __ mov(r1, r0);
   __ pop_ptr(r0);
   // r0: array
   // r1: index
   index_check(r0, r1); // leaves index in r1, kills rscratch1
+  if (EnableValhalla && ValueArrayFlatten) {
+    Label is_flat_array, done;
+
+    __ test_flat_array_oop(r0, r10 /*temp*/, is_flat_array);
+    __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);
+    do_oop_load(_masm, Address(r0, r1, Address::uxtw(LogBytesPerHeapOop)), r0, IS_ARRAY);
+
+    __ b(done);
+    __ bind(is_flat_array);
+    __ call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::value_array_load), r0, r1);
+    __ bind(done);
+  } else {
   __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);
-  do_oop_load(_masm,
-              Address(r0, r1, Address::uxtw(LogBytesPerHeapOop)),
-              r0,
-              IS_ARRAY);
+    do_oop_load(_masm, Address(r0, r1, Address::uxtw(LogBytesPerHeapOop)), r0, IS_ARRAY);
+  }
 }
 
 void TemplateTable::baload()
 {
   transition(itos, itos);

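A note on the aaload hunk above: a flattened value array stores its elements
inline, so there is no per-element oop to load; the interpreter instead calls
into the runtime (InterpreterRuntime::value_array_load) to buffer the element
as a heap object. A minimal C++ sketch of that dispatch, with hypothetical
Obj/Array types standing in for the real oop layouts:

    struct Obj;                                  // stands in for a heap reference
    struct Array {
      bool  flat;                                // layout bit from the array klass
      Obj** refs;                                // reference layout (non-flat case)
    };

    Obj* buffer_flat_element(Array* a, int i);   // models value_array_load

    Obj* aaload(Array* a, int i) {
      return a->flat ? buffer_flat_element(a, i) // runtime boxes the inline element
                     : a->refs[i];               // ordinary reference load
    }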
@@ -1107,15 +1118,24 @@
   __ ldr(r3, at_tos_p2()); // array
 
   Address element_address(r3, r4, Address::uxtw(LogBytesPerHeapOop));
 
   index_check(r3, r2);     // kills r1
+
+  // Add the array base offset (in heap-oop units) to the index, so the
+  // uxtw-scaled element_address above resolves to the element itself
   __ add(r4, r2, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);
 
   // do array store check - check for NULL value first
   __ cbz(r0, is_null);
 
+  // Load the array klass into r1; if the array is flat, branch off to the
+  // flat-array store path below
+  Label is_flat_array;
+  if (ValueArrayFlatten) {
+    __ load_klass(r1, r3); 
+    __ test_flat_array_klass(r1, r10 /*temp*/, is_flat_array);
+  }
+
   // Move subklass into r1
   __ load_klass(r1, r0);
   // Move superklass into r0
   __ load_klass(r0, r3);
   __ ldr(r0, Address(r0,

@@ -1141,12 +1161,68 @@
 
   // Have a NULL in r0, r3=array, r2=index.  Store NULL at ary[idx]
   __ bind(is_null);
   __ profile_null_seen(r2);
 
+  if (EnableValhalla) {
+    Label is_null_into_value_array_npe, store_null;
+
+    __ load_klass(r0, r3);
+    // Null can never be stored into a flat array: throw NPE
+    __ test_flat_array_klass(r0, r1, is_null_into_value_array_npe);
+
+    // An object array whose element klass is a value type could not be
+    // flattened, but must keep the same store semantics as a flat array:
+    // storing null throws NPE
+    __ ldr(r0, Address(r0, ObjArrayKlass::element_klass_offset()));
+    __ test_klass_is_value(r0, r1, is_null_into_value_array_npe);
+    __ b(store_null);
+
+    __ bind(is_null_into_value_array_npe);
+    __ b(ExternalAddress(Interpreter::_throw_NullPointerException_entry));
+
+    __ bind(store_null);
+  }
+
   // Store a NULL
   do_oop_store(_masm, element_address, noreg, IS_ARRAY);
+  __ b(done);
+
+  if (EnableValhalla) {
+    // r0 - value, r2 - index, r3 - array, r1 - loaded array klass
+    // Store a non-null value into a flat array
+    __ bind(is_flat_array);
+
+    Label is_type_ok;
+
+    // Profile the not-null value's klass
+    __ load_klass(r10, r0);
+    __ profile_typecheck(r2, r1, r0); // clobbers r2 and r0
+
+    // A flat value array needs an exact type match:
+    // r10 (value klass) must equal the array's element klass
+
+    // Move the element klass into r0
+    __ ldr(r0, Address(r1, ArrayKlass::element_klass_offset()));
+    __ cmp(r0, r10);
+    __ br(Assembler::EQ, is_type_ok);
+
+    __ profile_typecheck_failed(r2);
+    __ b(ExternalAddress(Interpreter::_throw_ArrayStoreException_entry));
+    __ bind(is_type_ok);
+
+    // Reload value, array and index from the stack, because
+    // profile_typecheck above clobbered r2 and r0
+    __ ldr(r1, at_tos());    // value
+    __ mov(r2, r3);          // array
+    __ ldr(r3, at_tos_p1()); // index
+    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::value_array_store), r1, r2, r3);
+  }
 
   // Pop stack arguments
   __ bind(done);
   __ add(esp, esp, 3 * Interpreter::stackElementSize);
 }

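The aastore hunks above add two Valhalla rules on top of the usual store
check: storing null into a flat array, or into an object array whose element
class is a value type, throws NullPointerException, and a flat array accepts
only exactly its element type, the copy itself being done by
InterpreterRuntime::value_array_store. A sketch of just the checks, with
hypothetical types:

    struct Klass { bool is_value_type; bool is_flat_array; Klass* element; };
    struct Ref   { Klass* klass; };

    enum class StoreResult { OK, NPE, ASE };

    StoreResult aastore_check(const Klass* array_klass, const Ref* value) {
      const Klass* elem = array_klass->element;
      if (value == nullptr) {
        // Null is rejected for flat arrays and for object arrays whose
        // element class is a value type.
        return (array_klass->is_flat_array || elem->is_value_type)
                   ? StoreResult::NPE : StoreResult::OK;
      }
      if (array_klass->is_flat_array && value->klass != elem)
        return StoreResult::ASE;   // flat arrays require an exact type match
      return StoreResult::OK;      // the ordinary covariant check is elided
    }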
@@ -2023,14 +2099,29 @@
 
 void TemplateTable::if_acmp(Condition cc)
 {
   transition(atos, vtos);
   // assume branch is more often taken than not (loops use backward branches)
-  Label not_taken;
+  Label taken, not_taken;
   __ pop_ptr(r1);
   __ cmpoop(r1, r0);
+
+  if (EnableValhalla) {
+    guarantee(!UsePointerPerturbation, "UsePointerPerturbation is not implemented");
+
+    // References that differ are never substitutable; two equal nulls are
+    __ br(Assembler::NE, (cc == not_equal) ? taken : not_taken);
+    __ cbz(r1, (cc == equal) ? taken : not_taken);
+
+    // A value type oop carries the always-locked pattern in its mark word;
+    // two equal references to a value type must still compare as not equal,
+    // hence the inverted condition below
+    __ ldr(r2, Address(r1, oopDesc::mark_offset_in_bytes()));
+    __ andr(r2, r2, markOopDesc::always_locked_pattern);
+    __ cmp(r2, (u1) markOopDesc::always_locked_pattern);
+    cc = (cc == equal) ? not_equal : equal;
+  }
+
   __ br(j_not(cc), not_taken);
+  __ bind(taken);
   branch(false, false);
   __ bind(not_taken);
   __ profile_not_taken_branch(r0);
 }
 
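The if_acmp hunk above implements the Valhalla acmp rule: two references
compare equal only if they are the same oop and that oop is not a value type,
value types being recognized by the always-locked pattern in their mark word
(the UsePointerPerturbation variant is explicitly unimplemented). A sketch of
the semantics; the pattern constant below is a placeholder for the real
markOopDesc::always_locked_pattern:

    #include <cstdint>

    constexpr uintptr_t always_locked_pattern = 0x405;  // placeholder value

    bool acmp_equal(uintptr_t a, uintptr_t b, uintptr_t mark_of_a) {
      if (a != b) return false;  // distinct references are never equal
      if (a == 0) return true;   // two nulls are equal
      // The same non-null reference still compares unequal if it is a
      // value type.
      return (mark_of_a & always_locked_pattern) != always_locked_pattern;
    }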

@@ -2495,12 +2586,11 @@
   Label Done, notByte, notBool, notInt, notShort, notChar,
               notLong, notFloat, notObj, notDouble;
 
   // x86 uses a shift and mask or wings it with a shift plus assert
   // the mask is not needed. aarch64 just uses bitfield extract
-  __ ubfxw(flags, raw_flags, ConstantPoolCacheEntry::tos_state_shift,
-           ConstantPoolCacheEntry::tos_state_bits);
+  __ ubfxw(flags, raw_flags, ConstantPoolCacheEntry::tos_state_shift, ConstantPoolCacheEntry::tos_state_bits);
 
   assert(btos == 0, "change code, btos != 0");
   __ cbnz(flags, notByte);
 
   // Don't rewrite getstatic, only getfield

@@ -2531,16 +2621,72 @@
 
   __ bind(notBool);
   __ cmp(flags, (u1)atos);
   __ br(Assembler::NE, notObj);
   // atos
+  if (!EnableValhalla) {
   do_oop_load(_masm, field, r0, IN_HEAP);
   __ push(atos);
   if (rc == may_rewrite) {
     patch_bytecode(Bytecodes::_fast_agetfield, bc, r1);
   }
   __ b(Done);
+  } else { // Valhalla
+
+    if (is_static) {
+      __ load_heap_oop(r0, field);
+      Label isFlattenable, isUninitialized;
+      // The loaded oop may still be null if the static field has not been
+      // initialized yet; the flattenable case below must not expose that null
+      __ test_field_is_flattenable(raw_flags, r10, isFlattenable);
+        // Not flattenable case
+        __ push(atos);
+        __ b(Done);
+      // Flattenable case, must not return null even if uninitialized
+      __ bind(isFlattenable);
+        __ cbz(r0, isUninitialized);
+          __ push(atos);
+          __ b(Done);
+        __ bind(isUninitialized);
+          __ andw(raw_flags, raw_flags, ConstantPoolCacheEntry::field_index_mask);
+          __ call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::uninitialized_static_value_field), obj, raw_flags);
+          __ verify_oop(r0);
+          __ push(atos);
+          __ b(Done);
+    } else {
+      Label isFlattened, isInitialized, isFlattenable, rewriteFlattenable;
+        __ test_field_is_flattenable(raw_flags, r10, isFlattenable);
+        // Non-flattenable field case, also covers the object case
+        __ load_heap_oop(r0, field);
+        __ push(atos);
+        if (rc == may_rewrite) {
+          patch_bytecode(Bytecodes::_fast_agetfield, bc, r1);
+        }
+        __ b(Done);
+      __ bind(isFlattenable);
+        __ test_field_is_flattened(raw_flags, r10, isFlattened);
+          // Non-flattened field case
+          __ load_heap_oop(r0, field);
+          __ cbnz(r0, isInitialized);
+            __ andw(raw_flags, raw_flags, ConstantPoolCacheEntry::field_index_mask);
+            __ call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::uninitialized_instance_value_field), obj, raw_flags);
+          __ bind(isInitialized);
+          __ verify_oop(r0);
+          __ push(atos);
+          __ b(rewriteFlattenable);
+        __ bind(isFlattened);
+          __ ldr(r10, Address(cache, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f1_offset())));
+          __ andw(raw_flags, raw_flags, ConstantPoolCacheEntry::field_index_mask);
+          call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::read_flattened_field), obj, raw_flags, r10);
+          __ verify_oop(r0);
+          __ push(atos);
+      __ bind(rewriteFlattenable);
+      if (rc == may_rewrite) { 
+         patch_bytecode(Bytecodes::_fast_qgetfield, bc, r1);
+      }
+      __ b(Done);
+    }
+  }
 
   __ bind(notObj);
   __ cmp(flags, (u1)itos);
   __ br(Assembler::NE, notInt);
   // itos

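The atos getfield path above splits three ways on the cached flags: not
flattenable (plain oop load, rewritable to _fast_agetfield), flattenable but
not flattened (oop load, with a runtime call supplying the default value
while the field is still null), and flattened (always
InterpreterRuntime::read_flattened_field). A compact model of the decision
tree, with hypothetical helpers standing in for the loads and runtime calls:

    struct Obj;
    struct FieldEntry { bool flattenable; bool flattened; };

    Obj* plain_oop_load(Obj* holder, const FieldEntry& f);       // heap load
    Obj* read_flattened_field(Obj* holder, const FieldEntry& f); // runtime call
    Obj* default_value(const FieldEntry& f);  // models uninitialized_*_value_field

    Obj* getfield_a(Obj* holder, const FieldEntry& f) {
      if (!f.flattenable)
        return plain_oop_load(holder, f);  // may rewrite to _fast_agetfield
      if (f.flattened)
        return read_flattened_field(holder, f);
      Obj* v = plain_oop_load(holder, f);
      return v != nullptr ? v : default_value(f);  // never expose null
    }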
@@ -2706,10 +2852,11 @@
   const Register cache = r2;
   const Register index = r3;
   const Register obj   = r2;
   const Register off   = r19;
   const Register flags = r0;
+  const Register flags2 = r6;
   const Register bc    = r4;
 
   resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
   jvmti_post_field_mod(cache, index, is_static);
   load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);

@@ -2728,10 +2875,12 @@
   const Address field(obj, off);
 
   Label notByte, notBool, notInt, notShort, notChar,
         notLong, notFloat, notObj, notDouble;
 
+  // Keep a full copy of the flags: the tos-state extract below overwrites
+  // flags, and the Valhalla paths still need the flattenable/flattened and
+  // field-index bits
+  __ mov(flags2, flags);
+
   // x86 uses a shift and mask or wings it with a shift plus assert
   // the mask is not needed. aarch64 just uses bitfield extract
   __ ubfxw(flags, flags, ConstantPoolCacheEntry::tos_state_shift,  ConstantPoolCacheEntry::tos_state_bits);
 
   assert(btos == 0, "change code, btos != 0");

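The hunks above and below all pick different fields out of the same
ConstantPoolCacheEntry flags word: ubfxw extracts the tos state, andw with
field_index_mask recovers the field index, and the test_field_is_* helpers
test individual flag bits; flags2 keeps an unclobbered copy because the
tos-state extract overwrites flags. A bitfield model with placeholder
shift/mask values (the real constants live in ConstantPoolCacheEntry):

    #include <cstdint>

    constexpr uint32_t tos_state_shift  = 28;      // placeholder
    constexpr uint32_t tos_state_bits   = 4;       // placeholder
    constexpr uint32_t field_index_mask = 0xffff;  // placeholder

    uint32_t tos_state(uint32_t flags) {   // what the ubfxw computes
      return (flags >> tos_state_shift) & ((1u << tos_state_bits) - 1);
    }

    uint32_t field_index(uint32_t flags) { // what the andw computes
      return flags & field_index_mask;
    }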
@@ -2770,18 +2919,60 @@
   __ cmp(flags, (u1)atos);
   __ br(Assembler::NE, notObj);
 
   // atos
   {
+     if (!EnableValhalla) {
     __ pop(atos);
     if (!is_static) pop_and_check_object(obj);
     // Store into the field
     do_oop_store(_masm, field, r0, IN_HEAP);
     if (rc == may_rewrite) {
       patch_bytecode(Bytecodes::_fast_aputfield, bc, r1, true, byte_no);
     }
     __ b(Done);
+     } else { // Valhalla
+
+      __ pop(atos);
+      if (is_static) {
+        Label notFlattenable;
+        __ test_field_is_not_flattenable(flags2, r10, notFlattenable);
+        // A flattenable static field must never be set to null
+        __ null_check(r0);
+        __ bind(notFlattenable);
+        do_oop_store(_masm, field, r0, IN_HEAP);
+        __ b(Done);
+      } else {
+        Label isFlattenable, isFlattened, rewriteNotFlattenable, rewriteFlattenable;
+        __ test_field_is_flattenable(flags2, r10, isFlattenable);
+        // Not flattenable case, covers not flattenable values and objects
+        pop_and_check_object(obj);
+        // Store into the field
+        do_oop_store(_masm, field, r0, IN_HEAP);
+        __ bind(rewriteNotFlattenable);
+        if (rc == may_rewrite) {
+          patch_bytecode(Bytecodes::_fast_aputfield, bc, r19, true, byte_no);
+        }
+        __ b(Done);
+        // Implementation of the flattenable semantics
+        __ bind(isFlattenable);
+        __ null_check(r0);
+        __ test_field_is_flattened(flags2, r10, isFlattened);
+        // Not flattened case
+        pop_and_check_object(obj);
+        // Store into the field
+        do_oop_store(_masm, field, r0, IN_HEAP);
+        __ b(rewriteFlattenable);
+        __ bind(isFlattened);
+        pop_and_check_object(obj);
+        call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::write_flattened_value), r0, off, obj);
+        __ bind(rewriteFlattenable);
+        if (rc == may_rewrite) {
+          patch_bytecode(Bytecodes::_fast_qputfield, bc, r19, true, byte_no);
+        }
+        __ b(Done);
+      }
+     }  // Valhalla
   }
 
   __ bind(notObj);
   __ cmp(flags, (u1)itos);
   __ br(Assembler::NE, notInt);

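putfield mirrors the getfield split: a flattenable field first null-checks
the incoming value, since writing null into it is illegal, then either does a
normal oop store (not flattened) or calls
InterpreterRuntime::write_flattened_value to copy the payload in place; both
flattenable paths rewrite to _fast_qputfield. A sketch in the same
hypothetical model:

    struct Obj;
    struct FieldEntry { bool flattenable; bool flattened; };

    void oop_store(Obj* holder, long off, Obj* val);             // heap store
    void write_flattened_value(Obj* val, long off, Obj* holder); // runtime call
    [[noreturn]] void throw_npe();

    void putfield_a(Obj* holder, long off, Obj* val, const FieldEntry& f) {
      if (!f.flattenable) {            // objects, non-flattenable values
        oop_store(holder, off, val);   // rewritten to _fast_aputfield
        return;
      }
      if (val == nullptr) throw_npe(); // flattenable fields reject null
      if (f.flattened) write_flattened_value(val, off, holder);
      else             oop_store(holder, off, val);
    }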
@@ -2917,10 +3108,11 @@
     __ push_ptr(r19);                 // put the object pointer back on tos
     // Save tos values before call_VM() clobbers them. Since we have
     // to do it for every data type, we use the saved values as the
     // jvalue object.
     switch (bytecode()) {          // load values into the jvalue object
+    case Bytecodes::_fast_qputfield: // fall through
     case Bytecodes::_fast_aputfield: __ push_ptr(r0); break;
     case Bytecodes::_fast_bputfield: // fall through
     case Bytecodes::_fast_zputfield: // fall through
     case Bytecodes::_fast_sputfield: // fall through
     case Bytecodes::_fast_cputfield: // fall through

@@ -2943,10 +3135,11 @@
                CAST_FROM_FN_PTR(address,
                                 InterpreterRuntime::post_field_modification),
                r19, c_rarg2, c_rarg3);
 
     switch (bytecode()) {             // restore tos values
+    case Bytecodes::_fast_qputfield: // fall through
     case Bytecodes::_fast_aputfield: __ pop_ptr(r0); break;
     case Bytecodes::_fast_bputfield: // fall through
     case Bytecodes::_fast_zputfield: // fall through
     case Bytecodes::_fast_sputfield: // fall through
     case Bytecodes::_fast_cputfield: // fall through

@@ -2993,10 +3186,23 @@
   // field address
   const Address field(r2, r1);
 
   // access field
   switch (bytecode()) {
+  case Bytecodes::_fast_qputfield:
+    {
+      Label isFlattened, done;
+      __ null_check(r0);
+      __ test_field_is_flattened(r3, r10, isFlattened);
+      // Not flattened: a regular oop store
+      do_oop_store(_masm, field, r0, IN_HEAP);
+      __ b(done);
+      __ bind(isFlattened);
+      call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::write_flattened_value), r0, r1, r2);
+      __ bind(done);
+    }
+    break;
   case Bytecodes::_fast_aputfield:
     do_oop_store(_masm, field, r0, IN_HEAP);
     break;
   case Bytecodes::_fast_lputfield:
     __ access_store_at(T_LONG, IN_HEAP, field, r0, noreg, noreg);

@@ -3086,10 +3292,36 @@
     __ bind(notVolatile);
   }
 
   // access field
   switch (bytecode()) {
+  case Bytecodes::_fast_qgetfield:
+    {
+      Label isFlattened, isInitialized, Done;
+      // The flags word is reloaded before each use below; redundant, but it
+      // keeps this code close to the generic getfield path
+      __ ldrw(r10, Address(r2, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset())));
+      __ test_field_is_flattened(r10, r10, isFlattened);
+        // Non-flattened field case
+        __ mov(r10, r0);
+        __ load_heap_oop(r0, field);
+        __ cbnz(r0, isInitialized);
+          __ mov(r0, r10);
+          __ ldrw(r10, Address(r2, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset())));
+          __ andw(r10, r10, ConstantPoolCacheEntry::field_index_mask);
+          __ call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::uninitialized_instance_value_field), r0, r10);
+        __ bind(isInitialized);
+        __ verify_oop(r0);
+        __ b(Done);
+      __ bind(isFlattened);
+        __ ldrw(r10, Address(r2, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset())));
+        __ andw(r10, r10, ConstantPoolCacheEntry::field_index_mask);
+        __ ldr(r3, Address(r2, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f1_offset())));
+        call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::read_flattened_field), r0, r10, r3);
+        __ verify_oop(r0);
+      __ bind(Done);
+    }
+    break;
   case Bytecodes::_fast_agetfield:
     do_oop_load(_masm, field, r0, IN_HEAP);
     __ verify_oop(r0);
     break;
   case Bytecodes::_fast_lgetfield:

@@ -3643,10 +3875,34 @@
   __ bind(done);
   // Must prevent reordering of stores for object initialization with stores that publish the new object.
   __ membar(Assembler::StoreStore);
 }
 
+void TemplateTable::defaultvalue() {
+  transition(vtos, atos);
+  __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1);
+  __ get_constant_pool(c_rarg1);
+  call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::defaultvalue),
+          c_rarg1, c_rarg2);
+  __ verify_oop(r0);
+  // Must prevent reordering of stores for object initialization with stores that publish the new object.
+  __ membar(Assembler::StoreStore);
+}
+
+void TemplateTable::withfield() {
+  transition(vtos, atos);
+  resolve_cache_and_index(f2_byte, c_rarg1 /*cache*/, c_rarg2 /*index*/, sizeof(u2));
+
+  // n.b. unlike x86, the cache entry address is rcpool plus the indexed
+  // offset, so rcpool is passed to meet the shared code's expectations
+
+  // The new value is returned in r1; r0 holds the TOS adjustment in bytes
+  call_VM(r1, CAST_FROM_FN_PTR(address, InterpreterRuntime::withfield), rcpool);
+  __ verify_oop(r1);
+  __ add(esp, esp, r0);
+  __ mov(r0, r1);
+}
+
 void TemplateTable::newarray() {
   transition(itos, atos);
   __ load_unsigned_byte(c_rarg1, at_bcp(1));
   __ mov(c_rarg2, r0);
   call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray),

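On the two new bytecodes above: defaultvalue pushes the all-fields-default
instance of a value class, and withfield produces a copy of a value with one
field replaced, never mutating the source. The value semantics, illustrated
on a hypothetical Point value class:

    struct Point { int x, y; };  // stand-in for a value class

    Point defaultvalue_Point() { return Point{0, 0}; }  // all fields default

    Point withfield_x(Point p, int nx) {
      p.x = nx;  // mutates only the by-value copy
      return p;  // caller gets the new value; the source is unchanged
    }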
@@ -3714,18 +3970,33 @@
 
   // Come here on success
   __ bind(ok_is_subtype);
   __ mov(r0, r3); // Restore object in r3
 
-  // Collect counts on whether this test sees NULLs a lot or not.
-  if (ProfileInterpreter) {
     __ b(done);
     __ bind(is_null);
+
+  // Collect counts on whether this test sees NULLs a lot or not.
+  if (ProfileInterpreter) {
     __ profile_null_seen(r2);
-  } else {
-    __ bind(is_null);   // same as 'done'
   }
+
+  if (EnableValhalla) {
+    // Get cpool & tags index
+    __ get_cpool_and_tags(r2, r3); // r2=cpool, r3=tags array
+    __ get_unsigned_2_byte_index_at_bcp(r19, 1); // r19=index
+    // Load the tag of the CP entry for the class operand
+    __ add(rscratch1, r3, Array<u1>::base_offset_in_bytes());
+    __ lea(r1, Address(rscratch1, r19));
+    __ ldarb(r1, r1);
+    // If the entry is a Q-descriptor, null does not pass the cast: throw NPE
+    __ andr(r1, r1, JVM_CONSTANT_QDESC_BIT);
+    __ cmp(r1, (u1) JVM_CONSTANT_QDESC_BIT);
+    __ br(Assembler::NE, done);
+    __ b(ExternalAddress(Interpreter::_throw_NullPointerException_entry));
+  }
+
   __ bind(done);
 }
 
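The checkcast tail above adds the Q-descriptor rule: when the class operand
in the constant pool names an inline (Q-typed) class, a null reference does
not quietly pass the cast but throws NullPointerException. A sketch of the
tag test; the bit value is a placeholder for the real JVM_CONSTANT_QDESC_BIT:

    #include <cstdint>

    constexpr uint8_t QDESC_BIT = 0x80;  // placeholder bit value

    // Null passes checkcast unless the target type is a Q-descriptor.
    bool checkcast_null_passes(uint8_t cp_tag) {
      return (cp_tag & QDESC_BIT) == 0;
    }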
 void TemplateTable::instanceof() {
   transition(atos, itos);