
src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp

rev 53735 : AArch64 support for ValueTypes
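The hunks below follow one recurring pattern: every place where the C1 back end decides whether an operand is an oop (register/stack/memory moves, verify_oop checks, compressed-oop encode/decode, compares, shifts, and the atomic ops) now treats T_VALUETYPE the same way as T_OBJECT and T_ARRAY, since a value type reference is an ordinary heap oop at this level. A minimal standalone sketch of that check follows; the enum subset and the helper name is_oop_basic_type are illustrative only and do not appear in the patch.

// Standalone sketch (not HotSpot code): models the "is this operand an oop?"
// test that every hunk below extends with T_VALUETYPE.
#include <cassert>

enum BasicType { T_INT, T_LONG, T_OBJECT, T_ARRAY, T_VALUETYPE };

// Hypothetical helper: value type references are handled like any other oop
// by the C1 back end (moves, verify_oop, compressed-oop encode/decode, ...).
static bool is_oop_basic_type(BasicType t) {
  return t == T_OBJECT || t == T_ARRAY || t == T_VALUETYPE;
}

int main() {
  assert(is_oop_basic_type(T_VALUETYPE));   // newly covered by this patch
  assert(!is_oop_basic_type(T_INT));        // primitives are unaffected
  return 0;
}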

@@ -556,10 +556,11 @@
       assert(patch_code == lir_patch_none, "no patching handled here");
       __ mov(dest->as_register_lo(), (intptr_t)c->as_jlong());
       break;
     }
 
+    case T_VALUETYPE: 
     case T_OBJECT: {
         if (patch_code == lir_patch_none) {
           jobject2reg(c->as_jobject(), dest->as_register());
         } else {
           jobject2reg_with_patching(dest->as_register(), info);

@@ -602,10 +603,11 @@
 }
 
 void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
   LIR_Const* c = src->as_constant_ptr();
   switch (c->type()) {
+  case T_VALUETYPE: 
   case T_OBJECT:
     {
       if (! c->as_jobject())
         __ str(zr, frame_map()->address_for_slot(dest->single_stack_ix()));
       else {

@@ -668,10 +670,11 @@
     break;
   case T_INT:
     assert(c->as_jint() == 0, "should be");
     insn = &Assembler::strw;
     break;
+  case T_VALUETYPE: 
   case T_OBJECT:
   case T_ARRAY:
     assert(c->as_jobject() == 0, "should be");
     if (UseCompressedOops && !wide) {
       insn = &Assembler::strw;

@@ -708,17 +711,17 @@
       // Can do LONG -> OBJECT
       move_regs(src->as_register_lo(), dest->as_register());
       return;
     }
     assert(src->is_single_cpu(), "must match");
-    if (src->type() == T_OBJECT) {
+    if (src->type() == T_OBJECT || src->type() == T_VALUETYPE) {
       __ verify_oop(src->as_register());
     }
     move_regs(src->as_register(), dest->as_register());
 
   } else if (dest->is_double_cpu()) {
-    if (src->type() == T_OBJECT || src->type() == T_ARRAY) {
+    if (src->type() == T_OBJECT || src->type() == T_ARRAY || src->type() == T_VALUETYPE) {
       // Surprising to me but we can see move of a long to t_object
       __ verify_oop(src->as_register());
       move_regs(src->as_register(), dest->as_register_lo());
       return;
     }

@@ -742,11 +745,11 @@
   }
 }
 
 void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
   if (src->is_single_cpu()) {
-    if (type == T_ARRAY || type == T_OBJECT) {
+    if (type == T_ARRAY || type == T_OBJECT || type == T_VALUETYPE) {
       __ str(src->as_register(), frame_map()->address_for_slot(dest->single_stack_ix()));
       __ verify_oop(src->as_register());
     } else if (type == T_METADATA || type == T_DOUBLE) {
       __ str(src->as_register(), frame_map()->address_for_slot(dest->single_stack_ix()));
     } else {

@@ -780,11 +783,11 @@
   if (patch_code != lir_patch_none) {
     deoptimize_trap(info);
     return;
   }
 
-  if (type == T_ARRAY || type == T_OBJECT) {
+  if (type == T_ARRAY || type == T_OBJECT || type == T_VALUETYPE) {
     __ verify_oop(src->as_register());
 
     if (UseCompressedOops && !wide) {
       __ encode_heap_oop(compressed_src, src->as_register());
     } else {

@@ -802,10 +805,11 @@
     case T_DOUBLE: {
       __ strd(src->as_double_reg(), as_Address(to_addr));
       break;
     }
 
+    case T_VALUETYPE: // fall through
     case T_ARRAY:   // fall through
     case T_OBJECT:  // fall through
       if (UseCompressedOops && !wide) {
         __ strw(compressed_src, as_Address(to_addr, rscratch2));
       } else {

@@ -855,11 +859,11 @@
 void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
   assert(src->is_stack(), "should not call otherwise");
   assert(dest->is_register(), "should not call otherwise");
 
   if (dest->is_single_cpu()) {
-    if (type == T_ARRAY || type == T_OBJECT) {
+    if (type == T_ARRAY || type == T_OBJECT || type == T_VALUETYPE) {
       __ ldr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
       __ verify_oop(dest->as_register());
     } else if (type == T_METADATA) {
       __ ldr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
     } else {

@@ -927,11 +931,11 @@
 
 void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool /* unaligned */) {
   LIR_Address* addr = src->as_address_ptr();
   LIR_Address* from_addr = src->as_address_ptr();
 
-  if (addr->base()->type() == T_OBJECT) {
+  if (addr->base()->type() == T_OBJECT || addr->base()->type() == T_VALUETYPE) { 
     __ verify_oop(addr->base()->as_pointer_register());
   }
 
   if (patch_code != lir_patch_none) {
     deoptimize_trap(info);

@@ -951,10 +955,11 @@
     case T_DOUBLE: {
       __ ldrd(dest->as_double_reg(), as_Address(from_addr));
       break;
     }
 
+    case T_VALUETYPE: // fall through
     case T_ARRAY:   // fall through
     case T_OBJECT:  // fall through
       if (UseCompressedOops && !wide) {
         __ ldrw(dest->as_register(), as_Address(from_addr));
       } else {

@@ -1005,11 +1010,11 @@
 
     default:
       ShouldNotReachHere();
   }
 
-  if (type == T_ARRAY || type == T_OBJECT) {
+  if (type == T_ARRAY || type == T_OBJECT || type == T_VALUETYPE) {
     if (UseCompressedOops && !wide) {
       __ decode_heap_oop(dest->as_register());
     }
     __ verify_oop(dest->as_register());
   } else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) {

@@ -1208,11 +1213,11 @@
 
 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
   Register len =  op->len()->as_register();
   __ uxtw(len, len);
 
-  if (UseSlowPath ||
+  if (UseSlowPath || op->type() == T_VALUETYPE ||
       (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
       (!UseFastNewTypeArray   && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
     __ b(*op->stub()->entry());
   } else {
     Register tmp1 = op->tmp1()->as_register();

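In emit_alloc_array above, the added op->type() == T_VALUETYPE clause sends every value type array allocation to the slow-path stub, while the existing fast paths stay gated on UseFastNewObjectArray / UseFastNewTypeArray. A standalone sketch of the resulting decision follows; needs_slow_path and its parameters are illustrative names for the sketch only.

// Standalone sketch (not HotSpot code): models the slow-path decision in
// emit_alloc_array. Arrays of value types always take the runtime stub.
#include <cassert>

enum BasicType { T_INT, T_OBJECT, T_ARRAY, T_VALUETYPE };

static bool needs_slow_path(BasicType elem_type,
                            bool use_slow_path,
                            bool use_fast_new_object_array,
                            bool use_fast_new_type_array) {
  bool is_obj_array = (elem_type == T_OBJECT || elem_type == T_ARRAY);
  return use_slow_path ||
         elem_type == T_VALUETYPE ||                      // new: value type arrays
         (!use_fast_new_object_array && is_obj_array) ||  // object arrays
         (!use_fast_new_type_array   && !is_obj_array);   // primitive arrays
}

int main() {
  // Value type arrays go to the stub regardless of the fast-path flags.
  assert(needs_slow_path(T_VALUETYPE, false, true, true));
  assert(!needs_slow_path(T_INT, false, true, true));
  return 0;
}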
@@ -1926,14 +1931,14 @@
   } else if (opr1->is_single_cpu() || opr1->is_double_cpu()) {
     Register reg1 = as_reg(opr1);
     if (opr2->is_single_cpu()) {
       // cpu register - cpu register
       Register reg2 = opr2->as_register();
-      if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) {
+      if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY || opr1->type() == T_VALUETYPE) {
         __ cmpoop(reg1, reg2);
       } else {
-        assert(opr2->type() != T_OBJECT && opr2->type() != T_ARRAY, "cmp int, oop?");
+        assert(opr2->type() != T_OBJECT && opr2->type() != T_ARRAY && opr2->type() != T_VALUETYPE, "cmp int, oop?");
         __ cmpw(reg1, reg2);
       }
       return;
     }
     if (opr2->is_double_cpu()) {

@@ -1956,10 +1961,11 @@
         imm = opr2->as_constant_ptr()->as_jlong();
         break;
       case T_ADDRESS:
         imm = opr2->as_constant_ptr()->as_jint();
         break;
+      case T_VALUETYPE:
       case T_OBJECT:
       case T_ARRAY:
         jobject2reg(opr2->as_constant_ptr()->as_jobject(), rscratch1);
         __ cmpoop(reg1, rscratch1);
         return;

@@ -2123,10 +2129,11 @@
         ShouldNotReachHere();
         break;
       }
       break;
     case T_LONG:
+    case T_VALUETYPE: 
     case T_ADDRESS:
     case T_OBJECT:
       switch (code) {
       case lir_shl:  __ lslv (dreg, lreg, count->as_register()); break;
       case lir_shr:  __ asrv (dreg, lreg, count->as_register()); break;

@@ -2159,10 +2166,11 @@
         break;
       }
       break;
     case T_LONG:
     case T_ADDRESS:
+    case T_VALUETYPE:
     case T_OBJECT:
       switch (code) {
       case lir_shl:  __ lsl (dreg, lreg, count); break;
       case lir_shr:  __ asr (dreg, lreg, count); break;
       case lir_ushr: __ lsr (dreg, lreg, count); break;

@@ -2887,10 +2895,11 @@
         type = 0;
         break;
       case T_INT:
       case T_LONG:
       case T_OBJECT:
+      case T_VALUETYPE:
         type = 1;
         break;
       case T_FLOAT:
         type = 2;
         break;

@@ -3153,10 +3162,11 @@
     break;
   case T_LONG:
     xchg = &MacroAssembler::atomic_xchgal;
     add = &MacroAssembler::atomic_addal;
     break;
+  case T_VALUETYPE:
   case T_OBJECT:
   case T_ARRAY:
     if (UseCompressedOops) {
       xchg = &MacroAssembler::atomic_xchgalw;
       add = &MacroAssembler::atomic_addalw;