src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp

rev 54670 : Port of valuetypes to aarch64

@@ -1294,11 +1294,15 @@
   bind(L_fallthrough);
 }
 
 
 void MacroAssembler::verify_oop(Register reg, const char* s) {
-  if (!VerifyOops) return;
+  if (!VerifyOops || VerifyAdapterSharing) {
+    // The address of the code string below confuses VerifyAdapterSharing
+    // because it may differ between otherwise equivalent adapters.
+    return;
+  }
 
   // Pass register number to verify_oop_subroutine
   const char* b = NULL;
   {
     ResourceMark rm;

@@ -1324,11 +1328,15 @@
 
   BLOCK_COMMENT("} verify_oop");
 }
 
 void MacroAssembler::verify_oop_addr(Address addr, const char* s) {
-  if (!VerifyOops) return;
+  if (!VerifyOops || VerifyAdapterSharing) {
+    // The address of the code string below confuses VerifyAdapterSharing
+    // because it may differ between otherwise equivalent adapters.
+    return;
+  }
 
   const char* b = NULL;
   {
     ResourceMark rm;
     stringStream ss;
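
Note: VerifyAdapterSharing compares generated adapters byte-for-byte, while
verify_oop bakes the address of the freshly allocated code string 'b' into
the instruction stream, roughly (a sketch of the effect, not the exact code
emitted):

    movptr(r0, (uintptr_t)b);   // 'b' lives in a ResourceArea and its
                                // address differs per compilation

Two otherwise equivalent adapters therefore differ in these immediate bits,
which is why both verify_oop variants above bail out early when
VerifyAdapterSharing is set.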

@@ -1427,10 +1435,14 @@
   pass_arg1(this, arg_1);
   pass_arg2(this, arg_2);
   call_VM_leaf_base(entry_point, 3);
 }
 
+void MacroAssembler::super_call_VM_leaf(address entry_point) {
+  MacroAssembler::call_VM_leaf_base(entry_point, 0);
+}
+
 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
   pass_arg0(this, arg_0);
   MacroAssembler::call_VM_leaf_base(entry_point, 1);
 }
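
Note: the new zero-argument overload mirrors the existing one- to
three-argument forms; a caller invokes it as, for example:

    // 'some_leaf' stands in for a hypothetical C leaf entry point
    super_call_VM_leaf(CAST_FROM_FN_PTR(address, some_leaf));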
 

@@ -1476,10 +1488,43 @@
     // nothing to do, (later) access of M[reg + offset]
     // will provoke OS NULL exception if reg = NULL
   }
 }
 
+void MacroAssembler::test_klass_is_value(Register klass, Register temp_reg, Label& is_value) {
+  ldrw(temp_reg, Address(klass, Klass::access_flags_offset()));
+  andr(temp_reg, temp_reg, JVM_ACC_VALUE);
+  cbnz(temp_reg, is_value);
+}
+
+void MacroAssembler::test_field_is_flattenable(Register flags, Register temp_reg, Label& is_flattenable) {
+  (void) temp_reg; // keep signature uniform with x86
+  tbnz(flags, ConstantPoolCacheEntry::is_flattenable_field_shift, is_flattenable);
+}
+
+void MacroAssembler::test_field_is_not_flattenable(Register flags, Register temp_reg, Label& not_flattenable) {
+  (void) temp_reg; // keep signature uniform with x86
+  tbz(flags, ConstantPoolCacheEntry::is_flattenable_field_shift, not_flattenable);
+}
+
+void MacroAssembler::test_field_is_flattened(Register flags, Register temp_reg, Label& is_flattened) {
+  (void) temp_reg; // keep signature uniform with x86
+  tbnz(flags, ConstantPoolCacheEntry::is_flattened_field_shift, is_flattened);
+}
+
+void MacroAssembler::test_flat_array_klass(Register klass, Register temp_reg, Label& is_flattened) {
+  ldrw(temp_reg, Address(klass, Klass::layout_helper_offset()));
+  asrw(temp_reg, temp_reg, Klass::_lh_array_tag_shift);
+  cmpw(temp_reg, Klass::_lh_array_tag_vt_value);
+  br(Assembler::EQ, is_flattened);
+}
+
+void MacroAssembler::test_flat_array_oop(Register oop, Register temp_reg, Label& is_flattened) {
+  load_klass(temp_reg, oop);
+  test_flat_array_klass(temp_reg, temp_reg, is_flattened);
+}
+
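+// Example use of the predicates above (a hypothetical call site): branch
+// off the fast path when a resolved field is flattenable, given the
+// ConstantPoolCacheEntry flags in 'flags':
+//
+//   Label L_flattenable;
+//   test_field_is_flattenable(flags, rscratch1, L_flattenable);
+//   // ... non-flattenable access ...
+//   bind(L_flattenable);
+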
 // MacroAssembler protected routines needed to implement
 // public methods
 
 void MacroAssembler::mov(Register r, Address dest) {
   code_section()->relocate(pc(), dest.rspec());

@@ -5850,5 +5895,282 @@
     mov(dst, c_rarg0);
   }
 
   pop(saved_regs, sp);
 }
+
+// C2 compiled method's prolog code
+// Moved here from aarch64.ad to support the Valhalla code below
+void MacroAssembler::verified_entry(Compile* C, int sp_inc) {
+  (void) sp_inc; // not consumed here yet; keeps the signature uniform with x86
+
+  // n.b. frame size includes space for the return pc and rfp
+  const long framesize = C->frame_size_in_bytes();
+  assert(framesize % (2 * wordSize) == 0, "must preserve 2 * wordSize alignment");
+
+  // insert a nop at the start of the prolog so we can patch in a
+  // branch if we need to invalidate the method later
+  nop();
+
+  int bangsize = C->bang_size_in_bytes();
+  if (C->need_stack_bang(bangsize) && UseStackBanging) {
+    generate_stack_overflow_check(bangsize);
+  }
+
+  build_frame(framesize);
+
+  if (NotifySimulator) {
+    notify(Assembler::method_entry);
+  }
+
+  if (VerifyStackAtCalls) {
+    Unimplemented();
+  }
+}
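+
+// For a small frame, the sequence emitted above is roughly (a sketch
+// assuming build_frame's short-frame path; the exact encoding may differ):
+//   nop                                  // patchable for later invalidation
+//   sub  sp, sp, #framesize
+//   stp  rfp, lr, [sp, #framesize - 16]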
+
+// DMS TODO: Need extra eyes to bring the code below into good shape.
+void MacroAssembler::unpack_value_args(Compile* C, bool receiver_only) {
+
+  assert(C->has_scalarized_args(), "value type argument scalarization is disabled");
+  Method* method = C->method()->get_Method();
+  const GrowableArray<SigEntry>* sig_cc = method->adapter()->get_sig_cc();
+  assert(sig_cc != NULL, "must have scalarized signature");
+
+  // Get unscalarized calling convention
+  BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, sig_cc->length());
+  int args_passed = 0;
+  if (!method->is_static()) {
+    sig_bt[args_passed++] = T_OBJECT;
+  }
+  if (!receiver_only) {
+    for (SignatureStream ss(method->signature()); !ss.at_return_type(); ss.next()) {
+      BasicType bt = ss.type();
+      sig_bt[args_passed++] = bt;
+      if (type2size[bt] == 2) {
+        sig_bt[args_passed++] = T_VOID;
+      }
+    }
+  } else {
+    // Only unpack the receiver, all other arguments are already scalarized
+    InstanceKlass* holder = method->method_holder();
+    int rec_len = holder->is_value() ? ValueKlass::cast(holder)->extended_sig()->length() : 1;
+    // Copy scalarized signature but skip receiver, value type delimiters and reserved entries
+    for (int i = 0; i < sig_cc->length(); i++) {
+      if (!SigEntry::is_reserved_entry(sig_cc, i)) {
+        if (SigEntry::skip_value_delimiters(sig_cc, i) && rec_len <= 0) {
+          sig_bt[args_passed++] = sig_cc->at(i)._bt;
+        }
+        rec_len--;
+      }
+    }
+  }
+
+  VMRegPair* regs = NEW_RESOURCE_ARRAY(VMRegPair, args_passed);
+  int args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, args_passed, false);
+
+  // Get scalarized calling convention
+  int args_passed_cc = SigEntry::fill_sig_bt(sig_cc, sig_bt);
+  VMRegPair* regs_cc = NEW_RESOURCE_ARRAY(VMRegPair, sig_cc->length());
+  int args_on_stack_cc = SharedRuntime::java_calling_convention(sig_bt, regs_cc, args_passed_cc, false);
+
+  // Check if we need to extend the stack for unpacking
+  int sp_inc = (args_on_stack_cc - args_on_stack) * VMRegImpl::stack_slot_size;
+  if (sp_inc > 0) {
+    // Adjust the stack, keeping it 16-byte aligned. Unlike on x86, the
+    // return address lives in lr rather than on the stack, so it does not
+    // need to be saved and copied to the new stack top here.
+    sp_inc = align_up(sp_inc, StackAlignmentInBytes);
+    sub(sp, sp, sp_inc);
+  } else {
+    // The scalarized calling convention needs less stack space than the unscalarized one.
+    // No need to extend the stack, the caller will take care of these adjustments.
+    sp_inc = 0;
+  }
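+  // Worked example (illustrative): with args_on_stack_cc = 10 and
+  // args_on_stack = 4, sp_inc = (10 - 4) * 4 = 24 bytes (stack slots are
+  // 4 bytes wide), rounded up to 32 by the 16-byte StackAlignmentInBytes.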
+
+  // Initialize register/stack slot states (make all writable)
+  int max_stack = MAX2(args_on_stack + sp_inc/VMRegImpl::stack_slot_size, args_on_stack_cc);
+  int max_reg = VMRegImpl::stack2reg(max_stack)->value();
+  RegState* reg_state = NEW_RESOURCE_ARRAY(RegState, max_reg);
+  for (int i = 0; i < max_reg; ++i) {
+    reg_state[i] = reg_writable;
+  }
+  // Set all source registers/stack slots to readonly to prevent accidental overwriting
+  for (int i = 0; i < args_passed; ++i) {
+    VMReg reg = regs[i].first();
+    if (!reg->is_valid()) continue;
+    if (reg->is_stack()) {
+      // Update source stack location by adding stack increment
+      reg = VMRegImpl::stack2reg(reg->reg2stack() + sp_inc/VMRegImpl::stack_slot_size);
+      regs[i] = reg;
+    }
+    assert(reg->value() >= 0 && reg->value() < max_reg, "reg value out of bounds");
+    reg_state[reg->value()] = reg_readonly;
+  }
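+  // To summarize the state machine used below: reg_readonly marks a live
+  // source that must not be clobbered yet, reg_writable marks a free
+  // slot, and reg_written a destination that holds its final value.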
+
+  // Emit code for unpacking value type arguments
+  // We try multiple times and eventually start spilling to resolve (circular) dependencies
+  bool done = false;
+  for (int i = 0; i < 2 * args_passed_cc && !done; ++i) {
+    done = true;
+    bool spill = (i > args_passed_cc); // Start spilling?
+    // Iterate over all arguments (in reverse)
+    for (int from_index = args_passed - 1, to_index = args_passed_cc - 1, sig_index = sig_cc->length() - 1; sig_index >= 0; sig_index--) {
+      if (SigEntry::is_reserved_entry(sig_cc, sig_index)) {
+        to_index--; // Skip reserved entry
+      } else {
+        assert(from_index >= 0, "index out of bounds");
+        VMReg reg = regs[from_index].first();
+        if (spill && reg->is_valid() && reg_state[reg->value()] == reg_readonly) {
+          // Spill argument to be able to write the source and resolve circular dependencies
+          VMReg spill_reg = r14->as_VMReg();
+          bool res = move_helper(reg, spill_reg, T_DOUBLE, reg_state, sp_inc);
+          assert(res, "Spilling should not fail");
+          // Set spill_reg as new source and update state
+          reg = spill_reg;
+          regs[from_index].set1(reg);
+          reg_state[reg->value()] = reg_readonly;
+          spill = false; // Do not spill again in this round
+        }
+        BasicType bt = sig_cc->at(sig_index)._bt;
+        if (SigEntry::skip_value_delimiters(sig_cc, sig_index)) {
+          assert(to_index >= 0, "index out of bounds");
+          done &= move_helper(reg, regs_cc[to_index].first(), bt, reg_state, sp_inc);
+          to_index--;
+        } else if (!receiver_only || (from_index == 0 && bt == T_VOID)) {
+          done &= unpack_value_helper(sig_cc, sig_index, reg, regs_cc, to_index, reg_state, sp_inc);
+        } else {
+          continue;
+        }
+        from_index--;
+      }
+    }
+  }
+  guarantee(done, "Could not resolve circular dependency when unpacking value type arguments");
+
+  // Emit code for verified entry and save increment for stack repair on return
+  verified_entry(C, sp_inc);
+}
+
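+// Illustrative spill scenario for unpack_value_args above (hypothetical):
+// if the packed receiver arrives in r1 while the scalarized convention
+// assigns field A to r1 and field B to r2, unpacking A first would clobber
+// the source oop. After args_passed_cc fruitless rounds, the loop spills
+// the source to r14 and unpacks from the spill copy instead.
+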
+bool MacroAssembler::move_helper(VMReg from, VMReg to, BasicType bt, RegState reg_state[], int ret_off) {
+  if (reg_state[to->value()] == reg_written) {
+    return true; // Already written
+  }
+  if (from != to && bt != T_VOID) {
+    if (reg_state[to->value()] == reg_readonly) {
+      return false; // Not yet writable
+    }
+    if (from->is_reg()) {
+      if (to->is_reg()) {
+        mov(to->as_Register(), from->as_Register());
+      } else {
+        int st_off = to->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
+        assert(st_off != ret_off, "overwriting return address at %d", st_off);
+        Address to_addr = Address(sp, st_off);
+        str(from->as_Register(), to_addr);
+      }
+    } else {
+      Address from_addr = Address(sp, from->reg2stack() * VMRegImpl::stack_slot_size + wordSize);
+      if (to->is_reg()) {
+        ldr(to->as_Register(), from_addr);
+      } else {
+        int st_off = to->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
+        assert(st_off != ret_off, "overwriting return address at %d", st_off);
+        ldr(rscratch1, from_addr);
+        str(rscratch1, Address(sp, st_off));
+      }
+    }
+  }
+  // Update register states
+  reg_state[from->value()] = reg_writable;
+  reg_state[to->value()] = reg_written;
+  return true;
+}
+
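+// Transition summary for move_helper above (sketch): a completed move
+// frees its source slot (reg_writable) and retires its destination
+// (reg_written); a reg_readonly destination reports failure so that the
+// caller's retry loop can reorder or spill.
+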
+bool MacroAssembler::unpack_value_helper(const GrowableArray<SigEntry>* sig, int& sig_index, VMReg from, VMRegPair* regs_to, int& to_index, RegState reg_state[], int ret_off) {
+  Register fromReg = from->is_reg() ? from->as_Register() : noreg;
+  assert(sig->at(sig_index)._bt == T_VOID, "should be at end delimiter");
+
+  int vt = 1;
+  bool done = true;
+  bool mark_done = true;
+  do {
+    sig_index--;
+    BasicType bt = sig->at(sig_index)._bt;
+    if (bt == T_VALUETYPE) {
+      vt--;
+    } else if (bt == T_VOID && sig->at(sig_index-1)._bt != T_LONG && sig->at(sig_index-1)._bt != T_DOUBLE) {
+      vt++;
+    } else if (SigEntry::is_reserved_entry(sig, sig_index)) {
+      to_index--; // Skip reserved entry
+    } else {
+      assert(to_index >= 0, "invalid to_index");
+      VMRegPair pair_to = regs_to[to_index--];
+      VMReg r_1 = pair_to.first();
+      VMReg r_2 = pair_to.second();
+
+      if (bt == T_VOID) continue;
+
+      int idx = (int) r_1->value();
+      if (reg_state[idx] == reg_readonly) {
+        if (idx != from->value()) {
+          mark_done = false;
+        }
+        done = false;
+        continue;
+      } else if (reg_state[idx] == reg_written) {
+        continue;
+      } else {
+        assert(reg_state[idx] == reg_writable, "must be writable");
+        reg_state[idx] = reg_written;
+      }
+
+      if (fromReg == noreg) {
+        // The value object was passed on the stack; load the oop into r10
+        int st_off = from->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
+        ldr(r10, Address(sp, st_off));
+        fromReg = r10;
+      }
+
+      int off = sig->at(sig_index)._offset;
+      assert(off > 0, "offset in object should be positive");
+
+      Address fromAddr = Address(fromReg, off);
+
+      if (r_1->is_stack()) {
+        // Convert the stack slot to an SP offset (+ wordSize to account
+        // for the return address slot, matching move_helper)
+        int st_off = r_1->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
+        if (!r_2->is_valid()) {
+          // Sign-extend the 32-bit value to fill the 64-bit stack slot
+          ldrsw(rscratch2, fromAddr);
+          str(rscratch2, Address(sp, st_off));
+        } else {
+          ldr(rscratch2, fromAddr);
+          str(rscratch2, Address(sp, st_off));
+        }
+      } else if (r_1->is_Register()) {  // Register argument
+        Register r = r_1->as_Register();
+        if (r_2->is_valid()) {
+          ldr(r, fromAddr);
+        } else {
+          ldrw(r, fromAddr);
+        }
+      } else {                          // FloatRegister argument
+        if (!r_2->is_valid()) {
+          ldrs(r_1->as_FloatRegister(), fromAddr);
+        } else {
+          ldrd(r_1->as_FloatRegister(), fromAddr);
+        }
+      }
+    }
+  } while (vt != 0);
+
+  if (mark_done && reg_state[from->value()] != reg_written) {
+    // This is okay because no one else will write to that slot
+    reg_state[from->value()] = reg_writable;
+  }
+  return done;
+}
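+
+// Layout example (illustrative): a value type with two int fields appears
+// in sig_cc as [T_VALUETYPE, T_INT, T_INT, T_VOID]. The walk above starts
+// at the trailing T_VOID delimiter and moves backwards; 'vt' tracks the
+// nesting depth when value type fields are themselves value types.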
+