src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp

@@ -461,10 +461,11 @@
     case T_BYTE:
     case T_BOOLEAN:
     case T_INT:
     case T_ARRAY:
     case T_OBJECT:
+    case T_VALUETYPE:
     case T_ADDRESS:
       if( reg_arg0 == 9999 )  {
         reg_arg0 = i;
         regs[i].set1(rcx->as_VMReg());
       } else if( reg_arg1 == 9999 )  {

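With T_VALUETYPE added to the same case list as T_OBJECT, a buffered value type argument is assigned a register exactly as an object reference would be. A minimal illustrative sketch, assuming the four-argument form of java_calling_convention used here and the usual x86_32 rule that the first two int-like arguments land in rcx and rdx (the rdx branch sits just below this hunk):

    // Hypothetical query, not part of the patch:
    BasicType sig_bt[] = { T_VALUETYPE, T_INT };
    VMRegPair regs[2];
    SharedRuntime::java_calling_convention(sig_bt, regs, 2, /* is_outgoing */ false);
    // regs[0] is rcx (as for T_OBJECT), regs[1] is the next integer register.
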
@@ -511,10 +512,19 @@
 
  // The return value can be an odd number of VMRegImpl stack slots; make it a multiple of 2.
   return align_up(stack, 2);
 }
 
+const uint SharedRuntime::java_return_convention_max_int = 1;
+const uint SharedRuntime::java_return_convention_max_float = 1;
+int SharedRuntime::java_return_convention(const BasicType *sig_bt,
+                                          VMRegPair *regs,
+                                          int total_args_passed) {
+  Unimplemented();
+  return 0;
+}
+
 // Patch the callers callsite with entry to compiled code if it exists.
 static void patch_callers_callsite(MacroAssembler *masm) {
   Label L;
   __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), (int32_t)NULL_WORD);
   __ jcc(Assembler::equal, L);

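The 32-bit port only declares the scalarized return convention; the body is left Unimplemented(). A hedged usage sketch, purely illustrative: the signature comes from this hunk, while the meaning of the return value (number of VMRegs consumed) is an assumption carried over from the 64-bit port.

    // Hypothetical query for a value type returned as two scalarized fields:
    BasicType ret_bt[] = { T_INT, T_FLOAT };
    VMRegPair ret_regs[2];
    int used = SharedRuntime::java_return_convention(ret_bt, ret_regs, 2);
    // On x86_32 the call aborts in Unimplemented() before returning.
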
@@ -572,15 +582,17 @@
   int next_off = st_off - Interpreter::stackElementSize;
   __ movdbl(Address(rsp, next_off), r);
 }
 
 static void gen_c2i_adapter(MacroAssembler *masm,
-                            int total_args_passed,
-                            int comp_args_on_stack,
-                            const BasicType *sig_bt,
+                            const GrowableArray<SigEntry>& sig_extended,
                             const VMRegPair *regs,
-                            Label& skip_fixup) {
+                            Label& skip_fixup,
+                            address start,
+                            OopMapSet*& oop_maps,
+                            int& frame_complete,
+                            int& frame_size_in_words) {
   // Before we get into the guts of the C2I adapter, see if we should be here
   // at all.  We've come from compiled code and are attempting to jump to the
   // interpreter, which means the caller made a static call to get here
   // (vcalls always get a compiled target if there is one).  Check for a
   // compiled target.  If there is one, we need to patch the caller's call.

@@ -598,29 +610,29 @@
 #endif /* COMPILER2 */
 
  // Since all args are passed on the stack, sig_extended.length() *
  // Interpreter::stackElementSize is the space we need.
-  int extraspace = total_args_passed * Interpreter::stackElementSize;
+  int extraspace = sig_extended.length() * Interpreter::stackElementSize;
 
   // Get return address
   __ pop(rax);
 
   // set senderSP value
   __ movptr(rsi, rsp);
 
   __ subptr(rsp, extraspace);
 
   // Now write the args into the outgoing interpreter space
-  for (int i = 0; i < total_args_passed; i++) {
-    if (sig_bt[i] == T_VOID) {
-      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
+  for (int i = 0; i < sig_extended.length(); i++) {
+    if (sig_extended.at(i)._bt == T_VOID) {
+      assert(i > 0 && (sig_extended.at(i-1)._bt == T_LONG || sig_extended.at(i-1)._bt == T_DOUBLE), "missing half");
       continue;
     }
 
     // st_off points to lowest address on stack.
-    int st_off = ((total_args_passed - 1) - i) * Interpreter::stackElementSize;
+    int st_off = ((sig_extended.length() - 1) - i) * Interpreter::stackElementSize;
     int next_off = st_off - Interpreter::stackElementSize;
 
     // Say 4 args:
     // i   st_off
     // 0   12 T_LONG

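The offsets written by the loop above follow directly from the extended signature length. A short worked sketch for the four-argument case the comment starts to lay out, assuming the 32-bit Interpreter::stackElementSize of 4 bytes:

    // sig_extended.length() == 4, stackElementSize == 4:
    //   i == 0 -> st_off == 12,  i == 1 -> st_off == 8,
    //   i == 2 -> st_off ==  4,  i == 3 -> st_off == 0
    // next_off (st_off - 4) is only consulted for the T_LONG/T_DOUBLE halves.
    int st_off   = ((sig_extended.length() - 1) - i) * Interpreter::stackElementSize;
    int next_off = st_off - Interpreter::stackElementSize;
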
@@ -666,11 +678,11 @@
       } else {
         // long/double in gpr
         NOT_LP64(ShouldNotReachHere());
         // Two VMRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
         // T_DOUBLE and T_LONG use two slots in the interpreter
-        if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
+        if (sig_extended.at(i)._bt == T_LONG || sig_extended.at(i)._bt == T_DOUBLE) {
           // long/double in gpr
 #ifdef ASSERT
           // Overwrite the unused slot with known junk
           LP64_ONLY(__ mov64(rax, CONST64(0xdeadffffdeadaaab)));
           __ movptr(Address(rsp, st_off), rax);

@@ -683,11 +695,11 @@
     } else {
       assert(r_1->is_XMMRegister(), "");
       if (!r_2->is_valid()) {
         __ movflt(Address(rsp, st_off), r_1->as_XMMRegister());
       } else {
-        assert(sig_bt[i] == T_DOUBLE || sig_bt[i] == T_LONG, "wrong type");
+        assert(sig_extended.at(i)._bt == T_DOUBLE || sig_extended.at(i)._bt == T_LONG, "wrong type");
         move_c2i_double(masm, r_1->as_XMMRegister(), st_off);
       }
     }
   }
 

@@ -716,14 +728,14 @@
   __ jcc(Assembler::below, L_ok);
   __ bind(L_fail);
 }
 
 void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
-                                    int total_args_passed,
                                     int comp_args_on_stack,
-                                    const BasicType *sig_bt,
+                                    const GrowableArray<SigEntry>& sig_extended,
                                     const VMRegPair *regs) {
+
   // Note: rsi contains the senderSP on entry. We must preserve it since
  // we may do an i2c -> c2i transition if we lose a race where compiled
   // code goes non-entrant while we get args ready.
 
   // Adapters can be frameless because they do not require the caller

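Everywhere the removed BasicType array used to be indexed, the adapter now reads the _bt field of a SigEntry. A flat view can still be recovered with a trivial helper; the one below is hypothetical and shown only to make the mapping explicit:

    // Hypothetical helper, not part of this change:
    static void basic_types_from_sig(const GrowableArray<SigEntry>& sig_extended,
                                     BasicType* sig_bt /* sig_extended.length() entries */) {
      for (int i = 0; i < sig_extended.length(); i++) {
        sig_bt[i] = sig_extended.at(i)._bt;
      }
    }
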
@@ -808,24 +820,24 @@
   // Pre-load the register-jump target early, to schedule it better.
   __ movptr(rdi, Address(rbx, in_bytes(Method::from_compiled_offset())));
 
   // Now generate the shuffle code.  Pick up all register args and move the
   // rest through the floating point stack top.
-  for (int i = 0; i < total_args_passed; i++) {
-    if (sig_bt[i] == T_VOID) {
+  for (int i = 0; i < sig_extended.length(); i++) {
+    if (sig_extended.at(i)._bt == T_VOID) {
       // Longs and doubles are passed in native word order, but misaligned
       // in the 32-bit build.
-      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
+      assert(i > 0 && (sig_extended.at(i-1)._bt == T_LONG || sig_extended.at(i-1)._bt == T_DOUBLE), "missing half");
       continue;
     }
 
     // Pick up 0, 1 or 2 words from SP+offset.
 
     assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
             "scrambled load targets?");
     // Load in argument order going down.
-    int ld_off = (total_args_passed - i) * Interpreter::stackElementSize;
+    int ld_off = (sig_extended.length() - i) * Interpreter::stackElementSize;
     // Point to interpreter value (vs. tag)
     int next_off = ld_off - Interpreter::stackElementSize;
     //
     //
     //

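The interpreter-to-compiled direction indexes from the other end: load offsets count down from the full extended signature length rather than length minus one. The same four-argument example, again assuming a 4-byte stack element:

    // sig_extended.length() == 4:
    //   i == 0 -> ld_off == 16,  i == 3 -> ld_off == 4
    int ld_off   = (sig_extended.length() - i) * Interpreter::stackElementSize;
    int next_off = ld_off - Interpreter::stackElementSize;  // LSW for longs/doubles
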
@@ -862,11 +874,11 @@
         //
         // Interpreter local[n] == MSW, local[n+1] == LSW however locals
         // are accessed as negative so LSW is at LOW address
 
         // ld_off is MSW so get LSW
-        const int offset = (NOT_LP64(true ||) sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
+        const int offset = (NOT_LP64(true ||) sig_extended.at(i)._bt==T_LONG||sig_extended.at(i)._bt==T_DOUBLE)?
                            next_off : ld_off;
         __ movptr(rsi, Address(saved_sp, offset));
         __ movptr(Address(rsp, st_off), rsi);
 #ifndef _LP64
         __ movptr(rsi, Address(saved_sp, ld_off));

@@ -880,11 +892,11 @@
         //
         // We are using two VMRegs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE
        // the interpreter allocates two slots but only uses one for the T_LONG or T_DOUBLE case
         // So we must adjust where to pick up the data to match the interpreter.
 
-        const int offset = (NOT_LP64(true ||) sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
+        const int offset = (NOT_LP64(true ||) sig_extended.at(i)._bt==T_LONG||sig_extended.at(i)._bt==T_DOUBLE)?
                            next_off : ld_off;
 
         // this can be a misaligned move
         __ movptr(r, Address(saved_sp, offset));
 #ifndef _LP64

@@ -928,18 +940,18 @@
   __ jmp(rdi);
 }
 
 // ---------------------------------------------------------------
 AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
-                                                            int total_args_passed,
                                                             int comp_args_on_stack,
-                                                            const BasicType *sig_bt,
+                                                            const GrowableArray<SigEntry>& sig_extended,
                                                             const VMRegPair *regs,
-                                                            AdapterFingerPrint* fingerprint) {
+                                                            AdapterFingerPrint* fingerprint,
+                                                            AdapterBlob*& new_adapter) {
   address i2c_entry = __ pc();
 
-  gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);
+  gen_i2c_adapter(masm, comp_args_on_stack, sig_extended, regs);
 
   // -------------------------------------------------------------------------
  // Generate a C2I adapter.  On entry we know rbx holds the Method* during calls
   // to the interpreter.  The args start out packed in the compiled layout.  They
   // need to be unpacked into the interpreter layout.  This will almost always

@@ -972,13 +984,17 @@
     __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
   }
 
   address c2i_entry = __ pc();
 
-  gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);
+  OopMapSet* oop_maps = NULL;
+  int frame_complete = CodeOffsets::frame_never_safe;
+  int frame_size_in_words = 0;
+  gen_c2i_adapter(masm, sig_extended, regs, skip_fixup, i2c_entry, oop_maps, frame_complete, frame_size_in_words);
 
   __ flush();
+  new_adapter = AdapterBlob::create(masm->code(), frame_complete, frame_size_in_words, oop_maps);
   return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
 }
 
 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                          VMRegPair *regs,

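generate_i2c2i_adapters now hands the finished AdapterBlob back through the new reference parameter instead of leaving blob creation to shared code. A hedged sketch of the caller side, assuming it simply forwards a local pointer:

    AdapterBlob* new_adapter = NULL;
    AdapterHandlerEntry* entry =
        SharedRuntime::generate_i2c2i_adapters(masm, comp_args_on_stack, sig_extended,
                                                regs, fingerprint, new_adapter);
    // On return, new_adapter holds the adapter code, the c2i frame size and the oop maps.
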
@@ -998,10 +1014,11 @@
     case T_FLOAT:
     case T_BYTE:
     case T_SHORT:
     case T_INT:
     case T_OBJECT:
+    case T_VALUETYPE:
     case T_ARRAY:
     case T_ADDRESS:
     case T_METADATA:
       regs[i].set1(VMRegImpl::stack2reg(stack++));
       break;

@@ -1279,10 +1296,11 @@
           } else {
             __ movl(reg, Address(rsp, offset));
           }
           break;
         case T_OBJECT:
+        case T_VALUETYPE:
         default: ShouldNotReachHere();
       }
     } else if (in_regs[i].first()->is_XMMRegister()) {
       if (in_sig_bt[i] == T_FLOAT) {
         int slot = handle_index++ * VMRegImpl::slots_per_word + arg_save_area;

@@ -1415,11 +1433,11 @@
                             const VMRegPair* regs) {
   Register temp_reg = rbx;  // not part of any compiled calling seq
   if (VerifyOops) {
     for (int i = 0; i < method->size_of_parameters(); i++) {
       if (sig_bt[i] == T_OBJECT ||
-          sig_bt[i] == T_ARRAY) {
+          sig_bt[i] == T_ARRAY || sig_bt[i] == T_VALUETYPE) {
         VMReg r = regs[i].first();
         assert(r->is_valid(), "bad oop arg");
         if (r->is_stack()) {
           __ movptr(temp_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
           __ verify_oop(temp_reg);

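The same three-type test now appears at several sites in this file (the VerifyOops loop above, the native-wrapper argument and return paths below). A hypothetical predicate, not introduced by this patch, that captures the intent:

    // Hypothetical helper, not part of this change:
    static bool is_oop_bt(BasicType bt) {
      return bt == T_OBJECT || bt == T_ARRAY || bt == T_VALUETYPE;
    }
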
@@ -1888,10 +1906,11 @@
         if (is_critical_native) {
           unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]);
           c_arg++;
           break;
         }
+      case T_VALUETYPE:
       case T_OBJECT:
         assert(!is_critical_native, "no oop arguments");
         object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
                     ((i == 0) && (!is_static)),
                     &receiver_offset);

@@ -2070,10 +2089,11 @@
   case T_DOUBLE :
   case T_FLOAT  :
     // Result is in st0 we'll save as needed
     break;
   case T_ARRAY:                 // Really a handle
+  case T_VALUETYPE:             // Really a handle
   case T_OBJECT:                // Really a handle
       break; // can't de-handlize until after safepoint check
   case T_VOID: break;
   case T_LONG: break;
   default       : ShouldNotReachHere();

@@ -2216,11 +2236,11 @@
   // We can finally stop using that last_Java_frame we setup ages ago
 
   __ reset_last_Java_frame(thread, false);
 
   // Unbox oop result, e.g. JNIHandles::resolve value.
-  if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
+  if (ret_type == T_OBJECT || ret_type == T_ARRAY || ret_type == T_VALUETYPE) {
     __ resolve_jobject(rax /* value */,
                        thread /* thread */,
                        rcx /* tmp */);
   }
 

@@ -3168,5 +3188,10 @@
 
   // return the  blob
   // frame_size_words or bytes??
   return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_words, oop_maps, true);
 }
+
+BufferedValueTypeBlob* SharedRuntime::generate_buffered_value_type_adapter(const ValueKlass* vk) {
+  Unimplemented();
+  return NULL;
+}