
src/hotspot/cpu/x86/templateInterpreterGenerator_x86.cpp

Old version (before the value-type changes):

  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.hpp"
  27 #include "compiler/disassembler.hpp"
  28 #include "gc/shared/barrierSetAssembler.hpp"
  29 #include "interpreter/bytecodeHistogram.hpp"
  30 #include "interpreter/interp_masm.hpp"
  31 #include "interpreter/interpreter.hpp"
  32 #include "interpreter/interpreterRuntime.hpp"
  33 #include "interpreter/templateInterpreterGenerator.hpp"
  34 #include "interpreter/templateTable.hpp"
  35 #include "oops/arrayOop.hpp"
  36 #include "oops/methodData.hpp"
  37 #include "oops/method.hpp"
  38 #include "oops/oop.inline.hpp"
  39 #include "prims/jvmtiExport.hpp"
  40 #include "prims/jvmtiThreadState.hpp"
  41 #include "runtime/arguments.hpp"
  42 #include "runtime/deoptimization.hpp"
  43 #include "runtime/frame.inline.hpp"
  44 #include "runtime/sharedRuntime.hpp"
  45 #include "runtime/stubRoutines.hpp"
  46 #include "runtime/synchronizer.hpp"
  47 #include "runtime/timer.hpp"
  48 #include "runtime/vframeArray.hpp"
  49 #include "utilities/debug.hpp"
  50 #include "utilities/macros.hpp"
  51 
  52 #define __ Disassembler::hook<InterpreterMacroAssembler>(__FILE__, __LINE__, _masm)->
  53 
  54 // Size of interpreter code.  Increase if too small.  The interpreter will
  55 // fail with a guarantee ("not enough space for interpreter generation")
  56 // if too small.
  57 // Run with -XX:+PrintInterpreter to get the VM to print out the size.
  58 // Max size with JVMTI
  59 #ifdef AMD64
  60 int TemplateInterpreter::InterpreterCodeSize = JVMCI_ONLY(268) NOT_JVMCI(256) * 1024;
  61 #else
  62 int TemplateInterpreter::InterpreterCodeSize = 224 * 1024;
  63 #endif // AMD64
  64 
  65 // Global Register Names
  66 static const Register rbcp     = LP64_ONLY(r13) NOT_LP64(rsi);
  67 static const Register rlocals  = LP64_ONLY(r14) NOT_LP64(rdi);
  68 
  69 const int method_offset = frame::interpreter_frame_method_offset * wordSize;
  70 const int bcp_offset    = frame::interpreter_frame_bcp_offset    * wordSize;
  71 const int locals_offset = frame::interpreter_frame_locals_offset * wordSize;
  72 
  73 
  74 //-----------------------------------------------------------------------------
  75 
  76 address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  77   address entry = __ pc();
  78 
  79 #ifdef ASSERT
  80   {


 188   }
 189 #endif // COMPILER2
 190   if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) {
 191     __ MacroAssembler::verify_FPU(1, "generate_return_entry_for compiled");
 192   } else {
 193     __ MacroAssembler::verify_FPU(0, "generate_return_entry_for compiled");
 194   }
 195 
 196   if (state == ftos) {
 197     __ MacroAssembler::verify_FPU(UseSSE >= 1 ? 0 : 1, "generate_return_entry_for in interpreter");
 198   } else if (state == dtos) {
 199     __ MacroAssembler::verify_FPU(UseSSE >= 2 ? 0 : 1, "generate_return_entry_for in interpreter");
 200   }
 201 #endif // _LP64
 202 
 203   // Restore stack bottom in case i2c adjusted stack
 204   __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  205   // and NULL it as a marker that esp is now tos until the next Java call
 206   __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
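These two moves implement the interpreter's last_sp protocol: interpreter_frame_last_sp holds a saved expression-stack pointer while a call is in progress, and the i2c adapter may have grown the caller's stack in the meantime, so rsp is first restored from that slot; the slot is then cleared so that a NULL value means "rsp itself is the current top-of-stack" until the next Java call stores a new value there.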
  207 
 208   __ restore_bcp();
 209   __ restore_locals();
 210 
 211   if (state == atos) {
 212     Register mdp = rbx;
 213     Register tmp = rcx;
 214     __ profile_return_type(mdp, rax, tmp);
 215   }
 216 
 217   const Register cache = rbx;
 218   const Register index = rcx;
 219   __ get_cache_and_index_at_bcp(cache, index, 1, index_size);
 220 
 221   const Register flags = cache;
 222   __ movl(flags, Address(cache, index, Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
 223   __ andl(flags, ConstantPoolCacheEntry::parameter_size_mask);
 224   __ lea(rsp, Address(rsp, flags, Interpreter::stackElementScale()));
 225 
 226    const Register java_thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
 227    if (JvmtiExport::can_pop_frame()) {


 330         // Store as float and empty fpu stack
 331         __ fstp_s(Address(rsp, 0));
 332         // and reload
 333         __ movflt(xmm0, Address(rsp, 0));
 334       } else if (type == T_DOUBLE && UseSSE >= 2 ) {
 335         __ movdbl(xmm0, Address(rsp, 0));
 336       } else {
 337         // restore ST0
 338         __ fld_d(Address(rsp, 0));
 339       }
 340       // and pop the temp
 341       __ addptr(rsp, 2 * wordSize);
 342       __ push(t);                           // restore return address
 343     }
 344     break;
 345 #else
 346   case T_FLOAT  : /* nothing to do */        break;
 347   case T_DOUBLE : /* nothing to do */        break;
 348 #endif // _LP64
 349 

 350   case T_OBJECT :
 351     // retrieve result from frame
 352     __ movptr(rax, Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize));
 353     // and verify it
 354     __ verify_oop(rax);
 355     break;
 356   default       : ShouldNotReachHere();
 357   }
 358   __ ret(0);                                   // return from result handler
 359   return entry;
 360 }
 361 
 362 address TemplateInterpreterGenerator::generate_safept_entry_for(
 363         TosState state,
 364         address runtime_entry) {
 365   address entry = __ pc();
 366   __ push(state);
 367   __ call_VM(noreg, runtime_entry);
 368   __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
  369   return entry;


New version (with the value-type changes under review):

  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.hpp"
  27 #include "compiler/disassembler.hpp"
  28 #include "gc/shared/barrierSetAssembler.hpp"
  29 #include "interpreter/bytecodeHistogram.hpp"
  30 #include "interpreter/interp_masm.hpp"
  31 #include "interpreter/interpreter.hpp"
  32 #include "interpreter/interpreterRuntime.hpp"
  33 #include "interpreter/templateInterpreterGenerator.hpp"
  34 #include "interpreter/templateTable.hpp"
  35 #include "oops/arrayOop.hpp"
  36 #include "oops/methodData.hpp"
  37 #include "oops/method.hpp"
  38 #include "oops/oop.inline.hpp"
  39 #include "oops/valueKlass.hpp"
  40 #include "prims/jvmtiExport.hpp"
  41 #include "prims/jvmtiThreadState.hpp"
  42 #include "runtime/arguments.hpp"
  43 #include "runtime/deoptimization.hpp"
  44 #include "runtime/frame.inline.hpp"
  45 #include "runtime/sharedRuntime.hpp"
  46 #include "runtime/stubRoutines.hpp"
  47 #include "runtime/synchronizer.hpp"
  48 #include "runtime/timer.hpp"
  49 #include "runtime/vframeArray.hpp"
  50 #include "utilities/debug.hpp"
  51 #include "utilities/macros.hpp"
  52 
  53 #define __ Disassembler::hook<InterpreterMacroAssembler>(__FILE__, __LINE__, _masm)->
  54 
  55 // Size of interpreter code.  Increase if too small.  The interpreter will
  56 // fail with a guarantee ("not enough space for interpreter generation")
  57 // if too small.
  58 // Run with -XX:+PrintInterpreter to get the VM to print out the size.
  59 // Max size with JVMTI
  60 #ifdef AMD64
  61 int TemplateInterpreter::InterpreterCodeSize = JVMCI_ONLY(280) NOT_JVMCI(268) * 1024;
  62 #else
  63 int TemplateInterpreter::InterpreterCodeSize = 224 * 1024;
  64 #endif // AMD64
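The budget above grows from 256 KB (268 KB with JVMCI) to 268 KB (280 KB with JVMCI), presumably to make room for the additional value-type stubs generated further down. To check the actual footprint against InterpreterCodeSize, run the VM with the flag named in the comment, e.g. java -XX:+PrintInterpreter -version (depending on the build the flag may be diagnostic and need -XX:+UnlockDiagnosticVMOptions in front); if the budget is still too small, startup fails with the "not enough space for interpreter generation" guarantee quoted above.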
  65 
  66 // Global Register Names
  67 static const Register rbcp     = LP64_ONLY(r13) NOT_LP64(rsi);
  68 static const Register rlocals  = LP64_ONLY(r14) NOT_LP64(rdi);
  69 
  70 const int method_offset = frame::interpreter_frame_method_offset * wordSize;
  71 const int bcp_offset    = frame::interpreter_frame_bcp_offset    * wordSize;
  72 const int locals_offset = frame::interpreter_frame_locals_offset * wordSize;
  73 
  74 
  75 //-----------------------------------------------------------------------------
  76 
  77 address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  78   address entry = __ pc();
  79 
  80 #ifdef ASSERT
  81   {


 189   }
 190 #endif // COMPILER2
 191   if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) {
 192     __ MacroAssembler::verify_FPU(1, "generate_return_entry_for compiled");
 193   } else {
 194     __ MacroAssembler::verify_FPU(0, "generate_return_entry_for compiled");
 195   }
 196 
 197   if (state == ftos) {
 198     __ MacroAssembler::verify_FPU(UseSSE >= 1 ? 0 : 1, "generate_return_entry_for in interpreter");
 199   } else if (state == dtos) {
 200     __ MacroAssembler::verify_FPU(UseSSE >= 2 ? 0 : 1, "generate_return_entry_for in interpreter");
 201   }
 202 #endif // _LP64
 203 
 204   // Restore stack bottom in case i2c adjusted stack
 205   __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  206   // and NULL it as a marker that esp is now tos until the next Java call
 207   __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
 208 
 209   if (state == atos && ValueTypeReturnedAsFields) {
 210 #ifndef _LP64
 211     __ super_call_VM_leaf(StubRoutines::store_value_type_fields_to_buf());
 212 #else
  213     // A value type might be returned. If its fields are in registers we
  214     // need to allocate a value type instance and initialize it with
  215     // the values of those fields.
 216     Label skip, slow_case;
  217     // We only need to allocate a new buffered value if one was not already returned
 218     __ testptr(rax, 1);
 219     __ jcc(Assembler::zero, skip);
 220 
 221     // Try to allocate a new buffered value (from the heap)
 222     if (UseTLAB) {
 223       __ mov(rbx, rax);
 224       __ andptr(rbx, -2);
 225 
 226       __ movl(r14, Address(rbx, Klass::layout_helper_offset()));
 227 
 228       __ movptr(r13, Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())));
 229       __ lea(r14, Address(r13, r14, Address::times_1));
 230       __ cmpptr(r14, Address(r15_thread, in_bytes(JavaThread::tlab_end_offset())));
 231       __ jcc(Assembler::above, slow_case);
 232       __ movptr(Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())), r14);
 233       __ movptr(Address(r13, oopDesc::mark_offset_in_bytes()), (intptr_t)markOopDesc::always_locked_prototype());
 234 
 235       __ xorl(rax, rax); // use zero reg to clear memory (shorter code)
 236       __ store_klass_gap(r13, rax);  // zero klass gap for compressed oops
 237       __ mov(rax, rbx);
 238       __ store_klass(r13, rbx);  // klass
 239 
  240       // We have our new buffered value; initialize its fields with a
  241       // value-class-specific handler
 242       __ movptr(rbx, Address(rax, InstanceKlass::adr_valueklass_fixed_block_offset()));
 243       __ movptr(rbx, Address(rbx, ValueKlass::pack_handler_offset()));
 244       __ mov(rax, r13);
 245       __ call(rbx);
 246       __ jmp(skip);
 247     }
 248 
 249     __ bind(slow_case);
  250     // We failed to allocate a new value, so fall back to a runtime
  251     // call. Some oop fields may be live in registers, but we can't
  252     // tell which; the runtime call will take care of preserving them
  253     // across a GC if one occurs.
 254     __ super_call_VM_leaf(StubRoutines::store_value_type_fields_to_buf());
 255     __ bind(skip);
 256 #endif
 257   }
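The fast path above is a standard TLAB bump-pointer allocation: the tagged rax (low bit set) carries the ValueKlass, the instance size comes from its layout helper, and the freshly allocated buffer gets a mark word and klass (with a zeroed klass gap) before the klass's pack handler copies the field values out of the return registers. A minimal standalone sketch of that allocation pattern, using invented names (ToyTLAB, bump_alloc) rather than HotSpot APIs:

    #include <cstddef>

    struct ToyTLAB {
      char* top;   // next free byte (the thread's tlab_top in the code above)
      char* end;   // end of the thread-local buffer (tlab_end)
    };

    // Carve 'size' bytes out of the buffer, or return nullptr so the caller
    // can fall back to the slow path (here, the runtime call to
    // store_value_type_fields_to_buf).  The caller then installs the header
    // and klass and lets the pack handler fill in the fields.
    static void* bump_alloc(ToyTLAB& tlab, std::size_t size) {
      char* obj     = tlab.top;
      char* new_top = obj + size;
      if (new_top > tlab.end) {
        return nullptr;          // corresponds to jcc(Assembler::above, slow_case)
      }
      tlab.top = new_top;        // corresponds to storing r14 back into tlab_top
      return obj;
    }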
 258 
 259   __ restore_bcp();
 260   __ restore_locals();
 261 
 262   if (state == atos) {
 263     Register mdp = rbx;
 264     Register tmp = rcx;
 265     __ profile_return_type(mdp, rax, tmp);
 266   }
 267 
 268   const Register cache = rbx;
 269   const Register index = rcx;
 270   __ get_cache_and_index_at_bcp(cache, index, 1, index_size);
 271 
 272   const Register flags = cache;
 273   __ movl(flags, Address(cache, index, Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
 274   __ andl(flags, ConstantPoolCacheEntry::parameter_size_mask);
 275   __ lea(rsp, Address(rsp, flags, Interpreter::stackElementScale()));
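The three instructions above discard the arguments of the invoke that just returned: the low bits of the ConstantPoolCacheEntry flags word hold the callee's parameter size in stack slots, and rsp is advanced by that many interpreter stack elements. A small sketch of the same arithmetic; the 0xFF mask width is an assumption for the example (the real code uses ConstantPoolCacheEntry::parameter_size_mask), and slot_size stands in for Interpreter::stackElementScale():

    #include <cstddef>
    #include <cstdint>

    // Compute the stack pointer after the callee's arguments have been popped.
    static std::uintptr_t pop_arguments(std::uintptr_t sp, std::uint32_t flags,
                                        std::size_t slot_size = sizeof(void*)) {
      std::uint32_t param_slots = flags & 0xFF;   // parameter size, in stack slots
      return sp + param_slots * slot_size;        // matches the lea above
    }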
 276 
 277    const Register java_thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
 278    if (JvmtiExport::can_pop_frame()) {


 381         // Store as float and empty fpu stack
 382         __ fstp_s(Address(rsp, 0));
 383         // and reload
 384         __ movflt(xmm0, Address(rsp, 0));
 385       } else if (type == T_DOUBLE && UseSSE >= 2 ) {
 386         __ movdbl(xmm0, Address(rsp, 0));
 387       } else {
 388         // restore ST0
 389         __ fld_d(Address(rsp, 0));
 390       }
 391       // and pop the temp
 392       __ addptr(rsp, 2 * wordSize);
 393       __ push(t);                           // restore return address
 394     }
 395     break;
 396 #else
 397   case T_FLOAT  : /* nothing to do */        break;
 398   case T_DOUBLE : /* nothing to do */        break;
 399 #endif // _LP64
 400 
 401   case T_VALUETYPE: // fall through (value types are handled with oops)
 402   case T_OBJECT :
 403     // retrieve result from frame
 404     __ movptr(rax, Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize));
 405     // and verify it
 406     __ verify_oop(rax);
 407     break;
 408   default       : ShouldNotReachHere();
 409   }
 410   __ ret(0);                                   // return from result handler
 411   return entry;
 412 }
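For the T_OBJECT (and, with value types, T_VALUETYPE) case the result is deliberately reloaded from the frame's oop_temp slot rather than taken straight from rax: the native-entry code resolves the returned JNI handle and parks the oop in that slot, where the GC can find and update it across the thread-state transition, and the result handler then picks it up and runs it through verify_oop.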
 413 
 414 address TemplateInterpreterGenerator::generate_safept_entry_for(
 415         TosState state,
 416         address runtime_entry) {
 417   address entry = __ pc();
 418   __ push(state);
 419   __ call_VM(noreg, runtime_entry);
 420   __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
 421   return entry;
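Roughly, this is the code that the interpreter's safepoint dispatch table points at: the current tos value is pushed onto the expression stack so it stays visible to the GC, the requested runtime entry is invoked through call_VM, and the pending bytecode is then re-dispatched through the normal vtos table, since its operand now lives on the stack rather than in the tos registers.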

