
src/hotspot/cpu/x86/templateInterpreterGenerator_x86.cpp





  29 #include "interpreter/interpreter.hpp"
  30 #include "interpreter/interpreterRuntime.hpp"
  31 #include "interpreter/templateInterpreterGenerator.hpp"
  32 #include "interpreter/templateTable.hpp"
  33 #include "oops/arrayOop.hpp"
  34 #include "oops/methodData.hpp"
  35 #include "oops/method.hpp"
  36 #include "oops/oop.inline.hpp"
  37 #include "prims/jvmtiExport.hpp"
  38 #include "prims/jvmtiThreadState.hpp"
  39 #include "runtime/arguments.hpp"
  40 #include "runtime/deoptimization.hpp"
  41 #include "runtime/frame.inline.hpp"
  42 #include "runtime/sharedRuntime.hpp"
  43 #include "runtime/stubRoutines.hpp"
  44 #include "runtime/synchronizer.hpp"
  45 #include "runtime/timer.hpp"
  46 #include "runtime/vframeArray.hpp"
  47 #include "utilities/debug.hpp"
  48 #include "utilities/macros.hpp"

  49 
  50 #define __ _masm->
  51 
  52 // Size of interpreter code.  Increase if too small.  Interpreter will
  53 // fail with a guarantee ("not enough space for interpreter generation")
  54 // if too small.
  55 // Run with +PrintInterpreter to get the VM to print out the size.
  56 // Max size with JVMTI
  57 #ifdef AMD64
  58 int TemplateInterpreter::InterpreterCodeSize = JVMCI_ONLY(268) NOT_JVMCI(256) * 1024;
  59 #else
  60 int TemplateInterpreter::InterpreterCodeSize = 224 * 1024;
  61 #endif // AMD64
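// Example invocation for the size check mentioned above (note: depending on the JDK
// build, PrintInterpreter may be a develop/diagnostic flag, so a debug build or
// -XX:+UnlockDiagnosticVMOptions may be needed):
//   java -XX:+UnlockDiagnosticVMOptions -XX:+PrintInterpreter -version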
  62 
  63 // Global Register Names
  64 static const Register rbcp     = LP64_ONLY(r13) NOT_LP64(rsi);
  65 static const Register rlocals  = LP64_ONLY(r14) NOT_LP64(rdi);
  66 
  67 const int method_offset = frame::interpreter_frame_method_offset * wordSize;
  68 const int bcp_offset    = frame::interpreter_frame_bcp_offset    * wordSize;


 181     for (int i = 1; i < 8; i++) {
 182         __ ffree(i);
 183     }
 184   } else if (UseSSE < 2) {
 185     __ empty_FPU_stack();
 186   }
 187 #endif // COMPILER2
 188   if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) {
 189     __ MacroAssembler::verify_FPU(1, "generate_return_entry_for compiled");
 190   } else {
 191     __ MacroAssembler::verify_FPU(0, "generate_return_entry_for compiled");
 192   }
 193 
 194   if (state == ftos) {
 195     __ MacroAssembler::verify_FPU(UseSSE >= 1 ? 0 : 1, "generate_return_entry_for in interpreter");
 196   } else if (state == dtos) {
 197     __ MacroAssembler::verify_FPU(UseSSE >= 2 ? 0 : 1, "generate_return_entry_for in interpreter");
 198   }
 199 #endif // _LP64
 200 


 201   // Restore stack bottom in case i2c adjusted stack
 202   __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));


 203   // and NULL it as marker that esp is now tos until next java call
 204   __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
 205 
 206   __ restore_bcp();
 207   __ restore_locals();
 208 
 209   if (state == atos) {
 210     Register mdp = rbx;
 211     Register tmp = rcx;
 212     __ profile_return_type(mdp, rax, tmp);
 213   }
 214 
 215   const Register cache = rbx;
 216   const Register index = rcx;
 217   __ get_cache_and_index_at_bcp(cache, index, 1, index_size);
 218 
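  // The flags word of the resolved ConstantPoolCacheEntry encodes the callee's
  // parameter size in its low bits; masking with parameter_size_mask and advancing
  // rsp by that many stack elements pops the arguments of the just-completed call
  // off the caller's expression stack.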
 219   const Register flags = cache;
 220   __ movl(flags, Address(cache, index, Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
 221   __ andl(flags, ConstantPoolCacheEntry::parameter_size_mask);
 222   __ lea(rsp, Address(rsp, flags, Interpreter::stackElementScale()));


 637   // store object
 638   __ movptr(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax);
 639   const Register lockreg = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
 640   __ movptr(lockreg, rsp); // object address
 641   __ lock_object(lockreg);
 642 }
 643 
 644 // Generate a fixed interpreter frame. The setup is identical for
 645 // interpreted methods and for native methods, hence the shared code.
 646 //
 647 // Args:
 648 //      rax: return address
 649 //      rbx: Method*
 650 //      r14/rdi: pointer to locals
 651 //      r13/rsi: sender sp
 652 //      rdx: cp cache
 653 void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
 654   // initialize fixed part of activation frame
 655   __ push(rax);        // save return address
 656   __ enter();          // save old & set new rbp

 657   __ push(rbcp);        // set sender sp

 658   __ push((int)NULL_WORD); // leave last_sp as null
 659   __ movptr(rbcp, Address(rbx, Method::const_offset()));      // get ConstMethod*
 660   __ lea(rbcp, Address(rbcp, ConstMethod::codes_offset())); // get codebase
 661   __ push(rbx);        // save Method*
 662   // Get mirror and store it in the frame as GC root for this Method*
 663   __ load_mirror(rdx, rbx);
 664   __ push(rdx);
 665   if (ProfileInterpreter) {
 666     Label method_data_continue;
 667     __ movptr(rdx, Address(rbx, in_bytes(Method::method_data_offset())));
 668     __ testptr(rdx, rdx);
 669     __ jcc(Assembler::zero, method_data_continue);
 670     __ addptr(rdx, in_bytes(MethodData::data_offset()));
 671     __ bind(method_data_continue);
 672     __ push(rdx);      // set the mdp (method data pointer)
 673   } else {
 674     __ push(0);
 675   }
 676 
 677   __ movptr(rdx, Address(rbx, Method::const_offset()));
 678   __ movptr(rdx, Address(rdx, ConstMethod::constants_offset()));
 679   __ movptr(rdx, Address(rdx, ConstantPool::cache_offset_in_bytes()));
 680   __ push(rdx); // set constant pool cache
 681   __ push(rlocals); // set locals pointer



 682   if (native_call) {
 683     __ push(0); // no bcp
 684   } else {
 685     __ push(rbcp); // set bcp
 686   }
 687   __ push(0); // reserve word for pointer to expression stack bottom
 688   __ movptr(Address(rsp, 0), rsp); // set expression stack bottom
 689 }
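// For reference, the fixed frame laid out by the pushes above, relative to the new
// rbp (the slot indices correspond to the frame::interpreter_frame_*_offset constants):
//
//   rbp + 1*wordSize : return address
//   rbp + 0          : saved caller rbp
//   rbp - 1*wordSize : sender sp
//   rbp - 2*wordSize : last_sp (NULL until the next Java call)
//   rbp - 3*wordSize : Method*
//   rbp - 4*wordSize : mirror (GC root for the Method*)
//   rbp - 5*wordSize : mdp (method data pointer), or 0
//   rbp - 6*wordSize : ConstantPoolCache*
//   rbp - 7*wordSize : locals pointer
//   rbp - 8*wordSize : bcp, or 0 for native methods
//   rbp - 9*wordSize : expression stack bottom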
 690 
 691 // End of helpers
 692 
 693 // Method entry for java.lang.ref.Reference.get.
 694 address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
 695 #if INCLUDE_ALL_GCS
 696   // Code: _aload_0, _getfield, _areturn
 697   // parameter size = 1
 698   //
 699   // The code that gets generated by this routine is split into 2 parts:
 700   //    1. The "intrinsified" code for G1 (or any SATB based GC),
 701   //    2. The slow path - which is an expansion of the regular method entry.
 702   //
 703   // Notes:-
 704   // * In the G1 code we do not check whether we need to block for
 705   //   a safepoint. If G1 is enabled then we must execute the specialized
 706   //   code for Reference.get (except when the Reference object is null)
 707   //   so that we can log the value in the referent field with an SATB
 708   //   update buffer.
 709   //   If the code for the getfield template is modified so that the
 710   //   G1 pre-barrier code is executed when the current method is
 711   //   Reference.get() then going through the normal method entry
 712   //   will be fine.
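// In outline (a sketch of the elided body below, not the verbatim code): the
// intrinsified path loads local 0 (the Reference), null-checks it, loads the
// referent field, hands the loaded value to the G1 SATB pre-barrier so it is
// logged in the update buffer, and returns it; the slow path simply jumps to the
// normal (zerolocals) method entry.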


1021     Label L;
1022     __ movptr(rax, Address(method, Method::native_function_offset()));
1023     ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
1024     __ cmpptr(rax, unsatisfied.addr());
1025     __ jcc(Assembler::notEqual, L);
1026     __ call_VM(noreg,
1027                CAST_FROM_FN_PTR(address,
1028                                 InterpreterRuntime::prepare_native_call),
1029                method);
1030     __ get_method(method);
1031     __ movptr(rax, Address(method, Method::native_function_offset()));
1032     __ bind(L);
1033   }
1034 
1035   // pass JNIEnv
1036 #ifndef _LP64
1037    __ get_thread(thread);
1038    __ lea(t, Address(thread, JavaThread::jni_environment_offset()));
1039    __ movptr(Address(rsp, 0), t);
1040 

1041    // set_last_Java_frame_before_call
1042    // It is enough that the pc()
1043    // points into the right code segment. It does not have to be the correct return pc.
1044    __ set_last_Java_frame(thread, noreg, rbp, __ pc());
1045 #else
1046    __ lea(c_rarg0, Address(r15_thread, JavaThread::jni_environment_offset()));
1047 
1048    // It is enough that the pc() points into the right code
1049    // segment. It does not have to be the correct return pc.
1050    __ set_last_Java_frame(rsp, rbp, (address) __ pc());
1051 #endif // _LP64
1052 
1053   // change thread state
1054 #ifdef ASSERT
1055   {
1056     Label L;
1057     __ movl(t, Address(thread, JavaThread::thread_state_offset()));
1058     __ cmpl(t, _thread_in_Java);
1059     __ jcc(Assembler::equal, L);
1060     __ stop("Wrong thread state in native stub");


1302   // Note: This must happen _after_ handling/throwing any exceptions since
1303   //       the exception handler code notifies the runtime of method exits
1304   //       too. If this happens before, method entry/exit notifications are
1305   //       not properly paired (was bug - gri 11/22/99).
1306   __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);
1307 
1308   // restore potential result in edx:eax, call result handler to
1309   // restore potential result in ST0 & handle result
1310 
1311   __ pop(ltos);
1312   LP64_ONLY( __ pop(dtos));
1313 
1314   __ movptr(t, Address(rbp,
1315                        (frame::interpreter_frame_result_handler_offset) * wordSize));
1316   __ call(t);
1317 
1318   // remove activation
1319   __ movptr(t, Address(rbp,
1320                        frame::interpreter_frame_sender_sp_offset *
1321                        wordSize)); // get sender sp

1322   __ leave();                                // remove frame anchor

1323   __ pop(rdi);                               // get return address
1324   __ mov(rsp, t);                            // set sp to sender sp
1325   __ jmp(rdi);
1326 
1327   if (inc_counter) {
1328     // Handle overflow of counter and compile method
1329     __ bind(invocation_counter_overflow);
1330     generate_counter_overflow(continue_after_compile);
1331   }
1332 
1333   return entry_point;
1334 }
1335 
1336 // Abstract method entry
1337 // Attempt to execute abstract method. Throw exception
1338 address TemplateInterpreterGenerator::generate_abstract_entry(void) {
1339 
1340   address entry_point = __ pc();
1341 
1342   // abstract method entry
1343 
1344   //  pop return address, reset last_sp to NULL
1345   __ empty_expression_stack();
1346   __ restore_bcp();      // rsi must be correct for exception handler   (was destroyed)
1347   __ restore_locals();   // make sure locals pointer is correct as well (was destroyed)
1348 
1349   // throw exception
1350   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
1351   // the call_VM checks for exception, so we should never return here.
1352   __ should_not_reach_here();
1353 
1354   return entry_point;
1355 }
1356 




1357 //
1358 // Generic interpreted method entry to (asm) interpreter
1359 //
1360 address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
1361   // determine code generation flags
1362   bool inc_counter  = UseCompiler || CountCompiledCalls || LogTouchedMethods;
1363 
1364   // ebx: Method*
1365   // rbcp: sender sp
1366   address entry_point = __ pc();
1367 
1368   const Address constMethod(rbx, Method::const_offset());
1369   const Address access_flags(rbx, Method::access_flags_offset());
1370   const Address size_of_parameters(rdx,
1371                                    ConstMethod::size_of_parameters_offset());
1372   const Address size_of_locals(rdx, ConstMethod::size_of_locals_offset());
1373 
1374 
1375   // get parameter size (always needed)
1376   __ movptr(rdx, constMethod);
1377   __ load_unsigned_short(rcx, size_of_parameters);
1378 
1379   // rbx: Method*
1380   // rcx: size of parameters
1381   // rbcp: sender_sp (could differ from sp+wordSize if we were called via c2i )
1382 
1383   __ load_unsigned_short(rdx, size_of_locals); // get size of locals in words
1384   __ subl(rdx, rcx); // rdx = no. of additional locals
1385 
1386   // YYY
1387 //   __ incrementl(rdx);
1388 //   __ andl(rdx, -2);
1389 


1390   // see if we've got enough room on the stack for locals plus overhead.
1391   generate_stack_overflow_check();
1392 
1393   // get return address
1394   __ pop(rax);
1395 
1396   // compute beginning of parameters
1397   __ lea(rlocals, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));
1398 
1399   // rdx - # of additional locals
1400   // allocate space for locals
1401   // explicitly initialize locals
1402   {
1403     Label exit, loop;
1404     __ testl(rdx, rdx);
1405     __ jcc(Assembler::lessEqual, exit); // do nothing if rdx <= 0
1406     __ bind(loop);
1407     __ push((int) NULL_WORD); // initialize local variables
1408     __ decrementl(rdx); // until everything initialized
1409     __ jcc(Assembler::greater, loop);
1410     __ bind(exit);
1411   }
1412 
1413   // initialize fixed part of activation frame
1414   generate_fixed_frame(false);
1415 




1416   // make sure method is not native & not abstract
1417 #ifdef ASSERT
1418   __ movl(rax, access_flags);
1419   {
1420     Label L;
1421     __ testl(rax, JVM_ACC_NATIVE);
1422     __ jcc(Assembler::zero, L);
1423     __ stop("tried to execute native method as non-native");
1424     __ bind(L);
1425   }
1426   {
1427     Label L;
1428     __ testl(rax, JVM_ACC_ABSTRACT);
1429     __ jcc(Assembler::zero, L);
1430     __ stop("tried to execute abstract method in interpreter");
1431     __ bind(L);
1432   }
1433 #endif
1434 
1435   // Since at this point in the method invocation the exception


1444         in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
1445   __ movbool(do_not_unlock_if_synchronized, true);
1446 
1447   __ profile_parameters_type(rax, rcx, rdx);
1448   // increment invocation count & check for overflow
1449   Label invocation_counter_overflow;
1450   Label profile_method;
1451   Label profile_method_continue;
1452   if (inc_counter) {
1453     generate_counter_incr(&invocation_counter_overflow,
1454                           &profile_method,
1455                           &profile_method_continue);
1456     if (ProfileInterpreter) {
1457       __ bind(profile_method_continue);
1458     }
1459   }
1460 
1461   Label continue_after_compile;
1462   __ bind(continue_after_compile);
1463 





 1464   // bang the stack shadow pages to detect a potential stack overflow up front
1465   bang_stack_shadow_pages(false);
1466 


1467   // reset the _do_not_unlock_if_synchronized flag
1468   NOT_LP64(__ get_thread(thread));
1469   __ movbool(do_not_unlock_if_synchronized, false);
1470 
1471   // check for synchronized methods
1472   // Must happen AFTER invocation_counter check and stack overflow check,
1473   // so method is not locked if overflows.
1474   if (synchronized) {
1475     // Allocate monitor and lock method
1476     lock_method();
1477   } else {
1478     // no synchronization necessary
1479 #ifdef ASSERT
1480     {
1481       Label L;
1482       __ movl(rax, access_flags);
1483       __ testl(rax, JVM_ACC_SYNCHRONIZED);
1484       __ jcc(Assembler::zero, L);
1485       __ stop("method needs synchronization");
1486       __ bind(L);


1488 #endif
1489   }
1490 
1491   // start execution
1492 #ifdef ASSERT
1493   {
1494     Label L;
1495      const Address monitor_block_top (rbp,
1496                  frame::interpreter_frame_monitor_block_top_offset * wordSize);
1497     __ movptr(rax, monitor_block_top);
1498     __ cmpptr(rax, rsp);
1499     __ jcc(Assembler::equal, L);
1500     __ stop("broken stack frame setup in interpreter");
1501     __ bind(L);
1502   }
1503 #endif
1504 
1505   // jvmti support
1506   __ notify_method_entry();
1507 




1508   __ dispatch_next(vtos);
1509 




1510   // invocation counter overflow
1511   if (inc_counter) {
1512     if (ProfileInterpreter) {
1513       // We have decided to profile this method in the interpreter
1514       __ bind(profile_method);
1515       __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
1516       __ set_method_data_pointer_for_bcp();
1517       __ get_method(rbx);
1518       __ jmp(profile_method_continue);
1519     }
1520     // Handle overflow of counter and compile method
1521     __ bind(invocation_counter_overflow);
1522     generate_counter_overflow(continue_after_compile);
1523   }
1524 
1525   return entry_point;
1526 }
1527 
1528 //-----------------------------------------------------------------------------
1529 // Exceptions




  29 #include "interpreter/interpreter.hpp"
  30 #include "interpreter/interpreterRuntime.hpp"
  31 #include "interpreter/templateInterpreterGenerator.hpp"
  32 #include "interpreter/templateTable.hpp"
  33 #include "oops/arrayOop.hpp"
  34 #include "oops/methodData.hpp"
  35 #include "oops/method.hpp"
  36 #include "oops/oop.inline.hpp"
  37 #include "prims/jvmtiExport.hpp"
  38 #include "prims/jvmtiThreadState.hpp"
  39 #include "runtime/arguments.hpp"
  40 #include "runtime/deoptimization.hpp"
  41 #include "runtime/frame.inline.hpp"
  42 #include "runtime/sharedRuntime.hpp"
  43 #include "runtime/stubRoutines.hpp"
  44 #include "runtime/synchronizer.hpp"
  45 #include "runtime/timer.hpp"
  46 #include "runtime/vframeArray.hpp"
  47 #include "utilities/debug.hpp"
  48 #include "utilities/macros.hpp"
  49 #include "classfile/systemDictionary.hpp"
  50 
  51 #define __ _masm->
  52 
  53 // Size of interpreter code.  Increase if too small.  Interpreter will
  54 // fail with a guarantee ("not enough space for interpreter generation")
  55 // if too small.
  56 // Run with +PrintInterpreter to get the VM to print out the size.
  57 // Max size with JVMTI
  58 #ifdef AMD64
  59 int TemplateInterpreter::InterpreterCodeSize = JVMCI_ONLY(268) NOT_JVMCI(256) * 1024;
  60 #else
  61 int TemplateInterpreter::InterpreterCodeSize = 224 * 1024;
  62 #endif // AMD64
  63 
  64 // Global Register Names
  65 static const Register rbcp     = LP64_ONLY(r13) NOT_LP64(rsi);
  66 static const Register rlocals  = LP64_ONLY(r14) NOT_LP64(rdi);
  67 
  68 const int method_offset = frame::interpreter_frame_method_offset * wordSize;
  69 const int bcp_offset    = frame::interpreter_frame_bcp_offset    * wordSize;


 182     for (int i = 1; i < 8; i++) {
 183         __ ffree(i);
 184     }
 185   } else if (UseSSE < 2) {
 186     __ empty_FPU_stack();
 187   }
 188 #endif // COMPILER2
 189   if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) {
 190     __ MacroAssembler::verify_FPU(1, "generate_return_entry_for compiled");
 191   } else {
 192     __ MacroAssembler::verify_FPU(0, "generate_return_entry_for compiled");
 193   }
 194 
 195   if (state == ftos) {
 196     __ MacroAssembler::verify_FPU(UseSSE >= 1 ? 0 : 1, "generate_return_entry_for in interpreter");
 197   } else if (state == dtos) {
 198     __ MacroAssembler::verify_FPU(UseSSE >= 2 ? 0 : 1, "generate_return_entry_for in interpreter");
 199   }
 200 #endif // _LP64
 201 
 202   // __ stop("12121212");
 203 
 204   // Restore stack bottom in case i2c adjusted stack
 205   __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
 206   // __ addq(rsp, rbp); // XXXXXXXX see generate_fixed_frame
 207 
 208   // and NULL it as marker that esp is now tos until next java call
 209   __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
 210 
 211   __ restore_bcp();
 212   __ restore_locals();
 213 
 214   if (state == atos) {
 215     Register mdp = rbx;
 216     Register tmp = rcx;
 217     __ profile_return_type(mdp, rax, tmp);
 218   }
 219 
 220   const Register cache = rbx;
 221   const Register index = rcx;
 222   __ get_cache_and_index_at_bcp(cache, index, 1, index_size);
 223 
 224   const Register flags = cache;
 225   __ movl(flags, Address(cache, index, Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
 226   __ andl(flags, ConstantPoolCacheEntry::parameter_size_mask);
 227   __ lea(rsp, Address(rsp, flags, Interpreter::stackElementScale()));


 642   // store object
 643   __ movptr(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax);
 644   const Register lockreg = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
 645   __ movptr(lockreg, rsp); // object address
 646   __ lock_object(lockreg);
 647 }
 648 
 649 // Generate a fixed interpreter frame. The setup is identical for
 650 // interpreted methods and for native methods, hence the shared code.
 651 //
 652 // Args:
 653 //      rax: return address
 654 //      rbx: Method*
 655 //      r14/rdi: pointer to locals
 656 //      r13/rsi: sender sp
 657 //      rdx: cp cache
 658 void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
 659   // initialize fixed part of activation frame
 660   __ push(rax);        // save return address
 661   __ enter();          // save old & set new rbp
 662   __ subq(rbcp, rbp); // must be added to rbp to get the old one.
 663   __ push(rbcp);    // set sender sp
 664 
 665   __ push((int)NULL_WORD); // leave last_sp as null
 666   __ movptr(rbcp, Address(rbx, Method::const_offset()));      // get ConstMethod*
 667   __ lea(rbcp, Address(rbcp, ConstMethod::codes_offset())); // get codebase
 668   __ push(rbx);        // save Method*
 669   // Get mirror and store it in the frame as GC root for this Method*
 670   __ load_mirror(rdx, rbx);
 671   __ push(rdx); // oop
 672   if (ProfileInterpreter) {
 673     Label method_data_continue;
 674     __ movptr(rdx, Address(rbx, in_bytes(Method::method_data_offset())));
 675     __ testptr(rdx, rdx);
 676     __ jcc(Assembler::zero, method_data_continue);
 677     __ addptr(rdx, in_bytes(MethodData::data_offset()));
 678     __ bind(method_data_continue);
 679     __ push(rdx);      // set the mdp (method data pointer)
 680   } else {
 681     __ push(0);
 682   }
 683 
 684   __ movptr(rdx, Address(rbx, Method::const_offset()));
 685   __ movptr(rdx, Address(rdx, ConstMethod::constants_offset()));
 686   __ movptr(rdx, Address(rdx, ConstantPool::cache_offset_in_bytes()));
 687   __ push(rdx); // set constant pool cache
 688   // __ push(rlocals); // set locals pointer
 689   __ mov(rdx, rlocals);
 690   __ subq(rdx, rbp);
 691   __ push(rdx); // set locals pointer
 692   if (native_call) {
 693     __ push(0); // no bcp
 694   } else {
 695     __ push(rbcp); // set bcp
 696   }
 697   __ push(0); // reserve word for pointer to expression stack bottom
 698   __ movptr(Address(rsp, 0), rsp); // set expression stack bottom
 699 }
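// Note on the deltas above (a reading of this prototype, not stated by the patch):
// sender sp and the locals pointer are saved as offsets from rbp rather than as
// absolute addresses, presumably so the fixed frame stays position-independent when
// generate_continuation_frame() copies interpreter frames into and out of a
// continuation's int[] stack array; consumers must add rbp back to recover the
// original pointers, as the "must be added to rbp" comment says.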
 700 
 701 void TemplateInterpreterGenerator::generate_continuation_frame(bool native_call, AbstractInterpreter::MethodKind kind) {
 702   // rdx = no. of additional locals
 703   // rcx = size of parameters
 704   // rbx = Method*
 705   // free registers: rax, 
 706 
 707   if (kind == AbstractInterpreter::java_lang_continuation_getStack) {
 708     // return;
 709   }
 710 
 711   // total overhead size: entry_size + (saved rbp through expr stack
 712   // bottom).  be sure to change this if you add/subtract anything
 713   // to/from the overhead area
 714   const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
 715   const int overhead = -(frame::interpreter_frame_initial_sp_offset * wordSize) + entry_size;
 716   assert(overhead > 0, "overhead is 0 or negative");
 717   // test if we're in a continuation
 718   Label done, done0;
 719   Register cont = r10;
 720   const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
 721   NOT_LP64(__ get_thread(thread));
 722 
 723   __ movptr(cont, Address(thread, in_bytes(JavaThread::continuation_offset())));
 724   __ testl(cont, cont);
 725   __ jcc(Assembler::zero, done);
 726 
 727   Register array = r11, index_rsp = r13; // index_rbp = r10; (r12 is taken)
 728   __ continuation_save_registers(cont, array, index_rsp);
 729 
 730   __ load_heap_oop(array, Address(cont, java_lang_Continuation::stack_offset));
 731   __ lea(array, Address(array, arrayOopDesc::base_offset_in_bytes(T_INT)));
 732 
 733   __ movptr(index_rsp, rsp);
 734   __ subq(index_rsp, array); // now index is the offset _in bytes_ into the array
 735 
 736   __ cmpl(index_rsp, overhead);  
 737   __ jcc(Assembler::above, done0); // fast path
 738 
 739   // slow path -- call getStack to allocate a new stack for us
 740 
 741   // Done by continuation_exit:
 742   __ shrq(index_rsp, 2); // now index is the (int) index into the array
 743 
 744   // __ movq(index_rbp, rbp);
 745   // __ subq(index_rbp, array);
 746   // __ shrq(index_rbp, 2); // now index is the (int) index into the array
 747 
 748   __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
 749   __ call_VM(cont, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_continuation_stack), cont, index_rsp, rdx);
 750 
 751   __ stop("generate_continuation_frame slow 1111");
 752   // Done by continuation_enter:
 753   // __ load_heap_oop(array, Address(cont, java_lang_Continuation::stack_offset)); // now array is the new array
 754   // __ lea(rsp, Address(array, index_rsp, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_INT)));
 755   // __ lea(rbp, Address(array, index_rbp, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_INT)));
 756 
 757   __ bind(done0);  
 758   __ continuation_restore_registers(cont, array, index_rsp);
 759 
 760   __ bind(done);
 761 }
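// How the split above works (a reading of the code; the concrete numbers below are
// illustrative assumptions, not taken from the patch): rsp currently points into the
// continuation's int[] stack array, so index_rsp = rsp - array_base is the number of
// bytes of continuation stack still free below the current frame. If that exceeds
// 'overhead' (the fixed frame slots plus one monitor), the fast path is taken;
// otherwise get_continuation_stack is called to provide a larger stack. For example,
// on LP64 with interpreter_frame_initial_sp_offset == -9 and a two-word
// BasicObjectLock, overhead would be (9 + 2) * 8 = 88 bytes.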
 762 
 763 // End of helpers
 764 
 765 /*address TemplateInterpreterGenerator::generate_Continuation_run_entry(void) {
 766   address entry = __ pc();
 767   
 768   // CallInfo callinfo;
 769   // Klass* recvrKlass = SystemDictionary::resolve_or_null(vmSymbols::java_lang_Continuation(), NULL); // SystemDictionary::Continuation_klass();
 770   // LinkInfo link_info(recvrKlass, vmSymbols::enter_name(), vmSymbols::continuationEnter_signature());
 771   // LinkResolver::resolve_special_call(callinfo, Handle(), link_info, NULL);
 772   // methodHandle method = callinfo.selected_method();
 773   // assert(method.not_null(), "should have thrown exception");
 774 
 775   const Address constMethod(rbx, Method::const_offset());
 776   // const Address access_flags(rbx, Method::access_flags_offset());
 777 
 778   __ movptr(rdx, constMethod);
 779 
 780   const Address size_of_parameters(rdx, ConstMethod::size_of_parameters_offset());
 781   // const Address size_of_locals(rdx, ConstMethod::size_of_locals_offset());
 782 
 783   __ load_unsigned_short(rcx, size_of_parameters);
 784   __ subl(rdx, rdx);// rdx = 0 = no. of additional locals
 785   
 786   // rbx: Method*
 787   // rcx: size of parameters
 788   // rbcp: sender_sp (could differ from sp+wordSize if we were called via c2i )
 789 
 790   // see if we've got enough room on the stack for locals plus overhead.
 791   // generate_stack_overflow_check();
 792 
 793   __ pop(rax); // get return address
 794 
 795   // compute beginning of parameters
 796   __ lea(rlocals, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));
 797   __ movptr(rax, Address(rlocals, 0)); // Address(rsp, wordSize)
 798 
 799   // rax: local 0
 800   Register recv = rax;
 801   __ push(recv);
 802 
 803   Register rmethod = rbx; // Method* must be rbx for interpreter calling convention
 804   __ movptr(rmethod, AddressLiteral((address)&java_lang_Continuation::stack_method, RelocationHolder::none), NULL);
 805   
 806     // load return address
 807     //const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);
 808     ExternalAddress table(entry);
 809     __ movptr(rdx, table);
 810   // push return address
 811   __ push(rdx); // ???? AddressLiteral(__ pc(), RelocationHolder::none)
 812 
 813   // initialize fixed part of activation frame
 814   // __ enter(); // remove if generate_fixed_frame
 815   // generate_fixed_frame(false);
 816 
 817   // __ stop("zzzz");
 818 
 819   __ jump_from_interpreted(rmethod, rax);
 820 
 821 
 822   return entry;
 823 }*/
 824 
 825 
 826 // Method entry for java.lang.Continuation.run.
 827 address TemplateInterpreterGenerator::generate_Continuation_run_entry(void) {
 828   address entry = __ pc();
 829 
 830   const Address constMethod(rbx, Method::const_offset());
 831   // const Address access_flags(rbx, Method::access_flags_offset());
 832 
 833   __ movptr(rdx, constMethod);
 834 
 835   const Address size_of_parameters(rdx,
 836                                    ConstMethod::size_of_parameters_offset());
 837   // const Address size_of_locals(rdx, ConstMethod::size_of_locals_offset());
 838 
 839   __ load_unsigned_short(rcx, size_of_parameters);
 840   __ subl(rdx, rdx);// rdx = 0 = no. of additional locals
 841 
 842   // rbx: Method*
 843   // rcx: size of parameters
 844   // rbcp: sender_sp (could differ from sp+wordSize if we were called via c2i )
 845 
 846   // see if we've got enough room on the stack for locals plus overhead.
 847   generate_stack_overflow_check();
 848 
 849   // rbx: Method*
 850   // rcx: size of parameters
 851   // rbcp: sender sp
 852   
 853   __ pop(rax); // get return address
 854 
 855   // compute beginning of parameters
 856   __ lea(rlocals, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));
 857 
 858   // add 2 zero-initialized slots for native calls
 859   // initialize result_handler slot
 860   __ push((int) NULL_WORD);
 861   // slot for oop temp
 862   // (static native method holder mirror/jni oop result)
 863   __ push((int) NULL_WORD);
 864 
 865   // initialize fixed part of activation frame
 866   generate_fixed_frame(false);
 867 
 868   __ movptr(rax, Address(rlocals, 0)); // Address(rsp, wordSize)
 869 
 870   // rax: local 0
 871   
 872   // const int target_offset = java_lang_Continuation::target_offset;
 873   // guarantee(target_offset > 0, "target offset not initialized");
 874 
 875   // Load the value of the referent field.
 876   // const Address field_address(rax, target_offset);
 877   // __ load_heap_oop(rcx, field_address);
 878 
 879   // const Register thread = rcx;
 880   // __ get_thread(thread);
 881   // __ set_last_Java_frame(thread, noreg, noreg, NULL);
 882   // __ reset_last_Java_frame(thread, false);
 883 
 884   Register rarg1 = rax;
 885   __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::enter_continuation), rarg1);
 886 
 887   // __ get_thread(thread);
 888   // __ reset_last_Java_frame(thread, false);
 889 
 890   __ remove_activation(vtos, rbcp);
 891   __ jmp(rbcp);
 892 
 893   return entry;
 894 }
 895 
 896 // Method entry for java.lang.ref.Reference.get.
 897 address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
 898 #if INCLUDE_ALL_GCS
 899   // Code: _aload_0, _getfield, _areturn
 900   // parameter size = 1
 901   //
 902   // The code that gets generated by this routine is split into 2 parts:
 903   //    1. The "intrinsified" code for G1 (or any SATB based GC),
 904   //    2. The slow path - which is an expansion of the regular method entry.
 905   //
 906   // Notes:-
 907   // * In the G1 code we do not check whether we need to block for
 908   //   a safepoint. If G1 is enabled then we must execute the specialized
 909   //   code for Reference.get (except when the Reference object is null)
 910   //   so that we can log the value in the referent field with an SATB
 911   //   update buffer.
 912   //   If the code for the getfield template is modified so that the
 913   //   G1 pre-barrier code is executed when the current method is
 914   //   Reference.get() then going through the normal method entry
 915   //   will be fine.


1224     Label L;
1225     __ movptr(rax, Address(method, Method::native_function_offset()));
1226     ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
1227     __ cmpptr(rax, unsatisfied.addr());
1228     __ jcc(Assembler::notEqual, L);
1229     __ call_VM(noreg,
1230                CAST_FROM_FN_PTR(address,
1231                                 InterpreterRuntime::prepare_native_call),
1232                method);
1233     __ get_method(method);
1234     __ movptr(rax, Address(method, Method::native_function_offset()));
1235     __ bind(L);
1236   }
1237 
1238   // pass JNIEnv
1239 #ifndef _LP64
1240    __ get_thread(thread);
1241    __ lea(t, Address(thread, JavaThread::jni_environment_offset()));
1242    __ movptr(Address(rsp, 0), t);
1243 
1244    __ stop("LOOM XXX"); // assert not called
1245    // set_last_Java_frame_before_call
1246    // It is enough that the pc()
1247    // points into the right code segment. It does not have to be the correct return pc.
1248    __ set_last_Java_frame(thread, noreg, rbp, __ pc());
1249 #else
1250    __ lea(c_rarg0, Address(r15_thread, JavaThread::jni_environment_offset()));
1251 
1252    // It is enough that the pc() points into the right code
1253    // segment. It does not have to be the correct return pc.
1254    __ set_last_Java_frame(rsp, rbp, (address) __ pc());
1255 #endif // _LP64
1256 
1257   // change thread state
1258 #ifdef ASSERT
1259   {
1260     Label L;
1261     __ movl(t, Address(thread, JavaThread::thread_state_offset()));
1262     __ cmpl(t, _thread_in_Java);
1263     __ jcc(Assembler::equal, L);
1264     __ stop("Wrong thread state in native stub");


1506   // Note: This must happen _after_ handling/throwing any exceptions since
1507   //       the exception handler code notifies the runtime of method exits
1508   //       too. If this happens before, method entry/exit notifications are
1509   //       not properly paired (was bug - gri 11/22/99).
1510   __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);
1511 
1512   // restore potential result in edx:eax, call result handler to
1513   // restore potential result in ST0 & handle result
1514 
1515   __ pop(ltos);
1516   LP64_ONLY( __ pop(dtos));
1517 
1518   __ movptr(t, Address(rbp,
1519                        (frame::interpreter_frame_result_handler_offset) * wordSize));
1520   __ call(t);
1521 
1522   // remove activation
1523   __ movptr(t, Address(rbp,
1524                        frame::interpreter_frame_sender_sp_offset *
1525                        wordSize)); // get sender sp
1526   // __ addq(t, rbp);
1527   __ leave();                                // remove frame anchor
1528   // __ stop("k9k9k9k9");
1529   __ pop(rdi);                               // get return address
1530   __ mov(rsp, t);                            // set sp to sender sp
1531   __ jmp(rdi);
1532 
1533   if (inc_counter) {
1534     // Handle overflow of counter and compile method
1535     __ bind(invocation_counter_overflow);
1536     generate_counter_overflow(continue_after_compile);
1537   }
1538 
1539   return entry_point;
1540 }
1541 
1542 // Abstract method entry
1543 // Attempt to execute abstract method. Throw exception
1544 address TemplateInterpreterGenerator::generate_abstract_entry(void) {
1545 
1546   address entry_point = __ pc();
1547 
1548   // abstract method entry
1549 
1550   //  pop return address, reset last_sp to NULL
1551   __ empty_expression_stack();
1552   __ restore_bcp();      // rsi must be correct for exception handler   (was destroyed)
1553   __ restore_locals();   // make sure locals pointer is correct as well (was destroyed)
1554 
1555   // throw exception
1556   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
1557   // the call_VM checks for exception, so we should never return here.
1558   __ should_not_reach_here();
1559 
1560   return entry_point;
1561 }
1562 
1563 address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
1564   return generate_normal_entry(synchronized, AbstractInterpreter::zerolocals);
1565 }
1566 
1567 //
1568 // Generic interpreted method entry to (asm) interpreter
1569 //
1570 address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized, AbstractInterpreter::MethodKind kind) {
1571   // determine code generation flags
1572   bool inc_counter  = UseCompiler || CountCompiledCalls || LogTouchedMethods;
1573 
1574   // ebx: Method*
1575   // rbcp: sender sp (set in InterpreterMacroAssembler::prepare_to_jump_from_interpreted / generate_call_stub)
1576   address entry_point = __ pc();
1577 
1578   const Address constMethod(rbx, Method::const_offset());
1579   const Address access_flags(rbx, Method::access_flags_offset());
1580   const Address size_of_parameters(rdx,
1581                                    ConstMethod::size_of_parameters_offset());
1582   const Address size_of_locals(rdx, ConstMethod::size_of_locals_offset());
1583 

1584   // get parameter size (always needed)
1585   __ movptr(rdx, constMethod);
1586   __ load_unsigned_short(rcx, size_of_parameters);
1587 
1588   // rbx: Method*
1589   // rcx: size of parameters
1590   // rbcp: sender_sp (could differ from sp+wordSize if we were called via c2i )
1591 
1592   __ load_unsigned_short(rdx, size_of_locals); // get size of locals in words
1593   __ subl(rdx, rcx); // rdx = no. of additional locals
1594 
1595   // YYY
1596 //   __ incrementl(rdx);
1597 //   __ andl(rdx, -2);
1598 
1599   generate_continuation_frame(false, kind);
1600 
1601   // see if we've got enough room on the stack for locals plus overhead.
1602   generate_stack_overflow_check();
1603 
1604   // get return address
1605   __ pop(rax); // saved from rax in generate_fixed_frame
1606 
1607   if (kind == AbstractInterpreter::java_lang_continuation_enter) {
1608     Register cont = r10;
1609     const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
1610     NOT_LP64(__ get_thread(thread));
1611     __ movptr(cont, Address(thread, in_bytes(JavaThread::continuation_offset())));
1612     __ push(cont); // ensure that the first parameter (this) is on the stack
1613   }
1614 
1615   // compute beginning of parameters
1616   __ lea(rlocals, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));
1617 
1618   // rdx - # of additional locals
1619   // allocate space for locals
1620   // explicitly initialize locals
1621   {
1622     Label exit, loop;
1623     __ testl(rdx, rdx);
1624     __ jcc(Assembler::lessEqual, exit); // do nothing if rdx <= 0
1625     __ bind(loop);
1626     __ push((int) NULL_WORD); // initialize local variables
1627     __ decrementl(rdx); // until everything initialized
1628     __ jcc(Assembler::greater, loop);
1629     __ bind(exit);
1630   }
1631 
1632   // initialize fixed part of activation frame
1633   generate_fixed_frame(false);
1634 
1635   // if (kind == AbstractInterpreter::java_lang_continuation_getStack) {
1636   //   __ stop("23232323");
1637   // }
1638 
1639   // make sure method is not native & not abstract
1640 #ifdef ASSERT
1641   __ movl(rax, access_flags);
1642   {
1643     Label L;
1644     __ testl(rax, JVM_ACC_NATIVE);
1645     __ jcc(Assembler::zero, L);
1646     __ stop("tried to execute native method as non-native");
1647     __ bind(L);
1648   }
1649   {
1650     Label L;
1651     __ testl(rax, JVM_ACC_ABSTRACT);
1652     __ jcc(Assembler::zero, L);
1653     __ stop("tried to execute abstract method in interpreter");
1654     __ bind(L);
1655   }
1656 #endif
1657 
1658   // Since at this point in the method invocation the exception


1667         in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
1668   __ movbool(do_not_unlock_if_synchronized, true);
1669 
1670   __ profile_parameters_type(rax, rcx, rdx);
1671   // increment invocation count & check for overflow
1672   Label invocation_counter_overflow;
1673   Label profile_method;
1674   Label profile_method_continue;
1675   if (inc_counter) {
1676     generate_counter_incr(&invocation_counter_overflow,
1677                           &profile_method,
1678                           &profile_method_continue);
1679     if (ProfileInterpreter) {
1680       __ bind(profile_method_continue);
1681     }
1682   }
1683 
1684   Label continue_after_compile;
1685   __ bind(continue_after_compile);
1686 
1687   Label no_cont;
1688   __ movptr(r11, Address(r15_thread, in_bytes(JavaThread::continuation_offset())));
1689   __ testl(r11, r11);
1690   __ jcc(Assembler::zero, no_cont);
1691 
 1692   // bang the stack shadow pages to detect a potential stack overflow up front
1693   bang_stack_shadow_pages(false);
1694 
1695   __ bind(no_cont);
1696 
1697   // reset the _do_not_unlock_if_synchronized flag
1698   NOT_LP64(__ get_thread(thread));
1699   __ movbool(do_not_unlock_if_synchronized, false);
1700 
1701   // check for synchronized methods
1702   // Must happen AFTER invocation_counter check and stack overflow check,
1703   // so method is not locked if overflows.
1704   if (synchronized) {
1705     // Allocate monitor and lock method
1706     lock_method();
1707   } else {
1708     // no synchronization necessary
1709 #ifdef ASSERT
1710     {
1711       Label L;
1712       __ movl(rax, access_flags);
1713       __ testl(rax, JVM_ACC_SYNCHRONIZED);
1714       __ jcc(Assembler::zero, L);
1715       __ stop("method needs synchronization");
1716       __ bind(L);


1718 #endif
1719   }
1720 
1721   // start execution
1722 #ifdef ASSERT
1723   {
1724     Label L;
1725      const Address monitor_block_top (rbp,
1726                  frame::interpreter_frame_monitor_block_top_offset * wordSize);
1727     __ movptr(rax, monitor_block_top);
1728     __ cmpptr(rax, rsp);
1729     __ jcc(Assembler::equal, L);
1730     __ stop("broken stack frame setup in interpreter");
1731     __ bind(L);
1732   }
1733 #endif
1734 
1735   // jvmti support
1736   __ notify_method_entry();
1737 
1738   // if (kind == AbstractInterpreter::java_lang_continuation_getStack) {
1739   //   __ stop("78787878787");
1740   // }
1741 
1742   __ dispatch_next(vtos);
1743 
1744   if (kind == AbstractInterpreter::java_lang_continuation_getStack) {
1745     __ stop("9090909090909");
1746   }
1747 
1748   // invocation counter overflow
1749   if (inc_counter) {
1750     if (ProfileInterpreter) {
1751       // We have decided to profile this method in the interpreter
1752       __ bind(profile_method);
1753       __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
1754       __ set_method_data_pointer_for_bcp();
1755       __ get_method(rbx);
1756       __ jmp(profile_method_continue);
1757     }
1758     // Handle overflow of counter and compile method
1759     __ bind(invocation_counter_overflow);
1760     generate_counter_overflow(continue_after_compile);
1761   }
1762 
1763   return entry_point;
1764 }
1765 
1766 //-----------------------------------------------------------------------------
1767 // Exceptions

