src/hotspot/cpu/sparc/interp_masm_sparc.cpp

rev 47415 : Add Thread Local handshakes and thread local polling

Old version (before the change):

  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "interp_masm_sparc.hpp"
  27 #include "interpreter/interpreter.hpp"
  28 #include "interpreter/interpreterRuntime.hpp"
  29 #include "logging/log.hpp"
  30 #include "oops/arrayOop.hpp"
  31 #include "oops/markOop.hpp"
  32 #include "oops/methodData.hpp"
  33 #include "oops/method.hpp"
  34 #include "oops/methodCounters.hpp"
  35 #include "prims/jvmtiExport.hpp"
  36 #include "prims/jvmtiThreadState.hpp"
  37 #include "runtime/basicLock.hpp"
  38 #include "runtime/biasedLocking.hpp"
  39 #include "runtime/sharedRuntime.hpp"
  40 #include "runtime/thread.inline.hpp"
  41 #include "utilities/align.hpp"
  42 
  43 // Implementation of InterpreterMacroAssembler
  44 
  45 // This file specializes the assembler with interpreter-specific macros
  46 
  47 const Address InterpreterMacroAssembler::l_tmp(FP, (frame::interpreter_frame_l_scratch_fp_offset * wordSize) + STACK_BIAS);
  48 const Address InterpreterMacroAssembler::d_tmp(FP, (frame::interpreter_frame_d_scratch_fp_offset * wordSize) + STACK_BIAS);
  49 
  50 void InterpreterMacroAssembler::jump_to_entry(address entry) {
  51   assert(entry, "Entry must have been generated by now");
  52   AddressLiteral al(entry);
  53   jump_to(al, G3_scratch);
  54   delayed()->nop();
  55 }
  56 
  57 void InterpreterMacroAssembler::compute_extra_locals_size_in_bytes(Register args_size, Register locals_size, Register delta) {
  58   // Note: this algorithm is also used by C1's OSR entry sequence.


 251 void InterpreterMacroAssembler::dispatch_normal(TosState state) {
 252   dispatch_base(state, Interpreter::normal_table(state));
 253 }
 254 
 255 
 256 void InterpreterMacroAssembler::dispatch_only(TosState state) {
 257   dispatch_base(state, Interpreter::dispatch_table(state));
 258 }
 259 
 260 
 261 // common code to dispatch and dispatch_only
 262 // dispatch value in Lbyte_code and increment Lbcp
 263 
 264 void InterpreterMacroAssembler::dispatch_Lbyte_code(TosState state, address* table, int bcp_incr, bool verify) {
 265   verify_FPU(1, state);
 266   // %%%%% maybe implement +VerifyActivationFrameSize here
 267   //verify_thread(); //too slow; we will just verify on method entry & exit
 268   if (verify) interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
 269   // dispatch table to use
 270   AddressLiteral tbl(table);
 271   sll(Lbyte_code, LogBytesPerWord, Lbyte_code);       // multiply by wordSize
 272   set(tbl, G3_scratch);                               // compute addr of table
 273   ld_ptr(G3_scratch, Lbyte_code, G3_scratch);         // get entry addr
 274   jmp( G3_scratch, 0 );
 275   if (bcp_incr != 0)  delayed()->inc(Lbcp, bcp_incr);
 276   else                delayed()->nop();
 277 }
 278 
 279 
 280 // Helpers for expression stack
 281 
 282 // Longs and doubles are Category 2 computational types in the
 283 // JVM specification (section 3.11.1) and take 2 expression stack or
 284 // local slots.
 285 // Aligning them on 32 bit with tagged stacks is hard because the code generated
 286 // for the dup* bytecodes depends on what types are already on the stack.
 287 // If the types are split into the two stack/local slots, that is much easier
 288 // (and we can use 0 for non-reference tags).
 289 
 290 // Known good alignment in _LP64 but unknown otherwise
 291 void InterpreterMacroAssembler::load_unaligned_double(Register r1, int offset, FloatRegister d) {
 292   assert_not_delayed();

New version (rev 47415):

  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "interp_masm_sparc.hpp"
  27 #include "interpreter/interpreter.hpp"
  28 #include "interpreter/interpreterRuntime.hpp"
  29 #include "logging/log.hpp"
  30 #include "oops/arrayOop.hpp"
  31 #include "oops/markOop.hpp"
  32 #include "oops/methodData.hpp"
  33 #include "oops/method.hpp"
  34 #include "oops/methodCounters.hpp"
  35 #include "prims/jvmtiExport.hpp"
  36 #include "prims/jvmtiThreadState.hpp"
  37 #include "runtime/basicLock.hpp"
  38 #include "runtime/biasedLocking.hpp"
  39 #include "runtime/safepointMechanism.hpp"
  40 #include "runtime/sharedRuntime.hpp"
  41 #include "runtime/thread.inline.hpp"
  42 #include "utilities/align.hpp"
  43 
  44 // Implementation of InterpreterMacroAssembler
  45 
  46 // This file specializes the assembler with interpreter-specific macros
  47 
  48 const Address InterpreterMacroAssembler::l_tmp(FP, (frame::interpreter_frame_l_scratch_fp_offset * wordSize) + STACK_BIAS);
  49 const Address InterpreterMacroAssembler::d_tmp(FP, (frame::interpreter_frame_d_scratch_fp_offset * wordSize) + STACK_BIAS);
  50 
  51 void InterpreterMacroAssembler::jump_to_entry(address entry) {
  52   assert(entry, "Entry must have been generated by now");
  53   AddressLiteral al(entry);
  54   jump_to(al, G3_scratch);
  55   delayed()->nop();
  56 }
  57 
  58 void InterpreterMacroAssembler::compute_extra_locals_size_in_bytes(Register args_size, Register locals_size, Register delta) {
  59   // Note: this algorithm is also used by C1's OSR entry sequence.


 252 void InterpreterMacroAssembler::dispatch_normal(TosState state) {
 253   dispatch_base(state, Interpreter::normal_table(state));
 254 }
 255 
 256 
 257 void InterpreterMacroAssembler::dispatch_only(TosState state) {
 258   dispatch_base(state, Interpreter::dispatch_table(state));
 259 }
 260 
 261 
 262 // common code to dispatch and dispatch_only
 263 // dispatch value in Lbyte_code and increment Lbcp
 264 
 265 void InterpreterMacroAssembler::dispatch_Lbyte_code(TosState state, address* table, int bcp_incr, bool verify) {
 266   verify_FPU(1, state);
 267   // %%%%% maybe implement +VerifyActivationFrameSize here
 268   //verify_thread(); //too slow; we will just verify on method entry & exit
 269   if (verify) interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
 270   // dispatch table to use
 271   AddressLiteral tbl(table);
 272   Label dispatch;
 273 
 274   if (SafepointMechanism::uses_thread_local_poll()) {
 275     AddressLiteral sfpt_tbl(Interpreter::safept_table(state));
 276     Label no_safepoint;
 277 
 278     if (tbl.value() != sfpt_tbl.value()) {
 279       ldx(Address(G2_thread, Thread::polling_page_offset()), G3_scratch, 0);
 280       // Armed page has poll_bit set, if poll bit is cleared just continue.
 281       and3(G3_scratch, SafepointMechanism::poll_bit(), G3_scratch);
 282 
 283       br_null_short(G3_scratch, Assembler::pt, no_safepoint);
 284       set(sfpt_tbl, G3_scratch);
 285       ba_short(dispatch);
 286     }
 287     bind(no_safepoint);
 288   }
 289 
 290   set(tbl, G3_scratch);                               // compute addr of table
 291   bind(dispatch);
 292   sll(Lbyte_code, LogBytesPerWord, Lbyte_code);       // multiply by wordSize
 293   ld_ptr(G3_scratch, Lbyte_code, G3_scratch);         // get entry addr
 294   jmp( G3_scratch, 0 );
 295   if (bcp_incr != 0)  delayed()->inc(Lbcp, bcp_incr);
 296   else                delayed()->nop();
 297 }
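
The new check above keys bytecode dispatch off the per-thread polling word: if the word's poll bit is armed, dispatch goes through Interpreter::safept_table(state) instead of the normal table, and the tbl.value() != sfpt_tbl.value() guard skips the test when the caller is already dispatching through the safepoint table. Below is a minimal C++ sketch of that decision, not HotSpot code; DispatchEntry, kPollBit, select_dispatch_table and dispatch are hypothetical stand-ins for illustration only.

    #include <cstdint>

    typedef void (*DispatchEntry)();

    // Assumed stand-in for SafepointMechanism::poll_bit().
    const uintptr_t kPollBit = 1;

    // Mirrors the added branch: an armed polling word (poll bit set) routes
    // dispatch through the safepoint table so the thread reaches the
    // safepoint/handshake machinery; otherwise the normal table is used.
    DispatchEntry* select_dispatch_table(uintptr_t polling_word,
                                         DispatchEntry* normal_table,
                                         DispatchEntry* safepoint_table) {
      if (polling_word & kPollBit) {
        return safepoint_table;
      }
      return normal_table;
    }

    // Rough equivalent of the ld_ptr + jmp pair: index the chosen table by
    // the bytecode and jump to that entry.
    void dispatch(uint8_t bytecode, uintptr_t polling_word,
                  DispatchEntry* normal_table, DispatchEntry* safepoint_table) {
      DispatchEntry* table = select_dispatch_table(polling_word, normal_table,
                                                   safepoint_table);
      table[bytecode]();
    }

Reading a thread-local word here, rather than a shared polling page, is what lets a handshake arm a single thread without bringing every thread to a global safepoint.
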
 298 
 299 
 300 // Helpers for expression stack
 301 
 302 // Longs and doubles are Category 2 computational types in the
 303 // JVM specification (section 3.11.1) and take 2 expression stack or
 304 // local slots.
 305 // Aligning them on 32 bit with tagged stacks is hard because the code generated
 306 // for the dup* bytecodes depends on what types are already on the stack.
 307 // If the types are split into the two stack/local slots, that is much easier
 308 // (and we can use 0 for non-reference tags).
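
As a concrete illustration of the comment above, splitting a 64-bit (category 2) value across two 32-bit slots can be sketched as follows; this is an illustrative sketch only, not how this LP64 port stores values, and split_long/join_long are hypothetical helpers.

    #include <cstdint>

    // Store one category 2 value in two 32-bit stack/local slots, so each
    // slot can be shuffled uniformly by dup*-style operations.
    void split_long(int64_t v, uint32_t* first_slot, uint32_t* second_slot) {
      *first_slot  = (uint32_t)((uint64_t)v >> 32);  // high half
      *second_slot = (uint32_t)(uint64_t)v;          // low half
    }

    // Reassemble the two slots into the original 64-bit value.
    int64_t join_long(uint32_t first_slot, uint32_t second_slot) {
      return (int64_t)(((uint64_t)first_slot << 32) | (uint64_t)second_slot);
    }
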
 309 
 310 // Known good alignment in _LP64 but unknown otherwise
 311 void InterpreterMacroAssembler::load_unaligned_double(Register r1, int offset, FloatRegister d) {
 312   assert_not_delayed();
