1 /* 2 * Copyright (c) 2007, 2014, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "asm/macroAssembler.hpp" 27 #include "interpreter/bytecodeHistogram.hpp" 28 #include "interpreter/cppInterpreter.hpp" 29 #include "interpreter/interpreter.hpp" 30 #include "interpreter/interpreterGenerator.hpp" 31 #include "interpreter/interpreterRuntime.hpp" 32 #include "oops/arrayOop.hpp" 33 #include "oops/methodData.hpp" 34 #include "oops/method.hpp" 35 #include "oops/oop.inline.hpp" 36 #include "prims/jvmtiExport.hpp" 37 #include "prims/jvmtiThreadState.hpp" 38 #include "runtime/arguments.hpp" 39 #include "runtime/deoptimization.hpp" 40 #include "runtime/frame.inline.hpp" 41 #include "runtime/interfaceSupport.hpp" 42 #include "runtime/sharedRuntime.hpp" 43 #include "runtime/stubRoutines.hpp" 44 #include "runtime/synchronizer.hpp" 45 #include "runtime/timer.hpp" 46 #include "runtime/vframeArray.hpp" 47 #include "utilities/debug.hpp" 48 #include "utilities/macros.hpp" 49 #ifdef SHARK 50 #include "shark/shark_globals.hpp" 51 #endif 52 53 #ifdef CC_INTERP 54 55 // Routine exists to make tracebacks look decent in debugger 56 // while we are recursed in the frame manager/c++ interpreter. 57 // We could use an address in the frame manager but having 58 // frames look natural in the debugger is a plus. 59 extern "C" void RecursiveInterpreterActivation(interpreterState istate ) 60 { 61 // 62 ShouldNotReachHere(); 63 } 64 65 66 #define __ _masm-> 67 #define STATE(field_name) (Address(state, byte_offset_of(BytecodeInterpreter, field_name))) 68 69 // default registers for state and sender_sp 70 // state and sender_sp are the same on 32bit because we have no choice. 71 // state could be rsi on 64bit but it is an arg reg and not callee save 72 // so r13 is better choice. 73 74 const Register state = NOT_LP64(rsi) LP64_ONLY(r13); 75 const Register sender_sp_on_entry = NOT_LP64(rsi) LP64_ONLY(r13); 76 77 // NEEDED for JVMTI? 78 // address AbstractInterpreter::_remove_activation_preserving_args_entry; 79 80 static address unctrap_frame_manager_entry = NULL; 81 82 static address deopt_frame_manager_return_atos = NULL; 83 static address deopt_frame_manager_return_btos = NULL; 84 static address deopt_frame_manager_return_itos = NULL; 85 static address deopt_frame_manager_return_ltos = NULL; 86 static address deopt_frame_manager_return_ftos = NULL; 87 static address deopt_frame_manager_return_dtos = NULL; 88 static address deopt_frame_manager_return_vtos = NULL; 89 90 int AbstractInterpreter::BasicType_as_index(BasicType type) { 91 int i = 0; 92 switch (type) { 93 case T_BOOLEAN: i = 0; break; 94 case T_CHAR : i = 1; break; 95 case T_BYTE : i = 2; break; 96 case T_SHORT : i = 3; break; 97 case T_INT : i = 4; break; 98 case T_VOID : i = 5; break; 99 case T_FLOAT : i = 8; break; 100 case T_LONG : i = 9; break; 101 case T_DOUBLE : i = 6; break; 102 case T_OBJECT : // fall through 103 case T_ARRAY : i = 7; break; 104 default : ShouldNotReachHere(); 105 } 106 assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers, "index out of bounds"); 107 return i; 108 } 109 110 // Is this pc anywhere within code owned by the interpreter? 111 // This only works for pc that might possibly be exposed to frame 112 // walkers. It clearly misses all of the actual c++ interpreter 113 // implementation 114 bool CppInterpreter::contains(address pc) { 115 return (_code->contains(pc) || 116 pc == CAST_FROM_FN_PTR(address, RecursiveInterpreterActivation)); 117 } 118 119 120 address CppInterpreterGenerator::generate_result_handler_for(BasicType type) { 121 address entry = __ pc(); 122 switch (type) { 123 case T_BOOLEAN: __ c2bool(rax); break; 124 case T_CHAR : __ andl(rax, 0xFFFF); break; 125 case T_BYTE : __ sign_extend_byte (rax); break; 126 case T_SHORT : __ sign_extend_short(rax); break; 127 case T_VOID : // fall thru 128 case T_LONG : // fall thru 129 case T_INT : /* nothing to do */ break; 130 131 case T_DOUBLE : 132 case T_FLOAT : 133 { 134 const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp(); 135 __ pop(t); // remove return address first 136 // Must return a result for interpreter or compiler. In SSE 137 // mode, results are returned in xmm0 and the FPU stack must 138 // be empty. 139 if (type == T_FLOAT && UseSSE >= 1) { 140 #ifndef _LP64 141 // Load ST0 142 __ fld_d(Address(rsp, 0)); 143 // Store as float and empty fpu stack 144 __ fstp_s(Address(rsp, 0)); 145 #endif // !_LP64 146 // and reload 147 __ movflt(xmm0, Address(rsp, 0)); 148 } else if (type == T_DOUBLE && UseSSE >= 2 ) { 149 __ movdbl(xmm0, Address(rsp, 0)); 150 } else { 151 // restore ST0 152 __ fld_d(Address(rsp, 0)); 153 } 154 // and pop the temp 155 __ addptr(rsp, 2 * wordSize); 156 __ push(t); // restore return address 157 } 158 break; 159 case T_OBJECT : 160 // retrieve result from frame 161 __ movptr(rax, STATE(_oop_temp)); 162 // and verify it 163 __ verify_oop(rax); 164 break; 165 default : ShouldNotReachHere(); 166 } 167 __ ret(0); // return from result handler 168 return entry; 169 } 170 171 // tosca based result to c++ interpreter stack based result. 172 // Result goes to top of native stack. 173 174 #undef EXTEND // SHOULD NOT BE NEEDED 175 address CppInterpreterGenerator::generate_tosca_to_stack_converter(BasicType type) { 176 // A result is in the tosca (abi result) from either a native method call or compiled 177 // code. Place this result on the java expression stack so C++ interpreter can use it. 178 address entry = __ pc(); 179 180 const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp(); 181 __ pop(t); // remove return address first 182 switch (type) { 183 case T_VOID: 184 break; 185 case T_BOOLEAN: 186 #ifdef EXTEND 187 __ c2bool(rax); 188 #endif 189 __ push(rax); 190 break; 191 case T_CHAR : 192 #ifdef EXTEND 193 __ andl(rax, 0xFFFF); 194 #endif 195 __ push(rax); 196 break; 197 case T_BYTE : 198 #ifdef EXTEND 199 __ sign_extend_byte (rax); 200 #endif 201 __ push(rax); 202 break; 203 case T_SHORT : 204 #ifdef EXTEND 205 __ sign_extend_short(rax); 206 #endif 207 __ push(rax); 208 break; 209 case T_LONG : 210 __ push(rdx); // pushes useless junk on 64bit 211 __ push(rax); 212 break; 213 case T_INT : 214 __ push(rax); 215 break; 216 case T_FLOAT : 217 // Result is in ST(0)/xmm0 218 __ subptr(rsp, wordSize); 219 if ( UseSSE < 1) { 220 __ fstp_s(Address(rsp, 0)); 221 } else { 222 __ movflt(Address(rsp, 0), xmm0); 223 } 224 break; 225 case T_DOUBLE : 226 __ subptr(rsp, 2*wordSize); 227 if ( UseSSE < 2 ) { 228 __ fstp_d(Address(rsp, 0)); 229 } else { 230 __ movdbl(Address(rsp, 0), xmm0); 231 } 232 break; 233 case T_OBJECT : 234 __ verify_oop(rax); // verify it 235 __ push(rax); 236 break; 237 default : ShouldNotReachHere(); 238 } 239 __ jmp(t); // return from result handler 240 return entry; 241 } 242 243 address CppInterpreterGenerator::generate_stack_to_stack_converter(BasicType type) { 244 // A result is in the java expression stack of the interpreted method that has just 245 // returned. Place this result on the java expression stack of the caller. 246 // 247 // The current interpreter activation in rsi/r13 is for the method just returning its 248 // result. So we know that the result of this method is on the top of the current 249 // execution stack (which is pre-pushed) and will be return to the top of the caller 250 // stack. The top of the callers stack is the bottom of the locals of the current 251 // activation. 252 // Because of the way activation are managed by the frame manager the value of rsp is 253 // below both the stack top of the current activation and naturally the stack top 254 // of the calling activation. This enable this routine to leave the return address 255 // to the frame manager on the stack and do a vanilla return. 256 // 257 // On entry: rsi/r13 - interpreter state of activation returning a (potential) result 258 // On Return: rsi/r13 - unchanged 259 // rax - new stack top for caller activation (i.e. activation in _prev_link) 260 // 261 // Can destroy rdx, rcx. 262 // 263 264 address entry = __ pc(); 265 const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp(); 266 switch (type) { 267 case T_VOID: 268 __ movptr(rax, STATE(_locals)); // pop parameters get new stack value 269 __ addptr(rax, wordSize); // account for prepush before we return 270 break; 271 case T_FLOAT : 272 case T_BOOLEAN: 273 case T_CHAR : 274 case T_BYTE : 275 case T_SHORT : 276 case T_INT : 277 // 1 word result 278 __ movptr(rdx, STATE(_stack)); 279 __ movptr(rax, STATE(_locals)); // address for result 280 __ movl(rdx, Address(rdx, wordSize)); // get result 281 __ movptr(Address(rax, 0), rdx); // and store it 282 break; 283 case T_LONG : 284 case T_DOUBLE : 285 // return top two words on current expression stack to caller's expression stack 286 // The caller's expression stack is adjacent to the current frame manager's intepretState 287 // except we allocated one extra word for this intepretState so we won't overwrite it 288 // when we return a two word result. 289 290 __ movptr(rax, STATE(_locals)); // address for result 291 __ movptr(rcx, STATE(_stack)); 292 __ subptr(rax, wordSize); // need addition word besides locals[0] 293 __ movptr(rdx, Address(rcx, 2*wordSize)); // get result word (junk in 64bit) 294 __ movptr(Address(rax, wordSize), rdx); // and store it 295 __ movptr(rdx, Address(rcx, wordSize)); // get result word 296 __ movptr(Address(rax, 0), rdx); // and store it 297 break; 298 case T_OBJECT : 299 __ movptr(rdx, STATE(_stack)); 300 __ movptr(rax, STATE(_locals)); // address for result 301 __ movptr(rdx, Address(rdx, wordSize)); // get result 302 __ verify_oop(rdx); // verify it 303 __ movptr(Address(rax, 0), rdx); // and store it 304 break; 305 default : ShouldNotReachHere(); 306 } 307 __ ret(0); 308 return entry; 309 } 310 311 address CppInterpreterGenerator::generate_stack_to_native_abi_converter(BasicType type) { 312 // A result is in the java expression stack of the interpreted method that has just 313 // returned. Place this result in the native abi that the caller expects. 314 // 315 // Similar to generate_stack_to_stack_converter above. Called at a similar time from the 316 // frame manager execept in this situation the caller is native code (c1/c2/call_stub) 317 // and so rather than return result onto caller's java expression stack we return the 318 // result in the expected location based on the native abi. 319 // On entry: rsi/r13 - interpreter state of activation returning a (potential) result 320 // On Return: rsi/r13 - unchanged 321 // Other registers changed [rax/rdx/ST(0) as needed for the result returned] 322 323 address entry = __ pc(); 324 switch (type) { 325 case T_VOID: 326 break; 327 case T_BOOLEAN: 328 case T_CHAR : 329 case T_BYTE : 330 case T_SHORT : 331 case T_INT : 332 __ movptr(rdx, STATE(_stack)); // get top of stack 333 __ movl(rax, Address(rdx, wordSize)); // get result word 1 334 break; 335 case T_LONG : 336 __ movptr(rdx, STATE(_stack)); // get top of stack 337 __ movptr(rax, Address(rdx, wordSize)); // get result low word 338 NOT_LP64(__ movl(rdx, Address(rdx, 2*wordSize));) // get result high word 339 break; 340 case T_FLOAT : 341 __ movptr(rdx, STATE(_stack)); // get top of stack 342 if ( UseSSE >= 1) { 343 __ movflt(xmm0, Address(rdx, wordSize)); 344 } else { 345 __ fld_s(Address(rdx, wordSize)); // pushd float result 346 } 347 break; 348 case T_DOUBLE : 349 __ movptr(rdx, STATE(_stack)); // get top of stack 350 if ( UseSSE > 1) { 351 __ movdbl(xmm0, Address(rdx, wordSize)); 352 } else { 353 __ fld_d(Address(rdx, wordSize)); // push double result 354 } 355 break; 356 case T_OBJECT : 357 __ movptr(rdx, STATE(_stack)); // get top of stack 358 __ movptr(rax, Address(rdx, wordSize)); // get result word 1 359 __ verify_oop(rax); // verify it 360 break; 361 default : ShouldNotReachHere(); 362 } 363 __ ret(0); 364 return entry; 365 } 366 367 address CppInterpreter::return_entry(TosState state, int length, Bytecodes::Code code) { 368 // make it look good in the debugger 369 return CAST_FROM_FN_PTR(address, RecursiveInterpreterActivation); 370 } 371 372 address CppInterpreter::deopt_entry(TosState state, int length) { 373 address ret = NULL; 374 if (length != 0) { 375 switch (state) { 376 case atos: ret = deopt_frame_manager_return_atos; break; 377 case btos: ret = deopt_frame_manager_return_btos; break; 378 case ctos: 379 case stos: 380 case itos: ret = deopt_frame_manager_return_itos; break; 381 case ltos: ret = deopt_frame_manager_return_ltos; break; 382 case ftos: ret = deopt_frame_manager_return_ftos; break; 383 case dtos: ret = deopt_frame_manager_return_dtos; break; 384 case vtos: ret = deopt_frame_manager_return_vtos; break; 385 } 386 } else { 387 ret = unctrap_frame_manager_entry; // re-execute the bytecode ( e.g. uncommon trap) 388 } 389 assert(ret != NULL, "Not initialized"); 390 return ret; 391 } 392 393 // C++ Interpreter 394 void CppInterpreterGenerator::generate_compute_interpreter_state(const Register state, 395 const Register locals, 396 const Register sender_sp, 397 bool native) { 398 399 // On entry the "locals" argument points to locals[0] (or where it would be in case no locals in 400 // a static method). "state" contains any previous frame manager state which we must save a link 401 // to in the newly generated state object. On return "state" is a pointer to the newly allocated 402 // state object. We must allocate and initialize a new interpretState object and the method 403 // expression stack. Because the returned result (if any) of the method will be placed on the caller's 404 // expression stack and this will overlap with locals[0] (and locals[1] if double/long) we must 405 // be sure to leave space on the caller's stack so that this result will not overwrite values when 406 // locals[0] and locals[1] do not exist (and in fact are return address and saved rbp). So when 407 // we are non-native we in essence ensure that locals[0-1] exist. We play an extra trick in 408 // non-product builds and initialize this last local with the previous interpreterState as 409 // this makes things look real nice in the debugger. 410 411 // State on entry 412 // Assumes locals == &locals[0] 413 // Assumes state == any previous frame manager state (assuming call path from c++ interpreter) 414 // Assumes rax = return address 415 // rcx == senders_sp 416 // rbx == method 417 // Modifies rcx, rdx, rax 418 // Returns: 419 // state == address of new interpreterState 420 // rsp == bottom of method's expression stack. 421 422 const Address const_offset (rbx, Method::const_offset()); 423 424 425 // On entry sp is the sender's sp. This includes the space for the arguments 426 // that the sender pushed. If the sender pushed no args (a static) and the 427 // caller returns a long then we need two words on the sender's stack which 428 // are not present (although when we return a restore full size stack the 429 // space will be present). If we didn't allocate two words here then when 430 // we "push" the result of the caller's stack we would overwrite the return 431 // address and the saved rbp. Not good. So simply allocate 2 words now 432 // just to be safe. This is the "static long no_params() method" issue. 433 // See Lo.java for a testcase. 434 // We don't need this for native calls because they return result in 435 // register and the stack is expanded in the caller before we store 436 // the results on the stack. 437 438 if (!native) { 439 #ifdef PRODUCT 440 __ subptr(rsp, 2*wordSize); 441 #else /* PRODUCT */ 442 __ push((int32_t)NULL_WORD); 443 __ push(state); // make it look like a real argument 444 #endif /* PRODUCT */ 445 } 446 447 // Now that we are assure of space for stack result, setup typical linkage 448 449 __ push(rax); 450 __ enter(); 451 452 __ mov(rax, state); // save current state 453 454 __ lea(rsp, Address(rsp, -(int)sizeof(BytecodeInterpreter))); 455 __ mov(state, rsp); 456 457 // rsi/r13 == state/locals rax == prevstate 458 459 // initialize the "shadow" frame so that use since C++ interpreter not directly 460 // recursive. Simpler to recurse but we can't trim expression stack as we call 461 // new methods. 462 __ movptr(STATE(_locals), locals); // state->_locals = locals() 463 __ movptr(STATE(_self_link), state); // point to self 464 __ movptr(STATE(_prev_link), rax); // state->_link = state on entry (NULL or previous state) 465 __ movptr(STATE(_sender_sp), sender_sp); // state->_sender_sp = sender_sp 466 #ifdef _LP64 467 __ movptr(STATE(_thread), r15_thread); // state->_bcp = codes() 468 #else 469 __ get_thread(rax); // get vm's javathread* 470 __ movptr(STATE(_thread), rax); // state->_bcp = codes() 471 #endif // _LP64 472 __ movptr(rdx, Address(rbx, Method::const_offset())); // get constantMethodOop 473 __ lea(rdx, Address(rdx, ConstMethod::codes_offset())); // get code base 474 if (native) { 475 __ movptr(STATE(_bcp), (int32_t)NULL_WORD); // state->_bcp = NULL 476 } else { 477 __ movptr(STATE(_bcp), rdx); // state->_bcp = codes() 478 } 479 __ xorptr(rdx, rdx); 480 __ movptr(STATE(_oop_temp), rdx); // state->_oop_temp = NULL (only really needed for native) 481 __ movptr(STATE(_mdx), rdx); // state->_mdx = NULL 482 __ movptr(rdx, Address(rbx, Method::const_offset())); 483 __ movptr(rdx, Address(rdx, ConstMethod::constants_offset())); 484 __ movptr(rdx, Address(rdx, ConstantPool::cache_offset_in_bytes())); 485 __ movptr(STATE(_constants), rdx); // state->_constants = constants() 486 487 __ movptr(STATE(_method), rbx); // state->_method = method() 488 __ movl(STATE(_msg), (int32_t) BytecodeInterpreter::method_entry); // state->_msg = initial method entry 489 __ movptr(STATE(_result._to_call._callee), (int32_t) NULL_WORD); // state->_result._to_call._callee_callee = NULL 490 491 492 __ movptr(STATE(_monitor_base), rsp); // set monitor block bottom (grows down) this would point to entry [0] 493 // entries run from -1..x where &monitor[x] == 494 495 { 496 // Must not attempt to lock method until we enter interpreter as gc won't be able to find the 497 // initial frame. However we allocate a free monitor so we don't have to shuffle the expression stack 498 // immediately. 499 500 // synchronize method 501 const Address access_flags (rbx, Method::access_flags_offset()); 502 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize; 503 Label not_synced; 504 505 __ movl(rax, access_flags); 506 __ testl(rax, JVM_ACC_SYNCHRONIZED); 507 __ jcc(Assembler::zero, not_synced); 508 509 // Allocate initial monitor and pre initialize it 510 // get synchronization object 511 512 Label done; 513 const int mirror_offset = in_bytes(Klass::java_mirror_offset()); 514 __ movl(rax, access_flags); 515 __ testl(rax, JVM_ACC_STATIC); 516 __ movptr(rax, Address(locals, 0)); // get receiver (assume this is frequent case) 517 __ jcc(Assembler::zero, done); 518 __ movptr(rax, Address(rbx, Method::const_offset())); 519 __ movptr(rax, Address(rax, ConstMethod::constants_offset())); 520 __ movptr(rax, Address(rax, ConstantPool::pool_holder_offset_in_bytes())); 521 __ movptr(rax, Address(rax, mirror_offset)); 522 __ bind(done); 523 // add space for monitor & lock 524 __ subptr(rsp, entry_size); // add space for a monitor entry 525 __ movptr(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax); // store object 526 __ bind(not_synced); 527 } 528 529 __ movptr(STATE(_stack_base), rsp); // set expression stack base ( == &monitors[-count]) 530 if (native) { 531 __ movptr(STATE(_stack), rsp); // set current expression stack tos 532 __ movptr(STATE(_stack_limit), rsp); 533 } else { 534 __ subptr(rsp, wordSize); // pre-push stack 535 __ movptr(STATE(_stack), rsp); // set current expression stack tos 536 537 // compute full expression stack limit 538 539 __ movptr(rdx, Address(rbx, Method::const_offset())); 540 __ load_unsigned_short(rdx, Address(rdx, ConstMethod::max_stack_offset())); // get size of expression stack in words 541 __ negptr(rdx); // so we can subtract in next step 542 // Allocate expression stack 543 __ lea(rsp, Address(rsp, rdx, Address::times_ptr, -Method::extra_stack_words())); 544 __ movptr(STATE(_stack_limit), rsp); 545 } 546 547 #ifdef _LP64 548 // Make sure stack is properly aligned and sized for the abi 549 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows 550 __ andptr(rsp, -16); // must be 16 byte boundary (see amd64 ABI) 551 #endif // _LP64 552 553 554 555 } 556 557 // Helpers for commoning out cases in the various type of method entries. 558 // 559 560 // increment invocation count & check for overflow 561 // 562 // Note: checking for negative value instead of overflow 563 // so we have a 'sticky' overflow test 564 // 565 // rbx,: method 566 // rcx: invocation counter 567 // 568 void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) { 569 Label done; 570 const Address invocation_counter(rax, 571 MethodCounters::invocation_counter_offset() + 572 InvocationCounter::counter_offset()); 573 const Address backedge_counter (rax, 574 MethodCounters::backedge_counter_offset() + 575 InvocationCounter::counter_offset()); 576 577 __ get_method_counters(rbx, rax, done); 578 579 if (ProfileInterpreter) { 580 __ incrementl(Address(rax, 581 MethodCounters::interpreter_invocation_counter_offset())); 582 } 583 // Update standard invocation counters 584 __ movl(rcx, invocation_counter); 585 __ increment(rcx, InvocationCounter::count_increment); 586 __ movl(invocation_counter, rcx); // save invocation count 587 588 __ movl(rax, backedge_counter); // load backedge counter 589 __ andl(rax, InvocationCounter::count_mask_value); // mask out the status bits 590 591 __ addl(rcx, rax); // add both counters 592 593 // profile_method is non-null only for interpreted method so 594 // profile_method != NULL == !native_call 595 // BytecodeInterpreter only calls for native so code is elided. 596 597 __ cmp32(rcx, 598 ExternalAddress((address)&InvocationCounter::InterpreterInvocationLimit)); 599 __ jcc(Assembler::aboveEqual, *overflow); 600 __ bind(done); 601 } 602 603 void InterpreterGenerator::generate_counter_overflow(Label* do_continue) { 604 605 // C++ interpreter on entry 606 // rsi/r13 - new interpreter state pointer 607 // rbp - interpreter frame pointer 608 // rbx - method 609 610 // On return (i.e. jump to entry_point) [ back to invocation of interpreter ] 611 // rbx, - method 612 // rcx - rcvr (assuming there is one) 613 // top of stack return address of interpreter caller 614 // rsp - sender_sp 615 616 // C++ interpreter only 617 // rsi/r13 - previous interpreter state pointer 618 619 // InterpreterRuntime::frequency_counter_overflow takes one argument 620 // indicating if the counter overflow occurs at a backwards branch (non-NULL bcp). 621 // The call returns the address of the verified entry point for the method or NULL 622 // if the compilation did not complete (either went background or bailed out). 623 __ movptr(rax, (int32_t)false); 624 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), rax); 625 626 // for c++ interpreter can rsi really be munged? 627 __ lea(state, Address(rbp, -(int)sizeof(BytecodeInterpreter))); // restore state 628 __ movptr(rbx, Address(state, byte_offset_of(BytecodeInterpreter, _method))); // restore method 629 __ movptr(rdi, Address(state, byte_offset_of(BytecodeInterpreter, _locals))); // get locals pointer 630 631 __ jmp(*do_continue, relocInfo::none); 632 633 } 634 635 void InterpreterGenerator::generate_stack_overflow_check(void) { 636 // see if we've got enough room on the stack for locals plus overhead. 637 // the expression stack grows down incrementally, so the normal guard 638 // page mechanism will work for that. 639 // 640 // Registers live on entry: 641 // 642 // Asm interpreter 643 // rdx: number of additional locals this frame needs (what we must check) 644 // rbx,: Method* 645 646 // C++ Interpreter 647 // rsi/r13: previous interpreter frame state object 648 // rdi: &locals[0] 649 // rcx: # of locals 650 // rdx: number of additional locals this frame needs (what we must check) 651 // rbx: Method* 652 653 // destroyed on exit 654 // rax, 655 656 // NOTE: since the additional locals are also always pushed (wasn't obvious in 657 // generate_method_entry) so the guard should work for them too. 658 // 659 660 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize; 661 662 // total overhead size: entry_size + (saved rbp, thru expr stack bottom). 663 // be sure to change this if you add/subtract anything to/from the overhead area 664 const int overhead_size = (int)sizeof(BytecodeInterpreter); 665 666 const int page_size = os::vm_page_size(); 667 668 Label after_frame_check; 669 670 // compute rsp as if this were going to be the last frame on 671 // the stack before the red zone 672 673 Label after_frame_check_pop; 674 675 // save rsi == caller's bytecode ptr (c++ previous interp. state) 676 // QQQ problem here?? rsi overload???? 677 __ push(state); 678 679 const Register thread = LP64_ONLY(r15_thread) NOT_LP64(rsi); 680 681 NOT_LP64(__ get_thread(thread)); 682 683 const Address stack_base(thread, Thread::stack_base_offset()); 684 const Address stack_size(thread, Thread::stack_size_offset()); 685 686 // locals + overhead, in bytes 687 // Always give one monitor to allow us to start interp if sync method. 688 // Any additional monitors need a check when moving the expression stack 689 const int one_monitor = frame::interpreter_frame_monitor_size() * wordSize; 690 __ movptr(rax, Address(rbx, Method::const_offset())); 691 __ load_unsigned_short(rax, Address(rax, ConstMethod::max_stack_offset())); // get size of expression stack in words 692 __ lea(rax, Address(noreg, rax, Interpreter::stackElementScale(), one_monitor+Method::extra_stack_words())); 693 __ lea(rax, Address(rax, rdx, Interpreter::stackElementScale(), overhead_size)); 694 695 #ifdef ASSERT 696 Label stack_base_okay, stack_size_okay; 697 // verify that thread stack base is non-zero 698 __ cmpptr(stack_base, (int32_t)0); 699 __ jcc(Assembler::notEqual, stack_base_okay); 700 __ stop("stack base is zero"); 701 __ bind(stack_base_okay); 702 // verify that thread stack size is non-zero 703 __ cmpptr(stack_size, (int32_t)0); 704 __ jcc(Assembler::notEqual, stack_size_okay); 705 __ stop("stack size is zero"); 706 __ bind(stack_size_okay); 707 #endif 708 709 // Add stack base to locals and subtract stack size 710 __ addptr(rax, stack_base); 711 __ subptr(rax, stack_size); 712 713 // We should have a magic number here for the size of the c++ interpreter frame. 714 // We can't actually tell this ahead of time. The debug version size is around 3k 715 // product is 1k and fastdebug is 4k 716 const int slop = 6 * K; 717 718 // Use the maximum number of pages we might bang. 719 const int max_pages = StackShadowPages > (StackRedPages+StackYellowPages) ? StackShadowPages : 720 (StackRedPages+StackYellowPages); 721 // Only need this if we are stack banging which is temporary while 722 // we're debugging. 723 __ addptr(rax, slop + 2*max_pages * page_size); 724 725 // check against the current stack bottom 726 __ cmpptr(rsp, rax); 727 __ jcc(Assembler::above, after_frame_check_pop); 728 729 __ pop(state); // get c++ prev state. 730 731 // throw exception return address becomes throwing pc 732 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError)); 733 734 // all done with frame size check 735 __ bind(after_frame_check_pop); 736 __ pop(state); 737 738 __ bind(after_frame_check); 739 } 740 741 // Find preallocated monitor and lock method (C++ interpreter) 742 // rbx - Method* 743 // 744 void CppInterpreterGenerator::lock_method() { 745 // assumes state == rsi/r13 == pointer to current interpreterState 746 // minimally destroys rax, rdx|c_rarg1, rdi 747 // 748 // synchronize method 749 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize; 750 const Address access_flags (rbx, Method::access_flags_offset()); 751 752 const Register monitor = NOT_LP64(rdx) LP64_ONLY(c_rarg1); 753 754 // find initial monitor i.e. monitors[-1] 755 __ movptr(monitor, STATE(_monitor_base)); // get monitor bottom limit 756 __ subptr(monitor, entry_size); // point to initial monitor 757 758 #ifdef ASSERT 759 { Label L; 760 __ movl(rax, access_flags); 761 __ testl(rax, JVM_ACC_SYNCHRONIZED); 762 __ jcc(Assembler::notZero, L); 763 __ stop("method doesn't need synchronization"); 764 __ bind(L); 765 } 766 #endif // ASSERT 767 // get synchronization object 768 { Label done; 769 const int mirror_offset = in_bytes(Klass::java_mirror_offset()); 770 __ movl(rax, access_flags); 771 __ movptr(rdi, STATE(_locals)); // prepare to get receiver (assume common case) 772 __ testl(rax, JVM_ACC_STATIC); 773 __ movptr(rax, Address(rdi, 0)); // get receiver (assume this is frequent case) 774 __ jcc(Assembler::zero, done); 775 __ movptr(rax, Address(rbx, Method::const_offset())); 776 __ movptr(rax, Address(rax, ConstMethod::constants_offset())); 777 __ movptr(rax, Address(rax, ConstantPool::pool_holder_offset_in_bytes())); 778 __ movptr(rax, Address(rax, mirror_offset)); 779 __ bind(done); 780 } 781 #ifdef ASSERT 782 { Label L; 783 __ cmpptr(rax, Address(monitor, BasicObjectLock::obj_offset_in_bytes())); // correct object? 784 __ jcc(Assembler::equal, L); 785 __ stop("wrong synchronization lobject"); 786 __ bind(L); 787 } 788 #endif // ASSERT 789 // can destroy rax, rdx|c_rarg1, rcx, and (via call_VM) rdi! 790 __ lock_object(monitor); 791 } 792 793 address InterpreterGenerator::generate_Reference_get_entry(void) { 794 #if INCLUDE_ALL_GCS 795 if (UseG1GC) { 796 // We need to generate have a routine that generates code to: 797 // * load the value in the referent field 798 // * passes that value to the pre-barrier. 799 // 800 // In the case of G1 this will record the value of the 801 // referent in an SATB buffer if marking is active. 802 // This will cause concurrent marking to mark the referent 803 // field as live. 804 Unimplemented(); 805 } 806 #endif // INCLUDE_ALL_GCS 807 808 // If G1 is not enabled then attempt to go through the accessor entry point 809 // Reference.get is an accessor 810 return NULL; 811 } 812 813 // 814 // C++ Interpreter stub for calling a native method. 815 // This sets up a somewhat different looking stack for calling the native method 816 // than the typical interpreter frame setup but still has the pointer to 817 // an interpreter state. 818 // 819 820 address InterpreterGenerator::generate_native_entry(bool synchronized) { 821 // determine code generation flags 822 bool inc_counter = UseCompiler || CountCompiledCalls; 823 824 // rbx: Method* 825 // rcx: receiver (unused) 826 // rsi/r13: previous interpreter state (if called from C++ interpreter) must preserve 827 // in any case. If called via c1/c2/call_stub rsi/r13 is junk (to use) but harmless 828 // to save/restore. 829 address entry_point = __ pc(); 830 831 const Address access_flags (rbx, Method::access_flags_offset()); 832 833 // rsi/r13 == state/locals rdi == prevstate 834 const Register locals = rdi; 835 836 // get parameter size (always needed) 837 { 838 const Address constMethod (rbx, Method::const_offset()); 839 const Address size_of_parameters(rcx, ConstMethod::size_of_parameters_offset()); 840 __ movptr(rcx, constMethod); 841 __ load_unsigned_short(rcx, size_of_parameters); 842 } 843 844 // rbx: Method* 845 // rcx: size of parameters 846 __ pop(rax); // get return address 847 // for natives the size of locals is zero 848 849 // compute beginning of parameters /locals 850 851 __ lea(locals, Address(rsp, rcx, Address::times_ptr, -wordSize)); 852 853 // initialize fixed part of activation frame 854 855 // Assumes rax = return address 856 857 // allocate and initialize new interpreterState and method expression stack 858 // IN(locals) -> locals 859 // IN(state) -> previous frame manager state (NULL from stub/c1/c2) 860 // destroys rax, rcx, rdx 861 // OUT (state) -> new interpreterState 862 // OUT(rsp) -> bottom of methods expression stack 863 864 // save sender_sp 865 __ mov(rcx, sender_sp_on_entry); 866 // start with NULL previous state 867 __ movptr(state, (int32_t)NULL_WORD); 868 generate_compute_interpreter_state(state, locals, rcx, true); 869 870 #ifdef ASSERT 871 { Label L; 872 __ movptr(rax, STATE(_stack_base)); 873 #ifdef _LP64 874 // duplicate the alignment rsp got after setting stack_base 875 __ subptr(rax, frame::arg_reg_save_area_bytes); // windows 876 __ andptr(rax, -16); // must be 16 byte boundary (see amd64 ABI) 877 #endif // _LP64 878 __ cmpptr(rax, rsp); 879 __ jcc(Assembler::equal, L); 880 __ stop("broken stack frame setup in interpreter"); 881 __ bind(L); 882 } 883 #endif 884 885 const Register unlock_thread = LP64_ONLY(r15_thread) NOT_LP64(rax); 886 NOT_LP64(__ movptr(unlock_thread, STATE(_thread));) // get thread 887 // Since at this point in the method invocation the exception handler 888 // would try to exit the monitor of synchronized methods which hasn't 889 // been entered yet, we set the thread local variable 890 // _do_not_unlock_if_synchronized to true. The remove_activation will 891 // check this flag. 892 893 const Address do_not_unlock_if_synchronized(unlock_thread, 894 in_bytes(JavaThread::do_not_unlock_if_synchronized_offset())); 895 __ movbool(do_not_unlock_if_synchronized, true); 896 897 // make sure method is native & not abstract 898 #ifdef ASSERT 899 __ movl(rax, access_flags); 900 { 901 Label L; 902 __ testl(rax, JVM_ACC_NATIVE); 903 __ jcc(Assembler::notZero, L); 904 __ stop("tried to execute non-native method as native"); 905 __ bind(L); 906 } 907 { Label L; 908 __ testl(rax, JVM_ACC_ABSTRACT); 909 __ jcc(Assembler::zero, L); 910 __ stop("tried to execute abstract method in interpreter"); 911 __ bind(L); 912 } 913 #endif 914 915 916 // increment invocation count & check for overflow 917 Label invocation_counter_overflow; 918 if (inc_counter) { 919 generate_counter_incr(&invocation_counter_overflow, NULL, NULL); 920 } 921 922 Label continue_after_compile; 923 924 __ bind(continue_after_compile); 925 926 bang_stack_shadow_pages(true); 927 928 // reset the _do_not_unlock_if_synchronized flag 929 NOT_LP64(__ movl(rax, STATE(_thread));) // get thread 930 __ movbool(do_not_unlock_if_synchronized, false); 931 932 933 // check for synchronized native methods 934 // 935 // Note: This must happen *after* invocation counter check, since 936 // when overflow happens, the method should not be locked. 937 if (synchronized) { 938 // potentially kills rax, rcx, rdx, rdi 939 lock_method(); 940 } else { 941 // no synchronization necessary 942 #ifdef ASSERT 943 { Label L; 944 __ movl(rax, access_flags); 945 __ testl(rax, JVM_ACC_SYNCHRONIZED); 946 __ jcc(Assembler::zero, L); 947 __ stop("method needs synchronization"); 948 __ bind(L); 949 } 950 #endif 951 } 952 953 // start execution 954 955 // jvmti support 956 __ notify_method_entry(); 957 958 // work registers 959 const Register method = rbx; 960 const Register thread = LP64_ONLY(r15_thread) NOT_LP64(rdi); 961 const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp(); // rcx|rscratch1 962 963 // allocate space for parameters 964 __ movptr(method, STATE(_method)); 965 __ verify_method_ptr(method); 966 { 967 const Address constMethod (method, Method::const_offset()); 968 const Address size_of_parameters(t, ConstMethod::size_of_parameters_offset()); 969 __ movptr(t, constMethod); 970 __ load_unsigned_short(t, size_of_parameters); 971 } 972 __ shll(t, 2); 973 #ifdef _LP64 974 __ subptr(rsp, t); 975 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows 976 __ andptr(rsp, -16); // must be 16 byte boundary (see amd64 ABI) 977 #else 978 __ addptr(t, 2*wordSize); // allocate two more slots for JNIEnv and possible mirror 979 __ subptr(rsp, t); 980 __ andptr(rsp, -(StackAlignmentInBytes)); // gcc needs 16 byte aligned stacks to do XMM intrinsics 981 #endif // _LP64 982 983 // get signature handler 984 Label pending_exception_present; 985 986 { Label L; 987 __ movptr(t, Address(method, Method::signature_handler_offset())); 988 __ testptr(t, t); 989 __ jcc(Assembler::notZero, L); 990 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), method, false); 991 __ movptr(method, STATE(_method)); 992 __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD); 993 __ jcc(Assembler::notEqual, pending_exception_present); 994 __ verify_method_ptr(method); 995 __ movptr(t, Address(method, Method::signature_handler_offset())); 996 __ bind(L); 997 } 998 #ifdef ASSERT 999 { 1000 Label L; 1001 __ push(t); 1002 __ get_thread(t); // get vm's javathread* 1003 __ cmpptr(t, STATE(_thread)); 1004 __ jcc(Assembler::equal, L); 1005 __ int3(); 1006 __ bind(L); 1007 __ pop(t); 1008 } 1009 #endif // 1010 1011 const Register from_ptr = InterpreterRuntime::SignatureHandlerGenerator::from(); 1012 // call signature handler 1013 assert(InterpreterRuntime::SignatureHandlerGenerator::to () == rsp, "adjust this code"); 1014 1015 // The generated handlers do not touch RBX (the method oop). 1016 // However, large signatures cannot be cached and are generated 1017 // each time here. The slow-path generator will blow RBX 1018 // sometime, so we must reload it after the call. 1019 __ movptr(from_ptr, STATE(_locals)); // get the from pointer 1020 __ call(t); 1021 __ movptr(method, STATE(_method)); 1022 __ verify_method_ptr(method); 1023 1024 // result handler is in rax 1025 // set result handler 1026 __ movptr(STATE(_result_handler), rax); 1027 1028 1029 // get native function entry point 1030 { Label L; 1031 __ movptr(rax, Address(method, Method::native_function_offset())); 1032 __ testptr(rax, rax); 1033 __ jcc(Assembler::notZero, L); 1034 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), method); 1035 __ movptr(method, STATE(_method)); 1036 __ verify_method_ptr(method); 1037 __ movptr(rax, Address(method, Method::native_function_offset())); 1038 __ bind(L); 1039 } 1040 1041 // pass mirror handle if static call 1042 { Label L; 1043 const int mirror_offset = in_bytes(Klass::java_mirror_offset()); 1044 __ movl(t, Address(method, Method::access_flags_offset())); 1045 __ testl(t, JVM_ACC_STATIC); 1046 __ jcc(Assembler::zero, L); 1047 // get mirror 1048 __ movptr(t, Address(method, Method:: const_offset())); 1049 __ movptr(t, Address(t, ConstMethod::constants_offset())); 1050 __ movptr(t, Address(t, ConstantPool::pool_holder_offset_in_bytes())); 1051 __ movptr(t, Address(t, mirror_offset)); 1052 // copy mirror into activation object 1053 __ movptr(STATE(_oop_temp), t); 1054 // pass handle to mirror 1055 #ifdef _LP64 1056 __ lea(c_rarg1, STATE(_oop_temp)); 1057 #else 1058 __ lea(t, STATE(_oop_temp)); 1059 __ movptr(Address(rsp, wordSize), t); 1060 #endif // _LP64 1061 __ bind(L); 1062 } 1063 #ifdef ASSERT 1064 { 1065 Label L; 1066 __ push(t); 1067 __ get_thread(t); // get vm's javathread* 1068 __ cmpptr(t, STATE(_thread)); 1069 __ jcc(Assembler::equal, L); 1070 __ int3(); 1071 __ bind(L); 1072 __ pop(t); 1073 } 1074 #endif // 1075 1076 // pass JNIEnv 1077 #ifdef _LP64 1078 __ lea(c_rarg0, Address(thread, JavaThread::jni_environment_offset())); 1079 #else 1080 __ movptr(thread, STATE(_thread)); // get thread 1081 __ lea(t, Address(thread, JavaThread::jni_environment_offset())); 1082 1083 __ movptr(Address(rsp, 0), t); 1084 #endif // _LP64 1085 1086 #ifdef ASSERT 1087 { 1088 Label L; 1089 __ push(t); 1090 __ get_thread(t); // get vm's javathread* 1091 __ cmpptr(t, STATE(_thread)); 1092 __ jcc(Assembler::equal, L); 1093 __ int3(); 1094 __ bind(L); 1095 __ pop(t); 1096 } 1097 #endif // 1098 1099 #ifdef ASSERT 1100 { Label L; 1101 __ movl(t, Address(thread, JavaThread::thread_state_offset())); 1102 __ cmpl(t, _thread_in_Java); 1103 __ jcc(Assembler::equal, L); 1104 __ stop("Wrong thread state in native stub"); 1105 __ bind(L); 1106 } 1107 #endif 1108 1109 // Change state to native (we save the return address in the thread, since it might not 1110 // be pushed on the stack when we do a a stack traversal). It is enough that the pc() 1111 // points into the right code segment. It does not have to be the correct return pc. 1112 1113 __ set_last_Java_frame(thread, noreg, rbp, __ pc()); 1114 1115 __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native); 1116 1117 __ call(rax); 1118 1119 // result potentially in rdx:rax or ST0 1120 __ movptr(method, STATE(_method)); 1121 NOT_LP64(__ movptr(thread, STATE(_thread));) // get thread 1122 1123 // The potential result is in ST(0) & rdx:rax 1124 // With C++ interpreter we leave any possible result in ST(0) until we are in result handler and then 1125 // we do the appropriate stuff for returning the result. rdx:rax must always be saved because just about 1126 // anything we do here will destroy it, st(0) is only saved if we re-enter the vm where it would 1127 // be destroyed. 1128 // It is safe to do these pushes because state is _thread_in_native and return address will be found 1129 // via _last_native_pc and not via _last_jave_sp 1130 1131 // Must save the value of ST(0)/xmm0 since it could be destroyed before we get to result handler 1132 { Label Lpush, Lskip; 1133 ExternalAddress float_handler(AbstractInterpreter::result_handler(T_FLOAT)); 1134 ExternalAddress double_handler(AbstractInterpreter::result_handler(T_DOUBLE)); 1135 __ cmpptr(STATE(_result_handler), float_handler.addr()); 1136 __ jcc(Assembler::equal, Lpush); 1137 __ cmpptr(STATE(_result_handler), double_handler.addr()); 1138 __ jcc(Assembler::notEqual, Lskip); 1139 __ bind(Lpush); 1140 __ subptr(rsp, 2*wordSize); 1141 if ( UseSSE < 2 ) { 1142 __ fstp_d(Address(rsp, 0)); 1143 } else { 1144 __ movdbl(Address(rsp, 0), xmm0); 1145 } 1146 __ bind(Lskip); 1147 } 1148 1149 // save rax:rdx for potential use by result handler. 1150 __ push(rax); 1151 #ifndef _LP64 1152 __ push(rdx); 1153 #endif // _LP64 1154 1155 // Verify or restore cpu control state after JNI call 1156 __ restore_cpu_control_state_after_jni(); 1157 1158 // change thread state 1159 __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native_trans); 1160 if(os::is_MP()) { 1161 // Write serialization page so VM thread can do a pseudo remote membar. 1162 // We use the current thread pointer to calculate a thread specific 1163 // offset to write to within the page. This minimizes bus traffic 1164 // due to cache line collision. 1165 __ serialize_memory(thread, rcx); 1166 } 1167 1168 // check for safepoint operation in progress and/or pending suspend requests 1169 { Label Continue; 1170 1171 __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()), 1172 SafepointSynchronize::_not_synchronized); 1173 1174 // threads running native code and they are expected to self-suspend 1175 // when leaving the _thread_in_native state. We need to check for 1176 // pending suspend requests here. 1177 Label L; 1178 __ jcc(Assembler::notEqual, L); 1179 __ cmpl(Address(thread, JavaThread::suspend_flags_offset()), 0); 1180 __ jcc(Assembler::equal, Continue); 1181 __ bind(L); 1182 1183 // Don't use call_VM as it will see a possible pending exception and forward it 1184 // and never return here preventing us from clearing _last_native_pc down below. 1185 // Also can't use call_VM_leaf either as it will check to see if rsi & rdi are 1186 // preserved and correspond to the bcp/locals pointers. 1187 // 1188 1189 ((MacroAssembler*)_masm)->call_VM_leaf(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans), 1190 thread); 1191 __ increment(rsp, wordSize); 1192 1193 __ movptr(method, STATE(_method)); 1194 __ verify_method_ptr(method); 1195 __ movptr(thread, STATE(_thread)); // get thread 1196 1197 __ bind(Continue); 1198 } 1199 1200 // change thread state 1201 __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_Java); 1202 1203 __ reset_last_Java_frame(thread, true, true); 1204 1205 // reset handle block 1206 __ movptr(t, Address(thread, JavaThread::active_handles_offset())); 1207 __ movl(Address(t, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD); 1208 1209 // If result was an oop then unbox and save it in the frame 1210 { Label L; 1211 Label no_oop, store_result; 1212 ExternalAddress oop_handler(AbstractInterpreter::result_handler(T_OBJECT)); 1213 __ cmpptr(STATE(_result_handler), oop_handler.addr()); 1214 __ jcc(Assembler::notEqual, no_oop); 1215 #ifndef _LP64 1216 __ pop(rdx); 1217 #endif // _LP64 1218 __ pop(rax); 1219 __ testptr(rax, rax); 1220 __ jcc(Assembler::zero, store_result); 1221 // unbox 1222 __ movptr(rax, Address(rax, 0)); 1223 __ bind(store_result); 1224 __ movptr(STATE(_oop_temp), rax); 1225 // keep stack depth as expected by pushing oop which will eventually be discarded 1226 __ push(rax); 1227 #ifndef _LP64 1228 __ push(rdx); 1229 #endif // _LP64 1230 __ bind(no_oop); 1231 } 1232 1233 { 1234 Label no_reguard; 1235 __ cmpl(Address(thread, JavaThread::stack_guard_state_offset()), JavaThread::stack_guard_yellow_disabled); 1236 __ jcc(Assembler::notEqual, no_reguard); 1237 1238 __ pusha(); 1239 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages))); 1240 __ popa(); 1241 1242 __ bind(no_reguard); 1243 } 1244 1245 1246 // QQQ Seems like for native methods we simply return and the caller will see the pending 1247 // exception and do the right thing. Certainly the interpreter will, don't know about 1248 // compiled methods. 1249 // Seems that the answer to above is no this is wrong. The old code would see the exception 1250 // and forward it before doing the unlocking and notifying jvmdi that method has exited. 1251 // This seems wrong need to investigate the spec. 1252 1253 // handle exceptions (exception handling will handle unlocking!) 1254 { Label L; 1255 __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD); 1256 __ jcc(Assembler::zero, L); 1257 __ bind(pending_exception_present); 1258 1259 // There are potential results on the stack (rax/rdx, ST(0)) we ignore these and simply 1260 // return and let caller deal with exception. This skips the unlocking here which 1261 // seems wrong but seems to be what asm interpreter did. Can't find this in the spec. 1262 // Note: must preverve method in rbx 1263 // 1264 1265 // remove activation 1266 1267 __ movptr(t, STATE(_sender_sp)); 1268 __ leave(); // remove frame anchor 1269 __ pop(rdi); // get return address 1270 __ movptr(state, STATE(_prev_link)); // get previous state for return 1271 __ mov(rsp, t); // set sp to sender sp 1272 __ push(rdi); // push throwing pc 1273 // The skips unlocking!! This seems to be what asm interpreter does but seems 1274 // very wrong. Not clear if this violates the spec. 1275 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry())); 1276 __ bind(L); 1277 } 1278 1279 // do unlocking if necessary 1280 { Label L; 1281 __ movl(t, Address(method, Method::access_flags_offset())); 1282 __ testl(t, JVM_ACC_SYNCHRONIZED); 1283 __ jcc(Assembler::zero, L); 1284 // the code below should be shared with interpreter macro assembler implementation 1285 { Label unlock; 1286 const Register monitor = NOT_LP64(rdx) LP64_ONLY(c_rarg1); 1287 // BasicObjectLock will be first in list, since this is a synchronized method. However, need 1288 // to check that the object has not been unlocked by an explicit monitorexit bytecode. 1289 __ movptr(monitor, STATE(_monitor_base)); 1290 __ subptr(monitor, frame::interpreter_frame_monitor_size() * wordSize); // address of initial monitor 1291 1292 __ movptr(t, Address(monitor, BasicObjectLock::obj_offset_in_bytes())); 1293 __ testptr(t, t); 1294 __ jcc(Assembler::notZero, unlock); 1295 1296 // Entry already unlocked, need to throw exception 1297 __ MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception)); 1298 __ should_not_reach_here(); 1299 1300 __ bind(unlock); 1301 __ unlock_object(monitor); 1302 // unlock can blow rbx so restore it for path that needs it below 1303 __ movptr(method, STATE(_method)); 1304 } 1305 __ bind(L); 1306 } 1307 1308 // jvmti support 1309 // Note: This must happen _after_ handling/throwing any exceptions since 1310 // the exception handler code notifies the runtime of method exits 1311 // too. If this happens before, method entry/exit notifications are 1312 // not properly paired (was bug - gri 11/22/99). 1313 __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI); 1314 1315 // restore potential result in rdx:rax, call result handler to restore potential result in ST0 & handle result 1316 #ifndef _LP64 1317 __ pop(rdx); 1318 #endif // _LP64 1319 __ pop(rax); 1320 __ movptr(t, STATE(_result_handler)); // get result handler 1321 __ call(t); // call result handler to convert to tosca form 1322 1323 // remove activation 1324 1325 __ movptr(t, STATE(_sender_sp)); 1326 1327 __ leave(); // remove frame anchor 1328 __ pop(rdi); // get return address 1329 __ movptr(state, STATE(_prev_link)); // get previous state for return (if c++ interpreter was caller) 1330 __ mov(rsp, t); // set sp to sender sp 1331 __ jmp(rdi); 1332 1333 // invocation counter overflow 1334 if (inc_counter) { 1335 // Handle overflow of counter and compile method 1336 __ bind(invocation_counter_overflow); 1337 generate_counter_overflow(&continue_after_compile); 1338 } 1339 1340 return entry_point; 1341 } 1342 1343 // Generate entries that will put a result type index into rcx 1344 void CppInterpreterGenerator::generate_deopt_handling() { 1345 1346 Label return_from_deopt_common; 1347 1348 // Generate entries that will put a result type index into rcx 1349 // deopt needs to jump to here to enter the interpreter (return a result) 1350 deopt_frame_manager_return_atos = __ pc(); 1351 1352 // rax is live here 1353 __ movl(rcx, AbstractInterpreter::BasicType_as_index(T_OBJECT)); // Result stub address array index 1354 __ jmp(return_from_deopt_common); 1355 1356 1357 // deopt needs to jump to here to enter the interpreter (return a result) 1358 deopt_frame_manager_return_btos = __ pc(); 1359 1360 // rax is live here 1361 __ movl(rcx, AbstractInterpreter::BasicType_as_index(T_BOOLEAN)); // Result stub address array index 1362 __ jmp(return_from_deopt_common); 1363 1364 // deopt needs to jump to here to enter the interpreter (return a result) 1365 deopt_frame_manager_return_itos = __ pc(); 1366 1367 // rax is live here 1368 __ movl(rcx, AbstractInterpreter::BasicType_as_index(T_INT)); // Result stub address array index 1369 __ jmp(return_from_deopt_common); 1370 1371 // deopt needs to jump to here to enter the interpreter (return a result) 1372 1373 deopt_frame_manager_return_ltos = __ pc(); 1374 // rax,rdx are live here 1375 __ movl(rcx, AbstractInterpreter::BasicType_as_index(T_LONG)); // Result stub address array index 1376 __ jmp(return_from_deopt_common); 1377 1378 // deopt needs to jump to here to enter the interpreter (return a result) 1379 1380 deopt_frame_manager_return_ftos = __ pc(); 1381 // st(0) is live here 1382 __ movl(rcx, AbstractInterpreter::BasicType_as_index(T_FLOAT)); // Result stub address array index 1383 __ jmp(return_from_deopt_common); 1384 1385 // deopt needs to jump to here to enter the interpreter (return a result) 1386 deopt_frame_manager_return_dtos = __ pc(); 1387 1388 // st(0) is live here 1389 __ movl(rcx, AbstractInterpreter::BasicType_as_index(T_DOUBLE)); // Result stub address array index 1390 __ jmp(return_from_deopt_common); 1391 1392 // deopt needs to jump to here to enter the interpreter (return a result) 1393 deopt_frame_manager_return_vtos = __ pc(); 1394 1395 __ movl(rcx, AbstractInterpreter::BasicType_as_index(T_VOID)); 1396 1397 // Deopt return common 1398 // an index is present in rcx that lets us move any possible result being 1399 // return to the interpreter's stack 1400 // 1401 // Because we have a full sized interpreter frame on the youngest 1402 // activation the stack is pushed too deep to share the tosca to 1403 // stack converters directly. We shrink the stack to the desired 1404 // amount and then push result and then re-extend the stack. 1405 // We could have the code in size_activation layout a short 1406 // frame for the top activation but that would look different 1407 // than say sparc (which needs a full size activation because 1408 // the windows are in the way. Really it could be short? QQQ 1409 // 1410 __ bind(return_from_deopt_common); 1411 1412 __ lea(state, Address(rbp, -(int)sizeof(BytecodeInterpreter))); 1413 1414 // setup rsp so we can push the "result" as needed. 1415 __ movptr(rsp, STATE(_stack)); // trim stack (is prepushed) 1416 __ addptr(rsp, wordSize); // undo prepush 1417 1418 ExternalAddress tosca_to_stack((address)CppInterpreter::_tosca_to_stack); 1419 // Address index(noreg, rcx, Address::times_ptr); 1420 __ movptr(rcx, ArrayAddress(tosca_to_stack, Address(noreg, rcx, Address::times_ptr))); 1421 // __ movl(rcx, Address(noreg, rcx, Address::times_ptr, int(AbstractInterpreter::_tosca_to_stack))); 1422 __ call(rcx); // call result converter 1423 1424 __ movl(STATE(_msg), (int)BytecodeInterpreter::deopt_resume); 1425 __ lea(rsp, Address(rsp, -wordSize)); // prepush stack (result if any already present) 1426 __ movptr(STATE(_stack), rsp); // inform interpreter of new stack depth (parameters removed, 1427 // result if any on stack already ) 1428 __ movptr(rsp, STATE(_stack_limit)); // restore expression stack to full depth 1429 } 1430 1431 // Generate the code to handle a more_monitors message from the c++ interpreter 1432 void CppInterpreterGenerator::generate_more_monitors() { 1433 1434 1435 Label entry, loop; 1436 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize; 1437 // 1. compute new pointers // rsp: old expression stack top 1438 __ movptr(rdx, STATE(_stack_base)); // rdx: old expression stack bottom 1439 __ subptr(rsp, entry_size); // move expression stack top limit 1440 __ subptr(STATE(_stack), entry_size); // update interpreter stack top 1441 __ subptr(STATE(_stack_limit), entry_size); // inform interpreter 1442 __ subptr(rdx, entry_size); // move expression stack bottom 1443 __ movptr(STATE(_stack_base), rdx); // inform interpreter 1444 __ movptr(rcx, STATE(_stack)); // set start value for copy loop 1445 __ jmp(entry); 1446 // 2. move expression stack contents 1447 __ bind(loop); 1448 __ movptr(rbx, Address(rcx, entry_size)); // load expression stack word from old location 1449 __ movptr(Address(rcx, 0), rbx); // and store it at new location 1450 __ addptr(rcx, wordSize); // advance to next word 1451 __ bind(entry); 1452 __ cmpptr(rcx, rdx); // check if bottom reached 1453 __ jcc(Assembler::notEqual, loop); // if not at bottom then copy next word 1454 // now zero the slot so we can find it. 1455 __ movptr(Address(rdx, BasicObjectLock::obj_offset_in_bytes()), (int32_t) NULL_WORD); 1456 __ movl(STATE(_msg), (int)BytecodeInterpreter::got_monitors); 1457 } 1458 1459 1460 // Initial entry to C++ interpreter from the call_stub. 1461 // This entry point is called the frame manager since it handles the generation 1462 // of interpreter activation frames via requests directly from the vm (via call_stub) 1463 // and via requests from the interpreter. The requests from the call_stub happen 1464 // directly thru the entry point. Requests from the interpreter happen via returning 1465 // from the interpreter and examining the message the interpreter has returned to 1466 // the frame manager. The frame manager can take the following requests: 1467 1468 // NO_REQUEST - error, should never happen. 1469 // MORE_MONITORS - need a new monitor. Shuffle the expression stack on down and 1470 // allocate a new monitor. 1471 // CALL_METHOD - setup a new activation to call a new method. Very similar to what 1472 // happens during entry during the entry via the call stub. 1473 // RETURN_FROM_METHOD - remove an activation. Return to interpreter or call stub. 1474 // 1475 // Arguments: 1476 // 1477 // rbx: Method* 1478 // rcx: receiver - unused (retrieved from stack as needed) 1479 // rsi/r13: previous frame manager state (NULL from the call_stub/c1/c2) 1480 // 1481 // 1482 // Stack layout at entry 1483 // 1484 // [ return address ] <--- rsp 1485 // [ parameter n ] 1486 // ... 1487 // [ parameter 1 ] 1488 // [ expression stack ] 1489 // 1490 // 1491 // We are free to blow any registers we like because the call_stub which brought us here 1492 // initially has preserved the callee save registers already. 1493 // 1494 // 1495 1496 static address interpreter_frame_manager = NULL; 1497 1498 address InterpreterGenerator::generate_normal_entry(bool synchronized) { 1499 1500 // rbx: Method* 1501 // rsi/r13: sender sp 1502 1503 // Because we redispatch "recursive" interpreter entries thru this same entry point 1504 // the "input" register usage is a little strange and not what you expect coming 1505 // from the call_stub. From the call stub rsi/rdi (current/previous) interpreter 1506 // state are NULL but on "recursive" dispatches they are what you'd expect. 1507 // rsi: current interpreter state (C++ interpreter) must preserve (null from call_stub/c1/c2) 1508 1509 1510 // A single frame manager is plenty as we don't specialize for synchronized. We could and 1511 // the code is pretty much ready. Would need to change the test below and for good measure 1512 // modify generate_interpreter_state to only do the (pre) sync stuff stuff for synchronized 1513 // routines. Not clear this is worth it yet. 1514 1515 if (interpreter_frame_manager) return interpreter_frame_manager; 1516 1517 address entry_point = __ pc(); 1518 1519 Label dispatch_entry_2; 1520 __ movptr(rcx, sender_sp_on_entry); 1521 __ movptr(state, (int32_t)NULL_WORD); // no current activation 1522 1523 __ jmp(dispatch_entry_2); 1524 1525 const Register locals = rdi; 1526 1527 Label re_dispatch; 1528 1529 __ bind(re_dispatch); 1530 1531 // save sender sp (doesn't include return address 1532 __ lea(rcx, Address(rsp, wordSize)); 1533 1534 __ bind(dispatch_entry_2); 1535 1536 // save sender sp 1537 __ push(rcx); 1538 1539 const Address constMethod (rbx, Method::const_offset()); 1540 const Address access_flags (rbx, Method::access_flags_offset()); 1541 const Address size_of_parameters(rdx, ConstMethod::size_of_parameters_offset()); 1542 const Address size_of_locals (rdx, ConstMethod::size_of_locals_offset()); 1543 1544 // const Address monitor_block_top (rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize); 1545 // const Address monitor_block_bot (rbp, frame::interpreter_frame_initial_sp_offset * wordSize); 1546 // const Address monitor(rbp, frame::interpreter_frame_initial_sp_offset * wordSize - (int)sizeof(BasicObjectLock)); 1547 1548 // get parameter size (always needed) 1549 __ movptr(rdx, constMethod); 1550 __ load_unsigned_short(rcx, size_of_parameters); 1551 1552 // rbx: Method* 1553 // rcx: size of parameters 1554 __ load_unsigned_short(rdx, size_of_locals); // get size of locals in words 1555 1556 __ subptr(rdx, rcx); // rdx = no. of additional locals 1557 1558 // see if we've got enough room on the stack for locals plus overhead. 1559 generate_stack_overflow_check(); // C++ 1560 1561 // c++ interpreter does not use stack banging or any implicit exceptions 1562 // leave for now to verify that check is proper. 1563 bang_stack_shadow_pages(false); 1564 1565 1566 1567 // compute beginning of parameters (rdi) 1568 __ lea(locals, Address(rsp, rcx, Address::times_ptr, wordSize)); 1569 1570 // save sender's sp 1571 // __ movl(rcx, rsp); 1572 1573 // get sender's sp 1574 __ pop(rcx); 1575 1576 // get return address 1577 __ pop(rax); 1578 1579 // rdx - # of additional locals 1580 // allocate space for locals 1581 // explicitly initialize locals 1582 { 1583 Label exit, loop; 1584 __ testl(rdx, rdx); // (32bit ok) 1585 __ jcc(Assembler::lessEqual, exit); // do nothing if rdx <= 0 1586 __ bind(loop); 1587 __ push((int32_t)NULL_WORD); // initialize local variables 1588 __ decrement(rdx); // until everything initialized 1589 __ jcc(Assembler::greater, loop); 1590 __ bind(exit); 1591 } 1592 1593 1594 // Assumes rax = return address 1595 1596 // allocate and initialize new interpreterState and method expression stack 1597 // IN(locals) -> locals 1598 // IN(state) -> any current interpreter activation 1599 // destroys rax, rcx, rdx, rdi 1600 // OUT (state) -> new interpreterState 1601 // OUT(rsp) -> bottom of methods expression stack 1602 1603 generate_compute_interpreter_state(state, locals, rcx, false); 1604 1605 // Call interpreter 1606 1607 Label call_interpreter; 1608 __ bind(call_interpreter); 1609 1610 // c++ interpreter does not use stack banging or any implicit exceptions 1611 // leave for now to verify that check is proper. 1612 bang_stack_shadow_pages(false); 1613 1614 1615 // Call interpreter enter here if message is 1616 // set and we know stack size is valid 1617 1618 Label call_interpreter_2; 1619 1620 __ bind(call_interpreter_2); 1621 1622 { 1623 const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread); 1624 1625 #ifdef _LP64 1626 __ mov(c_rarg0, state); 1627 #else 1628 __ push(state); // push arg to interpreter 1629 __ movptr(thread, STATE(_thread)); 1630 #endif // _LP64 1631 1632 // We can setup the frame anchor with everything we want at this point 1633 // as we are thread_in_Java and no safepoints can occur until we go to 1634 // vm mode. We do have to clear flags on return from vm but that is it 1635 // 1636 __ movptr(Address(thread, JavaThread::last_Java_fp_offset()), rbp); 1637 __ movptr(Address(thread, JavaThread::last_Java_sp_offset()), rsp); 1638 1639 // Call the interpreter 1640 1641 RuntimeAddress normal(CAST_FROM_FN_PTR(address, BytecodeInterpreter::run)); 1642 RuntimeAddress checking(CAST_FROM_FN_PTR(address, BytecodeInterpreter::runWithChecks)); 1643 1644 __ call(JvmtiExport::can_post_interpreter_events() ? checking : normal); 1645 NOT_LP64(__ pop(rax);) // discard parameter to run 1646 // 1647 // state is preserved since it is callee saved 1648 // 1649 1650 // reset_last_Java_frame 1651 1652 NOT_LP64(__ movl(thread, STATE(_thread));) 1653 __ reset_last_Java_frame(thread, true, true); 1654 } 1655 1656 // examine msg from interpreter to determine next action 1657 1658 __ movl(rdx, STATE(_msg)); // Get new message 1659 1660 Label call_method; 1661 Label return_from_interpreted_method; 1662 Label throw_exception; 1663 Label bad_msg; 1664 Label do_OSR; 1665 1666 __ cmpl(rdx, (int32_t)BytecodeInterpreter::call_method); 1667 __ jcc(Assembler::equal, call_method); 1668 __ cmpl(rdx, (int32_t)BytecodeInterpreter::return_from_method); 1669 __ jcc(Assembler::equal, return_from_interpreted_method); 1670 __ cmpl(rdx, (int32_t)BytecodeInterpreter::do_osr); 1671 __ jcc(Assembler::equal, do_OSR); 1672 __ cmpl(rdx, (int32_t)BytecodeInterpreter::throwing_exception); 1673 __ jcc(Assembler::equal, throw_exception); 1674 __ cmpl(rdx, (int32_t)BytecodeInterpreter::more_monitors); 1675 __ jcc(Assembler::notEqual, bad_msg); 1676 1677 // Allocate more monitor space, shuffle expression stack.... 1678 1679 generate_more_monitors(); 1680 1681 __ jmp(call_interpreter); 1682 1683 // uncommon trap needs to jump to here to enter the interpreter (re-execute current bytecode) 1684 unctrap_frame_manager_entry = __ pc(); 1685 // 1686 // Load the registers we need. 1687 __ lea(state, Address(rbp, -(int)sizeof(BytecodeInterpreter))); 1688 __ movptr(rsp, STATE(_stack_limit)); // restore expression stack to full depth 1689 __ jmp(call_interpreter_2); 1690 1691 1692 1693 //============================================================================= 1694 // Returning from a compiled method into a deopted method. The bytecode at the 1695 // bcp has completed. The result of the bytecode is in the native abi (the tosca 1696 // for the template based interpreter). Any stack space that was used by the 1697 // bytecode that has completed has been removed (e.g. parameters for an invoke) 1698 // so all that we have to do is place any pending result on the expression stack 1699 // and resume execution on the next bytecode. 1700 1701 1702 generate_deopt_handling(); 1703 __ jmp(call_interpreter); 1704 1705 1706 // Current frame has caught an exception we need to dispatch to the 1707 // handler. We can get here because a native interpreter frame caught 1708 // an exception in which case there is no handler and we must rethrow 1709 // If it is a vanilla interpreted frame the we simply drop into the 1710 // interpreter and let it do the lookup. 1711 1712 Interpreter::_rethrow_exception_entry = __ pc(); 1713 // rax: exception 1714 // rdx: return address/pc that threw exception 1715 1716 Label return_with_exception; 1717 Label unwind_and_forward; 1718 1719 // restore state pointer. 1720 __ lea(state, Address(rbp, -(int)sizeof(BytecodeInterpreter))); 1721 1722 __ movptr(rbx, STATE(_method)); // get method 1723 #ifdef _LP64 1724 __ movptr(Address(r15_thread, Thread::pending_exception_offset()), rax); 1725 #else 1726 __ movl(rcx, STATE(_thread)); // get thread 1727 1728 // Store exception with interpreter will expect it 1729 __ movptr(Address(rcx, Thread::pending_exception_offset()), rax); 1730 #endif // _LP64 1731 1732 // is current frame vanilla or native? 1733 1734 __ movl(rdx, access_flags); 1735 __ testl(rdx, JVM_ACC_NATIVE); 1736 __ jcc(Assembler::zero, return_with_exception); // vanilla interpreted frame, handle directly 1737 1738 // We drop thru to unwind a native interpreted frame with a pending exception 1739 // We jump here for the initial interpreter frame with exception pending 1740 // We unwind the current acivation and forward it to our caller. 1741 1742 __ bind(unwind_and_forward); 1743 1744 // unwind rbp, return stack to unextended value and re-push return address 1745 1746 __ movptr(rcx, STATE(_sender_sp)); 1747 __ leave(); 1748 __ pop(rdx); 1749 __ mov(rsp, rcx); 1750 __ push(rdx); 1751 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry())); 1752 1753 // Return point from a call which returns a result in the native abi 1754 // (c1/c2/jni-native). This result must be processed onto the java 1755 // expression stack. 1756 // 1757 // A pending exception may be present in which case there is no result present 1758 1759 Label resume_interpreter; 1760 Label do_float; 1761 Label do_double; 1762 Label done_conv; 1763 1764 // The FPU stack is clean if UseSSE >= 2 but must be cleaned in other cases 1765 if (UseSSE < 2) { 1766 __ lea(state, Address(rbp, -(int)sizeof(BytecodeInterpreter))); 1767 __ movptr(rbx, STATE(_result._to_call._callee)); // get method just executed 1768 __ movl(rcx, Address(rbx, Method::result_index_offset())); 1769 __ cmpl(rcx, AbstractInterpreter::BasicType_as_index(T_FLOAT)); // Result stub address array index 1770 __ jcc(Assembler::equal, do_float); 1771 __ cmpl(rcx, AbstractInterpreter::BasicType_as_index(T_DOUBLE)); // Result stub address array index 1772 __ jcc(Assembler::equal, do_double); 1773 #if !defined(_LP64) || defined(COMPILER1) || !defined(COMPILER2) 1774 __ empty_FPU_stack(); 1775 #endif // COMPILER2 1776 __ jmp(done_conv); 1777 1778 __ bind(do_float); 1779 #ifdef COMPILER2 1780 for (int i = 1; i < 8; i++) { 1781 __ ffree(i); 1782 } 1783 #endif // COMPILER2 1784 __ jmp(done_conv); 1785 __ bind(do_double); 1786 #ifdef COMPILER2 1787 for (int i = 1; i < 8; i++) { 1788 __ ffree(i); 1789 } 1790 #endif // COMPILER2 1791 __ jmp(done_conv); 1792 } else { 1793 __ MacroAssembler::verify_FPU(0, "generate_return_entry_for compiled"); 1794 __ jmp(done_conv); 1795 } 1796 1797 // Return point to interpreter from compiled/native method 1798 InternalAddress return_from_native_method(__ pc()); 1799 1800 __ bind(done_conv); 1801 1802 1803 // Result if any is in tosca. The java expression stack is in the state that the 1804 // calling convention left it (i.e. params may or may not be present) 1805 // Copy the result from tosca and place it on java expression stack. 1806 1807 // Restore rsi/r13 as compiled code may not preserve it 1808 1809 __ lea(state, Address(rbp, -(int)sizeof(BytecodeInterpreter))); 1810 1811 // restore stack to what we had when we left (in case i2c extended it) 1812 1813 __ movptr(rsp, STATE(_stack)); 1814 __ lea(rsp, Address(rsp, wordSize)); 1815 1816 // If there is a pending exception then we don't really have a result to process 1817 1818 #ifdef _LP64 1819 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD); 1820 #else 1821 __ movptr(rcx, STATE(_thread)); // get thread 1822 __ cmpptr(Address(rcx, Thread::pending_exception_offset()), (int32_t)NULL_WORD); 1823 #endif // _LP64 1824 __ jcc(Assembler::notZero, return_with_exception); 1825 1826 // get method just executed 1827 __ movptr(rbx, STATE(_result._to_call._callee)); 1828 1829 // callee left args on top of expression stack, remove them 1830 __ movptr(rcx, constMethod); 1831 __ load_unsigned_short(rcx, Address(rcx, ConstMethod::size_of_parameters_offset())); 1832 1833 __ lea(rsp, Address(rsp, rcx, Address::times_ptr)); 1834 1835 __ movl(rcx, Address(rbx, Method::result_index_offset())); 1836 ExternalAddress tosca_to_stack((address)CppInterpreter::_tosca_to_stack); 1837 // Address index(noreg, rax, Address::times_ptr); 1838 __ movptr(rcx, ArrayAddress(tosca_to_stack, Address(noreg, rcx, Address::times_ptr))); 1839 // __ movl(rcx, Address(noreg, rcx, Address::times_ptr, int(AbstractInterpreter::_tosca_to_stack))); 1840 __ call(rcx); // call result converter 1841 __ jmp(resume_interpreter); 1842 1843 // An exception is being caught on return to a vanilla interpreter frame. 1844 // Empty the stack and resume interpreter 1845 1846 __ bind(return_with_exception); 1847 1848 // Exception present, empty stack 1849 __ movptr(rsp, STATE(_stack_base)); 1850 __ jmp(resume_interpreter); 1851 1852 // Return from interpreted method we return result appropriate to the caller (i.e. "recursive" 1853 // interpreter call, or native) and unwind this interpreter activation. 1854 // All monitors should be unlocked. 1855 1856 __ bind(return_from_interpreted_method); 1857 1858 Label return_to_initial_caller; 1859 1860 __ movptr(rbx, STATE(_method)); // get method just executed 1861 __ cmpptr(STATE(_prev_link), (int32_t)NULL_WORD); // returning from "recursive" interpreter call? 1862 __ movl(rax, Address(rbx, Method::result_index_offset())); // get result type index 1863 __ jcc(Assembler::equal, return_to_initial_caller); // back to native code (call_stub/c1/c2) 1864 1865 // Copy result to callers java stack 1866 ExternalAddress stack_to_stack((address)CppInterpreter::_stack_to_stack); 1867 // Address index(noreg, rax, Address::times_ptr); 1868 1869 __ movptr(rax, ArrayAddress(stack_to_stack, Address(noreg, rax, Address::times_ptr))); 1870 // __ movl(rax, Address(noreg, rax, Address::times_ptr, int(AbstractInterpreter::_stack_to_stack))); 1871 __ call(rax); // call result converter 1872 1873 Label unwind_recursive_activation; 1874 __ bind(unwind_recursive_activation); 1875 1876 // returning to interpreter method from "recursive" interpreter call 1877 // result converter left rax pointing to top of the java stack for method we are returning 1878 // to. Now all we must do is unwind the state from the completed call 1879 1880 __ movptr(state, STATE(_prev_link)); // unwind state 1881 __ leave(); // pop the frame 1882 __ mov(rsp, rax); // unwind stack to remove args 1883 1884 // Resume the interpreter. The current frame contains the current interpreter 1885 // state object. 1886 // 1887 1888 __ bind(resume_interpreter); 1889 1890 // state == interpreterState object for method we are resuming 1891 1892 __ movl(STATE(_msg), (int)BytecodeInterpreter::method_resume); 1893 __ lea(rsp, Address(rsp, -wordSize)); // prepush stack (result if any already present) 1894 __ movptr(STATE(_stack), rsp); // inform interpreter of new stack depth (parameters removed, 1895 // result if any on stack already ) 1896 __ movptr(rsp, STATE(_stack_limit)); // restore expression stack to full depth 1897 __ jmp(call_interpreter_2); // No need to bang 1898 1899 // interpreter returning to native code (call_stub/c1/c2) 1900 // convert result and unwind initial activation 1901 // rax - result index 1902 1903 __ bind(return_to_initial_caller); 1904 ExternalAddress stack_to_native((address)CppInterpreter::_stack_to_native_abi); 1905 // Address index(noreg, rax, Address::times_ptr); 1906 1907 __ movptr(rax, ArrayAddress(stack_to_native, Address(noreg, rax, Address::times_ptr))); 1908 __ call(rax); // call result converter 1909 1910 Label unwind_initial_activation; 1911 __ bind(unwind_initial_activation); 1912 1913 // RETURN TO CALL_STUB/C1/C2 code (result if any in rax/rdx ST(0)) 1914 1915 /* Current stack picture 1916 1917 [ incoming parameters ] 1918 [ extra locals ] 1919 [ return address to CALL_STUB/C1/C2] 1920 fp -> [ CALL_STUB/C1/C2 fp ] 1921 BytecodeInterpreter object 1922 expression stack 1923 sp -> 1924 1925 */ 1926 1927 // return restoring the stack to the original sender_sp value 1928 1929 __ movptr(rcx, STATE(_sender_sp)); 1930 __ leave(); 1931 __ pop(rdi); // get return address 1932 // set stack to sender's sp 1933 __ mov(rsp, rcx); 1934 __ jmp(rdi); // return to call_stub 1935 1936 // OSR request, adjust return address to make current frame into adapter frame 1937 // and enter OSR nmethod 1938 1939 __ bind(do_OSR); 1940 1941 Label remove_initial_frame; 1942 1943 // We are going to pop this frame. Is there another interpreter frame underneath 1944 // it or is it callstub/compiled? 1945 1946 // Move buffer to the expected parameter location 1947 __ movptr(rcx, STATE(_result._osr._osr_buf)); 1948 1949 __ movptr(rax, STATE(_result._osr._osr_entry)); 1950 1951 __ cmpptr(STATE(_prev_link), (int32_t)NULL_WORD); // returning from "recursive" interpreter call? 1952 __ jcc(Assembler::equal, remove_initial_frame); // back to native code (call_stub/c1/c2) 1953 1954 __ movptr(sender_sp_on_entry, STATE(_sender_sp)); // get sender's sp in expected register 1955 __ leave(); // pop the frame 1956 __ mov(rsp, sender_sp_on_entry); // trim any stack expansion 1957 1958 1959 // We know we are calling compiled so push specialized return 1960 // method uses specialized entry, push a return so we look like call stub setup 1961 // this path will handle fact that result is returned in registers and not 1962 // on the java stack. 1963 1964 __ pushptr(return_from_native_method.addr()); 1965 1966 __ jmp(rax); 1967 1968 __ bind(remove_initial_frame); 1969 1970 __ movptr(rdx, STATE(_sender_sp)); 1971 __ leave(); 1972 // get real return 1973 __ pop(rsi); 1974 // set stack to sender's sp 1975 __ mov(rsp, rdx); 1976 // repush real return 1977 __ push(rsi); 1978 // Enter OSR nmethod 1979 __ jmp(rax); 1980 1981 1982 1983 1984 // Call a new method. All we do is (temporarily) trim the expression stack 1985 // push a return address to bring us back to here and leap to the new entry. 1986 1987 __ bind(call_method); 1988 1989 // stack points to next free location and not top element on expression stack 1990 // method expects sp to be pointing to topmost element 1991 1992 __ movptr(rsp, STATE(_stack)); // pop args to c++ interpreter, set sp to java stack top 1993 __ lea(rsp, Address(rsp, wordSize)); 1994 1995 __ movptr(rbx, STATE(_result._to_call._callee)); // get method to execute 1996 1997 // don't need a return address if reinvoking interpreter 1998 1999 // Make it look like call_stub calling conventions 2000 2001 // Get (potential) receiver 2002 // get size of parameters in words 2003 __ movptr(rcx, constMethod); 2004 __ load_unsigned_short(rcx, Address(rcx, ConstMethod::size_of_parameters_offset())); 2005 2006 ExternalAddress recursive(CAST_FROM_FN_PTR(address, RecursiveInterpreterActivation)); 2007 __ pushptr(recursive.addr()); // make it look good in the debugger 2008 2009 InternalAddress entry(entry_point); 2010 __ cmpptr(STATE(_result._to_call._callee_entry_point), entry.addr()); // returning to interpreter? 2011 __ jcc(Assembler::equal, re_dispatch); // yes 2012 2013 __ pop(rax); // pop dummy address 2014 2015 2016 // get specialized entry 2017 __ movptr(rax, STATE(_result._to_call._callee_entry_point)); 2018 // set sender SP 2019 __ mov(sender_sp_on_entry, rsp); 2020 2021 // method uses specialized entry, push a return so we look like call stub setup 2022 // this path will handle fact that result is returned in registers and not 2023 // on the java stack. 2024 2025 __ pushptr(return_from_native_method.addr()); 2026 2027 __ jmp(rax); 2028 2029 __ bind(bad_msg); 2030 __ stop("Bad message from interpreter"); 2031 2032 // Interpreted method "returned" with an exception pass it on... 2033 // Pass result, unwind activation and continue/return to interpreter/call_stub 2034 // We handle result (if any) differently based on return to interpreter or call_stub 2035 2036 Label unwind_initial_with_pending_exception; 2037 2038 __ bind(throw_exception); 2039 __ cmpptr(STATE(_prev_link), (int32_t)NULL_WORD); // returning from recursive interpreter call? 2040 __ jcc(Assembler::equal, unwind_initial_with_pending_exception); // no, back to native code (call_stub/c1/c2) 2041 __ movptr(rax, STATE(_locals)); // pop parameters get new stack value 2042 __ addptr(rax, wordSize); // account for prepush before we return 2043 __ jmp(unwind_recursive_activation); 2044 2045 __ bind(unwind_initial_with_pending_exception); 2046 2047 // We will unwind the current (initial) interpreter frame and forward 2048 // the exception to the caller. We must put the exception in the 2049 // expected register and clear pending exception and then forward. 2050 2051 __ jmp(unwind_and_forward); 2052 2053 interpreter_frame_manager = entry_point; 2054 return entry_point; 2055 } 2056 2057 2058 InterpreterGenerator::InterpreterGenerator(StubQueue* code) 2059 : CppInterpreterGenerator(code) { 2060 generate_all(); // down here so it can be "virtual" 2061 } 2062 2063 // Deoptimization helpers for C++ interpreter 2064 2065 // How much stack a method activation needs in words. 2066 int AbstractInterpreter::size_top_interpreter_activation(Method* method) { 2067 2068 const int stub_code = 4; // see generate_call_stub 2069 // Save space for one monitor to get into the interpreted method in case 2070 // the method is synchronized 2071 int monitor_size = method->is_synchronized() ? 2072 1*frame::interpreter_frame_monitor_size() : 0; 2073 2074 // total static overhead size. Account for interpreter state object, return 2075 // address, saved rbp and 2 words for a "static long no_params() method" issue. 2076 2077 const int overhead_size = sizeof(BytecodeInterpreter)/wordSize + 2078 ( frame::sender_sp_offset - frame::link_offset) + 2; 2079 2080 const int method_stack = (method->max_locals() + method->max_stack()) * 2081 Interpreter::stackElementWords; 2082 return overhead_size + method_stack + stub_code; 2083 } 2084 2085 // returns the activation size. 2086 static int size_activation_helper(int extra_locals_size, int monitor_size) { 2087 return (extra_locals_size + // the addition space for locals 2088 2*BytesPerWord + // return address and saved rbp 2089 2*BytesPerWord + // "static long no_params() method" issue 2090 sizeof(BytecodeInterpreter) + // interpreterState 2091 monitor_size); // monitors 2092 } 2093 2094 void BytecodeInterpreter::layout_interpreterState(interpreterState to_fill, 2095 frame* caller, 2096 frame* current, 2097 Method* method, 2098 intptr_t* locals, 2099 intptr_t* stack, 2100 intptr_t* stack_base, 2101 intptr_t* monitor_base, 2102 intptr_t* frame_bottom, 2103 bool is_top_frame 2104 ) 2105 { 2106 // What about any vtable? 2107 // 2108 to_fill->_thread = JavaThread::current(); 2109 // This gets filled in later but make it something recognizable for now 2110 to_fill->_bcp = method->code_base(); 2111 to_fill->_locals = locals; 2112 to_fill->_constants = method->constants()->cache(); 2113 to_fill->_method = method; 2114 to_fill->_mdx = NULL; 2115 to_fill->_stack = stack; 2116 if (is_top_frame && JavaThread::current()->popframe_forcing_deopt_reexecution() ) { 2117 to_fill->_msg = deopt_resume2; 2118 } else { 2119 to_fill->_msg = method_resume; 2120 } 2121 to_fill->_result._to_call._bcp_advance = 0; 2122 to_fill->_result._to_call._callee_entry_point = NULL; // doesn't matter to anyone 2123 to_fill->_result._to_call._callee = NULL; // doesn't matter to anyone 2124 to_fill->_prev_link = NULL; 2125 2126 to_fill->_sender_sp = caller->unextended_sp(); 2127 2128 if (caller->is_interpreted_frame()) { 2129 interpreterState prev = caller->get_interpreterState(); 2130 to_fill->_prev_link = prev; 2131 // *current->register_addr(GR_Iprev_state) = (intptr_t) prev; 2132 // Make the prev callee look proper 2133 prev->_result._to_call._callee = method; 2134 if (*prev->_bcp == Bytecodes::_invokeinterface) { 2135 prev->_result._to_call._bcp_advance = 5; 2136 } else { 2137 prev->_result._to_call._bcp_advance = 3; 2138 } 2139 } 2140 to_fill->_oop_temp = NULL; 2141 to_fill->_stack_base = stack_base; 2142 // Need +1 here because stack_base points to the word just above the first expr stack entry 2143 // and stack_limit is supposed to point to the word just below the last expr stack entry. 2144 // See generate_compute_interpreter_state. 2145 to_fill->_stack_limit = stack_base - (method->max_stack() + 1); 2146 to_fill->_monitor_base = (BasicObjectLock*) monitor_base; 2147 2148 to_fill->_self_link = to_fill; 2149 assert(stack >= to_fill->_stack_limit && stack < to_fill->_stack_base, 2150 "Stack top out of range"); 2151 } 2152 2153 2154 static int frame_size_helper(int max_stack, 2155 int tempcount, 2156 int moncount, 2157 int callee_param_count, 2158 int callee_locals, 2159 bool is_top_frame, 2160 int& monitor_size, 2161 int& full_frame_size) { 2162 int extra_locals_size = (callee_locals - callee_param_count) * BytesPerWord; 2163 monitor_size = sizeof(BasicObjectLock) * moncount; 2164 2165 // First calculate the frame size without any java expression stack 2166 int short_frame_size = size_activation_helper(extra_locals_size, 2167 monitor_size); 2168 2169 // Now with full size expression stack 2170 full_frame_size = short_frame_size + max_stack * BytesPerWord; 2171 2172 // and now with only live portion of the expression stack 2173 short_frame_size = short_frame_size + tempcount * BytesPerWord; 2174 2175 // the size the activation is right now. Only top frame is full size 2176 int frame_size = (is_top_frame ? full_frame_size : short_frame_size); 2177 return frame_size; 2178 } 2179 2180 int AbstractInterpreter::size_activation(int max_stack, 2181 int tempcount, 2182 int extra_args, 2183 int moncount, 2184 int callee_param_count, 2185 int callee_locals, 2186 bool is_top_frame) { 2187 assert(extra_args == 0, "FIX ME"); 2188 // NOTE: return size is in words not bytes 2189 2190 // Calculate the amount our frame will be adjust by the callee. For top frame 2191 // this is zero. 2192 2193 // NOTE: ia64 seems to do this wrong (or at least backwards) in that it 2194 // calculates the extra locals based on itself. Not what the callee does 2195 // to it. So it ignores last_frame_adjust value. Seems suspicious as far 2196 // as getting sender_sp correct. 2197 2198 int unused_monitor_size = 0; 2199 int unused_full_frame_size = 0; 2200 return frame_size_helper(max_stack, tempcount, moncount, callee_param_count, callee_locals, 2201 is_top_frame, unused_monitor_size, unused_full_frame_size)/BytesPerWord; 2202 } 2203 2204 void AbstractInterpreter::layout_activation(Method* method, 2205 int tempcount, // 2206 int popframe_extra_args, 2207 int moncount, 2208 int caller_actual_parameters, 2209 int callee_param_count, 2210 int callee_locals, 2211 frame* caller, 2212 frame* interpreter_frame, 2213 bool is_top_frame, 2214 bool is_bottom_frame) { 2215 2216 assert(popframe_extra_args == 0, "FIX ME"); 2217 // NOTE this code must exactly mimic what InterpreterGenerator::generate_compute_interpreter_state() 2218 // does as far as allocating an interpreter frame. 2219 // Set up the method, locals, and monitors. 2220 // The frame interpreter_frame is guaranteed to be the right size, 2221 // as determined by a previous call to the size_activation() method. 2222 // It is also guaranteed to be walkable even though it is in a skeletal state 2223 // NOTE: tempcount is the current size of the java expression stack. For top most 2224 // frames we will allocate a full sized expression stack and not the curback 2225 // version that non-top frames have. 2226 2227 int monitor_size = 0; 2228 int full_frame_size = 0; 2229 int frame_size = frame_size_helper(method->max_stack(), tempcount, moncount, callee_param_count, callee_locals, 2230 is_top_frame, monitor_size, full_frame_size); 2231 2232 #ifdef ASSERT 2233 assert(caller->unextended_sp() == interpreter_frame->interpreter_frame_sender_sp(), "Frame not properly walkable"); 2234 #endif 2235 2236 // MUCHO HACK 2237 2238 intptr_t* frame_bottom = (intptr_t*) ((intptr_t)interpreter_frame->sp() - (full_frame_size - frame_size)); 2239 2240 /* Now fillin the interpreterState object */ 2241 2242 // The state object is the first thing on the frame and easily located 2243 2244 interpreterState cur_state = (interpreterState) ((intptr_t)interpreter_frame->fp() - sizeof(BytecodeInterpreter)); 2245 2246 2247 // Find the locals pointer. This is rather simple on x86 because there is no 2248 // confusing rounding at the callee to account for. We can trivially locate 2249 // our locals based on the current fp(). 2250 // Note: the + 2 is for handling the "static long no_params() method" issue. 2251 // (too bad I don't really remember that issue well...) 2252 2253 intptr_t* locals; 2254 // If the caller is interpreted we need to make sure that locals points to the first 2255 // argument that the caller passed and not in an area where the stack might have been extended. 2256 // because the stack to stack to converter needs a proper locals value in order to remove the 2257 // arguments from the caller and place the result in the proper location. Hmm maybe it'd be 2258 // simpler if we simply stored the result in the BytecodeInterpreter object and let the c++ code 2259 // adjust the stack?? HMMM QQQ 2260 // 2261 if (caller->is_interpreted_frame()) { 2262 // locals must agree with the caller because it will be used to set the 2263 // caller's tos when we return. 2264 interpreterState prev = caller->get_interpreterState(); 2265 // stack() is prepushed. 2266 locals = prev->stack() + method->size_of_parameters(); 2267 // locals = caller->unextended_sp() + (method->size_of_parameters() - 1); 2268 if (locals != interpreter_frame->fp() + frame::sender_sp_offset + (method->max_locals() - 1) + 2) { 2269 // os::breakpoint(); 2270 } 2271 } else { 2272 // this is where a c2i would have placed locals (except for the +2) 2273 locals = interpreter_frame->fp() + frame::sender_sp_offset + (method->max_locals() - 1) + 2; 2274 } 2275 2276 intptr_t* monitor_base = (intptr_t*) cur_state; 2277 intptr_t* stack_base = (intptr_t*) ((intptr_t) monitor_base - monitor_size); 2278 /* +1 because stack is always prepushed */ 2279 intptr_t* stack = (intptr_t*) ((intptr_t) stack_base - (tempcount + 1) * BytesPerWord); 2280 2281 2282 BytecodeInterpreter::layout_interpreterState(cur_state, 2283 caller, 2284 interpreter_frame, 2285 method, 2286 locals, 2287 stack, 2288 stack_base, 2289 monitor_base, 2290 frame_bottom, 2291 is_top_frame); 2292 2293 // BytecodeInterpreter::pd_layout_interpreterState(cur_state, interpreter_return_address, interpreter_frame->fp()); 2294 } 2295 2296 bool AbstractInterpreter::can_be_compiled(methodHandle m) { 2297 switch (method_kind(m)) { 2298 case Interpreter::java_lang_math_sin : // fall thru 2299 case Interpreter::java_lang_math_cos : // fall thru 2300 case Interpreter::java_lang_math_tan : // fall thru 2301 case Interpreter::java_lang_math_abs : // fall thru 2302 case Interpreter::java_lang_math_log : // fall thru 2303 case Interpreter::java_lang_math_log10 : // fall thru 2304 case Interpreter::java_lang_math_sqrt : // fall thru 2305 case Interpreter::java_lang_math_pow : // fall thru 2306 case Interpreter::java_lang_math_exp : 2307 return false; 2308 default: 2309 return true; 2310 } 2311 } 2312 2313 2314 #endif // CC_INTERP (all)