1 /* 2 * Copyright (c) 2007, 2011, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "asm/assembler.hpp" 27 #include "interpreter/bytecodeHistogram.hpp" 28 #include "interpreter/cppInterpreter.hpp" 29 #include "interpreter/interpreter.hpp" 30 #include "interpreter/interpreterGenerator.hpp" 31 #include "interpreter/interpreterRuntime.hpp" 32 #include "oops/arrayOop.hpp" 33 #include "oops/methodDataOop.hpp" 34 #include "oops/methodOop.hpp" 35 #include "oops/oop.inline.hpp" 36 #include "prims/jvmtiExport.hpp" 37 #include "prims/jvmtiThreadState.hpp" 38 #include "runtime/arguments.hpp" 39 #include "runtime/deoptimization.hpp" 40 #include "runtime/frame.inline.hpp" 41 #include "runtime/interfaceSupport.hpp" 42 #include "runtime/sharedRuntime.hpp" 43 #include "runtime/stubRoutines.hpp" 44 #include "runtime/synchronizer.hpp" 45 #include "runtime/timer.hpp" 46 #include "runtime/vframeArray.hpp" 47 #include "utilities/debug.hpp" 48 #ifdef SHARK 49 #include "shark/shark_globals.hpp" 50 #endif 51 52 #ifdef CC_INTERP 53 54 // Routine exists to make tracebacks look decent in debugger 55 // while we are recursed in the frame manager/c++ interpreter. 56 // We could use an address in the frame manager but having 57 // frames look natural in the debugger is a plus. 58 extern "C" void RecursiveInterpreterActivation(interpreterState istate ) 59 { 60 // 61 ShouldNotReachHere(); 62 } 63 64 65 #define __ _masm-> 66 #define STATE(field_name) (Address(state, byte_offset_of(BytecodeInterpreter, field_name))) 67 68 Label fast_accessor_slow_entry_path; // fast accessor methods need to be able to jmp to unsynchronized 69 // c++ interpreter entry point this holds that entry point label. 70 71 // default registers for state and sender_sp 72 // state and sender_sp are the same on 32bit because we have no choice. 73 // state could be rsi on 64bit but it is an arg reg and not callee save 74 // so r13 is better choice. 75 76 const Register state = NOT_LP64(rsi) LP64_ONLY(r13); 77 const Register sender_sp_on_entry = NOT_LP64(rsi) LP64_ONLY(r13); 78 79 // NEEDED for JVMTI? 80 // address AbstractInterpreter::_remove_activation_preserving_args_entry; 81 82 static address unctrap_frame_manager_entry = NULL; 83 84 static address deopt_frame_manager_return_atos = NULL; 85 static address deopt_frame_manager_return_btos = NULL; 86 static address deopt_frame_manager_return_itos = NULL; 87 static address deopt_frame_manager_return_ltos = NULL; 88 static address deopt_frame_manager_return_ftos = NULL; 89 static address deopt_frame_manager_return_dtos = NULL; 90 static address deopt_frame_manager_return_vtos = NULL; 91 92 int AbstractInterpreter::BasicType_as_index(BasicType type) { 93 int i = 0; 94 switch (type) { 95 case T_BOOLEAN: i = 0; break; 96 case T_CHAR : i = 1; break; 97 case T_BYTE : i = 2; break; 98 case T_SHORT : i = 3; break; 99 case T_INT : i = 4; break; 100 case T_VOID : i = 5; break; 101 case T_FLOAT : i = 8; break; 102 case T_LONG : i = 9; break; 103 case T_DOUBLE : i = 6; break; 104 case T_OBJECT : // fall through 105 case T_ARRAY : i = 7; break; 106 default : ShouldNotReachHere(); 107 } 108 assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers, "index out of bounds"); 109 return i; 110 } 111 112 // Is this pc anywhere within code owned by the interpreter? 113 // This only works for pc that might possibly be exposed to frame 114 // walkers. It clearly misses all of the actual c++ interpreter 115 // implementation 116 bool CppInterpreter::contains(address pc) { 117 return (_code->contains(pc) || 118 pc == CAST_FROM_FN_PTR(address, RecursiveInterpreterActivation)); 119 } 120 121 122 address CppInterpreterGenerator::generate_result_handler_for(BasicType type) { 123 address entry = __ pc(); 124 switch (type) { 125 case T_BOOLEAN: __ c2bool(rax); break; 126 case T_CHAR : __ andl(rax, 0xFFFF); break; 127 case T_BYTE : __ sign_extend_byte (rax); break; 128 case T_SHORT : __ sign_extend_short(rax); break; 129 case T_VOID : // fall thru 130 case T_LONG : // fall thru 131 case T_INT : /* nothing to do */ break; 132 133 case T_DOUBLE : 134 case T_FLOAT : 135 { 136 const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp(); 137 __ pop(t); // remove return address first 138 // Must return a result for interpreter or compiler. In SSE 139 // mode, results are returned in xmm0 and the FPU stack must 140 // be empty. 141 if (type == T_FLOAT && UseSSE >= 1) { 142 #ifndef _LP64 143 // Load ST0 144 __ fld_d(Address(rsp, 0)); 145 // Store as float and empty fpu stack 146 __ fstp_s(Address(rsp, 0)); 147 #endif // !_LP64 148 // and reload 149 __ movflt(xmm0, Address(rsp, 0)); 150 } else if (type == T_DOUBLE && UseSSE >= 2 ) { 151 __ movdbl(xmm0, Address(rsp, 0)); 152 } else { 153 // restore ST0 154 __ fld_d(Address(rsp, 0)); 155 } 156 // and pop the temp 157 __ addptr(rsp, 2 * wordSize); 158 __ push(t); // restore return address 159 } 160 break; 161 case T_OBJECT : 162 // retrieve result from frame 163 __ movptr(rax, STATE(_oop_temp)); 164 // and verify it 165 __ verify_oop(rax); 166 break; 167 default : ShouldNotReachHere(); 168 } 169 __ ret(0); // return from result handler 170 return entry; 171 } 172 173 // tosca based result to c++ interpreter stack based result. 174 // Result goes to top of native stack. 175 176 #undef EXTEND // SHOULD NOT BE NEEDED 177 address CppInterpreterGenerator::generate_tosca_to_stack_converter(BasicType type) { 178 // A result is in the tosca (abi result) from either a native method call or compiled 179 // code. Place this result on the java expression stack so C++ interpreter can use it. 180 address entry = __ pc(); 181 182 const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp(); 183 __ pop(t); // remove return address first 184 switch (type) { 185 case T_VOID: 186 break; 187 case T_BOOLEAN: 188 #ifdef EXTEND 189 __ c2bool(rax); 190 #endif 191 __ push(rax); 192 break; 193 case T_CHAR : 194 #ifdef EXTEND 195 __ andl(rax, 0xFFFF); 196 #endif 197 __ push(rax); 198 break; 199 case T_BYTE : 200 #ifdef EXTEND 201 __ sign_extend_byte (rax); 202 #endif 203 __ push(rax); 204 break; 205 case T_SHORT : 206 #ifdef EXTEND 207 __ sign_extend_short(rax); 208 #endif 209 __ push(rax); 210 break; 211 case T_LONG : 212 __ push(rdx); // pushes useless junk on 64bit 213 __ push(rax); 214 break; 215 case T_INT : 216 __ push(rax); 217 break; 218 case T_FLOAT : 219 // Result is in ST(0)/xmm0 220 __ subptr(rsp, wordSize); 221 if ( UseSSE < 1) { 222 __ fstp_s(Address(rsp, 0)); 223 } else { 224 __ movflt(Address(rsp, 0), xmm0); 225 } 226 break; 227 case T_DOUBLE : 228 __ subptr(rsp, 2*wordSize); 229 if ( UseSSE < 2 ) { 230 __ fstp_d(Address(rsp, 0)); 231 } else { 232 __ movdbl(Address(rsp, 0), xmm0); 233 } 234 break; 235 case T_OBJECT : 236 __ verify_oop(rax); // verify it 237 __ push(rax); 238 break; 239 default : ShouldNotReachHere(); 240 } 241 __ jmp(t); // return from result handler 242 return entry; 243 } 244 245 address CppInterpreterGenerator::generate_stack_to_stack_converter(BasicType type) { 246 // A result is in the java expression stack of the interpreted method that has just 247 // returned. Place this result on the java expression stack of the caller. 248 // 249 // The current interpreter activation in rsi/r13 is for the method just returning its 250 // result. So we know that the result of this method is on the top of the current 251 // execution stack (which is pre-pushed) and will be return to the top of the caller 252 // stack. The top of the callers stack is the bottom of the locals of the current 253 // activation. 254 // Because of the way activation are managed by the frame manager the value of rsp is 255 // below both the stack top of the current activation and naturally the stack top 256 // of the calling activation. This enable this routine to leave the return address 257 // to the frame manager on the stack and do a vanilla return. 258 // 259 // On entry: rsi/r13 - interpreter state of activation returning a (potential) result 260 // On Return: rsi/r13 - unchanged 261 // rax - new stack top for caller activation (i.e. activation in _prev_link) 262 // 263 // Can destroy rdx, rcx. 264 // 265 266 address entry = __ pc(); 267 const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp(); 268 switch (type) { 269 case T_VOID: 270 __ movptr(rax, STATE(_locals)); // pop parameters get new stack value 271 __ addptr(rax, wordSize); // account for prepush before we return 272 break; 273 case T_FLOAT : 274 case T_BOOLEAN: 275 case T_CHAR : 276 case T_BYTE : 277 case T_SHORT : 278 case T_INT : 279 // 1 word result 280 __ movptr(rdx, STATE(_stack)); 281 __ movptr(rax, STATE(_locals)); // address for result 282 __ movl(rdx, Address(rdx, wordSize)); // get result 283 __ movptr(Address(rax, 0), rdx); // and store it 284 break; 285 case T_LONG : 286 case T_DOUBLE : 287 // return top two words on current expression stack to caller's expression stack 288 // The caller's expression stack is adjacent to the current frame manager's intepretState 289 // except we allocated one extra word for this intepretState so we won't overwrite it 290 // when we return a two word result. 291 292 __ movptr(rax, STATE(_locals)); // address for result 293 __ movptr(rcx, STATE(_stack)); 294 __ subptr(rax, wordSize); // need addition word besides locals[0] 295 __ movptr(rdx, Address(rcx, 2*wordSize)); // get result word (junk in 64bit) 296 __ movptr(Address(rax, wordSize), rdx); // and store it 297 __ movptr(rdx, Address(rcx, wordSize)); // get result word 298 __ movptr(Address(rax, 0), rdx); // and store it 299 break; 300 case T_OBJECT : 301 __ movptr(rdx, STATE(_stack)); 302 __ movptr(rax, STATE(_locals)); // address for result 303 __ movptr(rdx, Address(rdx, wordSize)); // get result 304 __ verify_oop(rdx); // verify it 305 __ movptr(Address(rax, 0), rdx); // and store it 306 break; 307 default : ShouldNotReachHere(); 308 } 309 __ ret(0); 310 return entry; 311 } 312 313 address CppInterpreterGenerator::generate_stack_to_native_abi_converter(BasicType type) { 314 // A result is in the java expression stack of the interpreted method that has just 315 // returned. Place this result in the native abi that the caller expects. 316 // 317 // Similar to generate_stack_to_stack_converter above. Called at a similar time from the 318 // frame manager execept in this situation the caller is native code (c1/c2/call_stub) 319 // and so rather than return result onto caller's java expression stack we return the 320 // result in the expected location based on the native abi. 321 // On entry: rsi/r13 - interpreter state of activation returning a (potential) result 322 // On Return: rsi/r13 - unchanged 323 // Other registers changed [rax/rdx/ST(0) as needed for the result returned] 324 325 address entry = __ pc(); 326 switch (type) { 327 case T_VOID: 328 break; 329 case T_BOOLEAN: 330 case T_CHAR : 331 case T_BYTE : 332 case T_SHORT : 333 case T_INT : 334 __ movptr(rdx, STATE(_stack)); // get top of stack 335 __ movl(rax, Address(rdx, wordSize)); // get result word 1 336 break; 337 case T_LONG : 338 __ movptr(rdx, STATE(_stack)); // get top of stack 339 __ movptr(rax, Address(rdx, wordSize)); // get result low word 340 NOT_LP64(__ movl(rdx, Address(rdx, 2*wordSize));) // get result high word 341 break; 342 case T_FLOAT : 343 __ movptr(rdx, STATE(_stack)); // get top of stack 344 if ( UseSSE >= 1) { 345 __ movflt(xmm0, Address(rdx, wordSize)); 346 } else { 347 __ fld_s(Address(rdx, wordSize)); // pushd float result 348 } 349 break; 350 case T_DOUBLE : 351 __ movptr(rdx, STATE(_stack)); // get top of stack 352 if ( UseSSE > 1) { 353 __ movdbl(xmm0, Address(rdx, wordSize)); 354 } else { 355 __ fld_d(Address(rdx, wordSize)); // push double result 356 } 357 break; 358 case T_OBJECT : 359 __ movptr(rdx, STATE(_stack)); // get top of stack 360 __ movptr(rax, Address(rdx, wordSize)); // get result word 1 361 __ verify_oop(rax); // verify it 362 break; 363 default : ShouldNotReachHere(); 364 } 365 __ ret(0); 366 return entry; 367 } 368 369 address CppInterpreter::return_entry(TosState state, int length) { 370 // make it look good in the debugger 371 return CAST_FROM_FN_PTR(address, RecursiveInterpreterActivation); 372 } 373 374 address CppInterpreter::deopt_entry(TosState state, int length) { 375 address ret = NULL; 376 if (length != 0) { 377 switch (state) { 378 case atos: ret = deopt_frame_manager_return_atos; break; 379 case btos: ret = deopt_frame_manager_return_btos; break; 380 case ctos: 381 case stos: 382 case itos: ret = deopt_frame_manager_return_itos; break; 383 case ltos: ret = deopt_frame_manager_return_ltos; break; 384 case ftos: ret = deopt_frame_manager_return_ftos; break; 385 case dtos: ret = deopt_frame_manager_return_dtos; break; 386 case vtos: ret = deopt_frame_manager_return_vtos; break; 387 } 388 } else { 389 ret = unctrap_frame_manager_entry; // re-execute the bytecode ( e.g. uncommon trap) 390 } 391 assert(ret != NULL, "Not initialized"); 392 return ret; 393 } 394 395 // C++ Interpreter 396 void CppInterpreterGenerator::generate_compute_interpreter_state(const Register state, 397 const Register locals, 398 const Register sender_sp, 399 bool native) { 400 401 // On entry the "locals" argument points to locals[0] (or where it would be in case no locals in 402 // a static method). "state" contains any previous frame manager state which we must save a link 403 // to in the newly generated state object. On return "state" is a pointer to the newly allocated 404 // state object. We must allocate and initialize a new interpretState object and the method 405 // expression stack. Because the returned result (if any) of the method will be placed on the caller's 406 // expression stack and this will overlap with locals[0] (and locals[1] if double/long) we must 407 // be sure to leave space on the caller's stack so that this result will not overwrite values when 408 // locals[0] and locals[1] do not exist (and in fact are return address and saved rbp). So when 409 // we are non-native we in essence ensure that locals[0-1] exist. We play an extra trick in 410 // non-product builds and initialize this last local with the previous interpreterState as 411 // this makes things look real nice in the debugger. 412 413 // State on entry 414 // Assumes locals == &locals[0] 415 // Assumes state == any previous frame manager state (assuming call path from c++ interpreter) 416 // Assumes rax = return address 417 // rcx == senders_sp 418 // rbx == method 419 // Modifies rcx, rdx, rax 420 // Returns: 421 // state == address of new interpreterState 422 // rsp == bottom of method's expression stack. 423 424 const Address const_offset (rbx, methodOopDesc::const_offset()); 425 426 427 // On entry sp is the sender's sp. This includes the space for the arguments 428 // that the sender pushed. If the sender pushed no args (a static) and the 429 // caller returns a long then we need two words on the sender's stack which 430 // are not present (although when we return a restore full size stack the 431 // space will be present). If we didn't allocate two words here then when 432 // we "push" the result of the caller's stack we would overwrite the return 433 // address and the saved rbp. Not good. So simply allocate 2 words now 434 // just to be safe. This is the "static long no_params() method" issue. 435 // See Lo.java for a testcase. 436 // We don't need this for native calls because they return result in 437 // register and the stack is expanded in the caller before we store 438 // the results on the stack. 439 440 if (!native) { 441 #ifdef PRODUCT 442 __ subptr(rsp, 2*wordSize); 443 #else /* PRODUCT */ 444 __ push((int32_t)NULL_WORD); 445 __ push(state); // make it look like a real argument 446 #endif /* PRODUCT */ 447 } 448 449 // Now that we are assure of space for stack result, setup typical linkage 450 451 __ push(rax); 452 __ enter(); 453 454 __ mov(rax, state); // save current state 455 456 __ lea(rsp, Address(rsp, -(int)sizeof(BytecodeInterpreter))); 457 __ mov(state, rsp); 458 459 // rsi/r13 == state/locals rax == prevstate 460 461 // initialize the "shadow" frame so that use since C++ interpreter not directly 462 // recursive. Simpler to recurse but we can't trim expression stack as we call 463 // new methods. 464 __ movptr(STATE(_locals), locals); // state->_locals = locals() 465 __ movptr(STATE(_self_link), state); // point to self 466 __ movptr(STATE(_prev_link), rax); // state->_link = state on entry (NULL or previous state) 467 __ movptr(STATE(_sender_sp), sender_sp); // state->_sender_sp = sender_sp 468 #ifdef _LP64 469 __ movptr(STATE(_thread), r15_thread); // state->_bcp = codes() 470 #else 471 __ get_thread(rax); // get vm's javathread* 472 __ movptr(STATE(_thread), rax); // state->_bcp = codes() 473 #endif // _LP64 474 __ movptr(rdx, Address(rbx, methodOopDesc::const_offset())); // get constantMethodOop 475 __ lea(rdx, Address(rdx, constMethodOopDesc::codes_offset())); // get code base 476 if (native) { 477 __ movptr(STATE(_bcp), (int32_t)NULL_WORD); // state->_bcp = NULL 478 } else { 479 __ movptr(STATE(_bcp), rdx); // state->_bcp = codes() 480 } 481 __ xorptr(rdx, rdx); 482 __ movptr(STATE(_oop_temp), rdx); // state->_oop_temp = NULL (only really needed for native) 483 __ movptr(STATE(_mdx), rdx); // state->_mdx = NULL 484 __ movptr(rdx, Address(rbx, methodOopDesc::constants_offset())); 485 __ movptr(rdx, Address(rdx, constantPoolOopDesc::cache_offset_in_bytes())); 486 __ movptr(STATE(_constants), rdx); // state->_constants = constants() 487 488 __ movptr(STATE(_method), rbx); // state->_method = method() 489 __ movl(STATE(_msg), (int32_t) BytecodeInterpreter::method_entry); // state->_msg = initial method entry 490 __ movptr(STATE(_result._to_call._callee), (int32_t) NULL_WORD); // state->_result._to_call._callee_callee = NULL 491 492 493 __ movptr(STATE(_monitor_base), rsp); // set monitor block bottom (grows down) this would point to entry [0] 494 // entries run from -1..x where &monitor[x] == 495 496 { 497 // Must not attempt to lock method until we enter interpreter as gc won't be able to find the 498 // initial frame. However we allocate a free monitor so we don't have to shuffle the expression stack 499 // immediately. 500 501 // synchronize method 502 const Address access_flags (rbx, methodOopDesc::access_flags_offset()); 503 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize; 504 Label not_synced; 505 506 __ movl(rax, access_flags); 507 __ testl(rax, JVM_ACC_SYNCHRONIZED); 508 __ jcc(Assembler::zero, not_synced); 509 510 // Allocate initial monitor and pre initialize it 511 // get synchronization object 512 513 Label done; 514 const int mirror_offset = Klass::java_mirror_offset_in_bytes(); 515 __ movl(rax, access_flags); 516 __ testl(rax, JVM_ACC_STATIC); 517 __ movptr(rax, Address(locals, 0)); // get receiver (assume this is frequent case) 518 __ jcc(Assembler::zero, done); 519 __ movptr(rax, Address(rbx, methodOopDesc::constants_offset())); 520 __ movptr(rax, Address(rax, constantPoolOopDesc::pool_holder_offset_in_bytes())); 521 __ movptr(rax, Address(rax, mirror_offset)); 522 __ bind(done); 523 // add space for monitor & lock 524 __ subptr(rsp, entry_size); // add space for a monitor entry 525 __ movptr(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax); // store object 526 __ bind(not_synced); 527 } 528 529 __ movptr(STATE(_stack_base), rsp); // set expression stack base ( == &monitors[-count]) 530 if (native) { 531 __ movptr(STATE(_stack), rsp); // set current expression stack tos 532 __ movptr(STATE(_stack_limit), rsp); 533 } else { 534 __ subptr(rsp, wordSize); // pre-push stack 535 __ movptr(STATE(_stack), rsp); // set current expression stack tos 536 537 // compute full expression stack limit 538 539 const Address size_of_stack (rbx, methodOopDesc::max_stack_offset()); 540 const int extra_stack = 0; //6815692//methodOopDesc::extra_stack_words(); 541 __ load_unsigned_short(rdx, size_of_stack); // get size of expression stack in words 542 __ negptr(rdx); // so we can subtract in next step 543 // Allocate expression stack 544 __ lea(rsp, Address(rsp, rdx, Address::times_ptr, -extra_stack)); 545 __ movptr(STATE(_stack_limit), rsp); 546 } 547 548 #ifdef _LP64 549 // Make sure stack is properly aligned and sized for the abi 550 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows 551 __ andptr(rsp, -16); // must be 16 byte boundary (see amd64 ABI) 552 #endif // _LP64 553 554 555 556 } 557 558 // Helpers for commoning out cases in the various type of method entries. 559 // 560 561 // increment invocation count & check for overflow 562 // 563 // Note: checking for negative value instead of overflow 564 // so we have a 'sticky' overflow test 565 // 566 // rbx,: method 567 // rcx: invocation counter 568 // 569 void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) { 570 571 const Address invocation_counter(rbx, methodOopDesc::invocation_counter_offset() + InvocationCounter::counter_offset()); 572 const Address backedge_counter (rbx, methodOopDesc::backedge_counter_offset() + InvocationCounter::counter_offset()); 573 574 if (ProfileInterpreter) { // %%% Merge this into methodDataOop 575 __ incrementl(Address(rbx,methodOopDesc::interpreter_invocation_counter_offset())); 576 } 577 // Update standard invocation counters 578 __ movl(rax, backedge_counter); // load backedge counter 579 580 __ increment(rcx, InvocationCounter::count_increment); 581 __ andl(rax, InvocationCounter::count_mask_value); // mask out the status bits 582 583 __ movl(invocation_counter, rcx); // save invocation count 584 __ addl(rcx, rax); // add both counters 585 586 // profile_method is non-null only for interpreted method so 587 // profile_method != NULL == !native_call 588 // BytecodeInterpreter only calls for native so code is elided. 589 590 __ cmp32(rcx, 591 ExternalAddress((address)&InvocationCounter::InterpreterInvocationLimit)); 592 __ jcc(Assembler::aboveEqual, *overflow); 593 594 } 595 596 void InterpreterGenerator::generate_counter_overflow(Label* do_continue) { 597 598 // C++ interpreter on entry 599 // rsi/r13 - new interpreter state pointer 600 // rbp - interpreter frame pointer 601 // rbx - method 602 603 // On return (i.e. jump to entry_point) [ back to invocation of interpreter ] 604 // rbx, - method 605 // rcx - rcvr (assuming there is one) 606 // top of stack return address of interpreter caller 607 // rsp - sender_sp 608 609 // C++ interpreter only 610 // rsi/r13 - previous interpreter state pointer 611 612 const Address size_of_parameters(rbx, methodOopDesc::size_of_parameters_offset()); 613 614 // InterpreterRuntime::frequency_counter_overflow takes one argument 615 // indicating if the counter overflow occurs at a backwards branch (non-NULL bcp). 616 // The call returns the address of the verified entry point for the method or NULL 617 // if the compilation did not complete (either went background or bailed out). 618 __ movptr(rax, (int32_t)false); 619 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), rax); 620 621 // for c++ interpreter can rsi really be munged? 622 __ lea(state, Address(rbp, -(int)sizeof(BytecodeInterpreter))); // restore state 623 __ movptr(rbx, Address(state, byte_offset_of(BytecodeInterpreter, _method))); // restore method 624 __ movptr(rdi, Address(state, byte_offset_of(BytecodeInterpreter, _locals))); // get locals pointer 625 626 __ jmp(*do_continue, relocInfo::none); 627 628 } 629 630 void InterpreterGenerator::generate_stack_overflow_check(void) { 631 // see if we've got enough room on the stack for locals plus overhead. 632 // the expression stack grows down incrementally, so the normal guard 633 // page mechanism will work for that. 634 // 635 // Registers live on entry: 636 // 637 // Asm interpreter 638 // rdx: number of additional locals this frame needs (what we must check) 639 // rbx,: methodOop 640 641 // C++ Interpreter 642 // rsi/r13: previous interpreter frame state object 643 // rdi: &locals[0] 644 // rcx: # of locals 645 // rdx: number of additional locals this frame needs (what we must check) 646 // rbx: methodOop 647 648 // destroyed on exit 649 // rax, 650 651 // NOTE: since the additional locals are also always pushed (wasn't obvious in 652 // generate_method_entry) so the guard should work for them too. 653 // 654 655 // monitor entry size: see picture of stack set (generate_method_entry) and frame_i486.hpp 656 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize; 657 658 // total overhead size: entry_size + (saved rbp, thru expr stack bottom). 659 // be sure to change this if you add/subtract anything to/from the overhead area 660 const int overhead_size = (int)sizeof(BytecodeInterpreter); 661 662 const int page_size = os::vm_page_size(); 663 664 Label after_frame_check; 665 666 // compute rsp as if this were going to be the last frame on 667 // the stack before the red zone 668 669 Label after_frame_check_pop; 670 671 // save rsi == caller's bytecode ptr (c++ previous interp. state) 672 // QQQ problem here?? rsi overload???? 673 __ push(state); 674 675 const Register thread = LP64_ONLY(r15_thread) NOT_LP64(rsi); 676 677 NOT_LP64(__ get_thread(thread)); 678 679 const Address stack_base(thread, Thread::stack_base_offset()); 680 const Address stack_size(thread, Thread::stack_size_offset()); 681 682 // locals + overhead, in bytes 683 const Address size_of_stack (rbx, methodOopDesc::max_stack_offset()); 684 // Always give one monitor to allow us to start interp if sync method. 685 // Any additional monitors need a check when moving the expression stack 686 const int one_monitor = frame::interpreter_frame_monitor_size() * wordSize; 687 const int extra_stack = 0; //6815692//methodOopDesc::extra_stack_entries(); 688 __ load_unsigned_short(rax, size_of_stack); // get size of expression stack in words 689 __ lea(rax, Address(noreg, rax, Interpreter::stackElementScale(), extra_stack + one_monitor)); 690 __ lea(rax, Address(rax, rdx, Interpreter::stackElementScale(), overhead_size)); 691 692 #ifdef ASSERT 693 Label stack_base_okay, stack_size_okay; 694 // verify that thread stack base is non-zero 695 __ cmpptr(stack_base, (int32_t)0); 696 __ jcc(Assembler::notEqual, stack_base_okay); 697 __ stop("stack base is zero"); 698 __ bind(stack_base_okay); 699 // verify that thread stack size is non-zero 700 __ cmpptr(stack_size, (int32_t)0); 701 __ jcc(Assembler::notEqual, stack_size_okay); 702 __ stop("stack size is zero"); 703 __ bind(stack_size_okay); 704 #endif 705 706 // Add stack base to locals and subtract stack size 707 __ addptr(rax, stack_base); 708 __ subptr(rax, stack_size); 709 710 // We should have a magic number here for the size of the c++ interpreter frame. 711 // We can't actually tell this ahead of time. The debug version size is around 3k 712 // product is 1k and fastdebug is 4k 713 const int slop = 6 * K; 714 715 // Use the maximum number of pages we might bang. 716 const int max_pages = StackShadowPages > (StackRedPages+StackYellowPages) ? StackShadowPages : 717 (StackRedPages+StackYellowPages); 718 // Only need this if we are stack banging which is temporary while 719 // we're debugging. 720 __ addptr(rax, slop + 2*max_pages * page_size); 721 722 // check against the current stack bottom 723 __ cmpptr(rsp, rax); 724 __ jcc(Assembler::above, after_frame_check_pop); 725 726 __ pop(state); // get c++ prev state. 727 728 // throw exception return address becomes throwing pc 729 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError)); 730 731 // all done with frame size check 732 __ bind(after_frame_check_pop); 733 __ pop(state); 734 735 __ bind(after_frame_check); 736 } 737 738 // Find preallocated monitor and lock method (C++ interpreter) 739 // rbx - methodOop 740 // 741 void InterpreterGenerator::lock_method(void) { 742 // assumes state == rsi/r13 == pointer to current interpreterState 743 // minimally destroys rax, rdx|c_rarg1, rdi 744 // 745 // synchronize method 746 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize; 747 const Address access_flags (rbx, methodOopDesc::access_flags_offset()); 748 749 const Register monitor = NOT_LP64(rdx) LP64_ONLY(c_rarg1); 750 751 // find initial monitor i.e. monitors[-1] 752 __ movptr(monitor, STATE(_monitor_base)); // get monitor bottom limit 753 __ subptr(monitor, entry_size); // point to initial monitor 754 755 #ifdef ASSERT 756 { Label L; 757 __ movl(rax, access_flags); 758 __ testl(rax, JVM_ACC_SYNCHRONIZED); 759 __ jcc(Assembler::notZero, L); 760 __ stop("method doesn't need synchronization"); 761 __ bind(L); 762 } 763 #endif // ASSERT 764 // get synchronization object 765 { Label done; 766 const int mirror_offset = Klass::java_mirror_offset_in_bytes(); 767 __ movl(rax, access_flags); 768 __ movptr(rdi, STATE(_locals)); // prepare to get receiver (assume common case) 769 __ testl(rax, JVM_ACC_STATIC); 770 __ movptr(rax, Address(rdi, 0)); // get receiver (assume this is frequent case) 771 __ jcc(Assembler::zero, done); 772 __ movptr(rax, Address(rbx, methodOopDesc::constants_offset())); 773 __ movptr(rax, Address(rax, constantPoolOopDesc::pool_holder_offset_in_bytes())); 774 __ movptr(rax, Address(rax, mirror_offset)); 775 __ bind(done); 776 } 777 #ifdef ASSERT 778 { Label L; 779 __ cmpptr(rax, Address(monitor, BasicObjectLock::obj_offset_in_bytes())); // correct object? 780 __ jcc(Assembler::equal, L); 781 __ stop("wrong synchronization lobject"); 782 __ bind(L); 783 } 784 #endif // ASSERT 785 // can destroy rax, rdx|c_rarg1, rcx, and (via call_VM) rdi! 786 __ lock_object(monitor); 787 } 788 789 // Call an accessor method (assuming it is resolved, otherwise drop into vanilla (slow path) entry 790 791 address InterpreterGenerator::generate_accessor_entry(void) { 792 793 // rbx: methodOop 794 795 // rsi/r13: senderSP must preserved for slow path, set SP to it on fast path 796 797 Label xreturn_path; 798 799 // do fastpath for resolved accessor methods 800 if (UseFastAccessorMethods) { 801 802 address entry_point = __ pc(); 803 804 Label slow_path; 805 // If we need a safepoint check, generate full interpreter entry. 806 ExternalAddress state(SafepointSynchronize::address_of_state()); 807 __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()), 808 SafepointSynchronize::_not_synchronized); 809 810 __ jcc(Assembler::notEqual, slow_path); 811 // ASM/C++ Interpreter 812 // Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites thereof; parameter size = 1 813 // Note: We can only use this code if the getfield has been resolved 814 // and if we don't have a null-pointer exception => check for 815 // these conditions first and use slow path if necessary. 816 // rbx,: method 817 // rcx: receiver 818 __ movptr(rax, Address(rsp, wordSize)); 819 820 // check if local 0 != NULL and read field 821 __ testptr(rax, rax); 822 __ jcc(Assembler::zero, slow_path); 823 824 __ movptr(rdi, Address(rbx, methodOopDesc::constants_offset())); 825 // read first instruction word and extract bytecode @ 1 and index @ 2 826 __ movptr(rdx, Address(rbx, methodOopDesc::const_offset())); 827 __ movl(rdx, Address(rdx, constMethodOopDesc::codes_offset())); 828 // Shift codes right to get the index on the right. 829 // The bytecode fetched looks like <index><0xb4><0x2a> 830 __ shrl(rdx, 2*BitsPerByte); 831 __ shll(rdx, exact_log2(in_words(ConstantPoolCacheEntry::size()))); 832 __ movptr(rdi, Address(rdi, constantPoolOopDesc::cache_offset_in_bytes())); 833 834 // rax,: local 0 835 // rbx,: method 836 // rcx: receiver - do not destroy since it is needed for slow path! 837 // rcx: scratch 838 // rdx: constant pool cache index 839 // rdi: constant pool cache 840 // rsi/r13: sender sp 841 842 // check if getfield has been resolved and read constant pool cache entry 843 // check the validity of the cache entry by testing whether _indices field 844 // contains Bytecode::_getfield in b1 byte. 845 assert(in_words(ConstantPoolCacheEntry::size()) == 4, "adjust shift below"); 846 __ movl(rcx, 847 Address(rdi, 848 rdx, 849 Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset())); 850 __ shrl(rcx, 2*BitsPerByte); 851 __ andl(rcx, 0xFF); 852 __ cmpl(rcx, Bytecodes::_getfield); 853 __ jcc(Assembler::notEqual, slow_path); 854 855 // Note: constant pool entry is not valid before bytecode is resolved 856 __ movptr(rcx, 857 Address(rdi, 858 rdx, 859 Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f2_offset())); 860 __ movl(rdx, 861 Address(rdi, 862 rdx, 863 Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::flags_offset())); 864 865 Label notByte, notShort, notChar; 866 const Address field_address (rax, rcx, Address::times_1); 867 868 // Need to differentiate between igetfield, agetfield, bgetfield etc. 869 // because they are different sizes. 870 // Use the type from the constant pool cache 871 __ shrl(rdx, ConstantPoolCacheEntry::tosBits); 872 // Make sure we don't need to mask rdx for tosBits after the above shift 873 ConstantPoolCacheEntry::verify_tosBits(); 874 #ifdef _LP64 875 Label notObj; 876 __ cmpl(rdx, atos); 877 __ jcc(Assembler::notEqual, notObj); 878 // atos 879 __ movptr(rax, field_address); 880 __ jmp(xreturn_path); 881 882 __ bind(notObj); 883 #endif // _LP64 884 __ cmpl(rdx, btos); 885 __ jcc(Assembler::notEqual, notByte); 886 __ load_signed_byte(rax, field_address); 887 __ jmp(xreturn_path); 888 889 __ bind(notByte); 890 __ cmpl(rdx, stos); 891 __ jcc(Assembler::notEqual, notShort); 892 __ load_signed_short(rax, field_address); 893 __ jmp(xreturn_path); 894 895 __ bind(notShort); 896 __ cmpl(rdx, ctos); 897 __ jcc(Assembler::notEqual, notChar); 898 __ load_unsigned_short(rax, field_address); 899 __ jmp(xreturn_path); 900 901 __ bind(notChar); 902 #ifdef ASSERT 903 Label okay; 904 #ifndef _LP64 905 __ cmpl(rdx, atos); 906 __ jcc(Assembler::equal, okay); 907 #endif // _LP64 908 __ cmpl(rdx, itos); 909 __ jcc(Assembler::equal, okay); 910 __ stop("what type is this?"); 911 __ bind(okay); 912 #endif // ASSERT 913 // All the rest are a 32 bit wordsize 914 __ movl(rax, field_address); 915 916 __ bind(xreturn_path); 917 918 // _ireturn/_areturn 919 __ pop(rdi); // get return address 920 __ mov(rsp, sender_sp_on_entry); // set sp to sender sp 921 __ jmp(rdi); 922 923 // generate a vanilla interpreter entry as the slow path 924 __ bind(slow_path); 925 // We will enter c++ interpreter looking like it was 926 // called by the call_stub this will cause it to return 927 // a tosca result to the invoker which might have been 928 // the c++ interpreter itself. 929 930 __ jmp(fast_accessor_slow_entry_path); 931 return entry_point; 932 933 } else { 934 return NULL; 935 } 936 937 } 938 939 address InterpreterGenerator::generate_Reference_get_entry(void) { 940 #ifndef SERIALGC 941 if (UseG1GC) { 942 // We need to generate have a routine that generates code to: 943 // * load the value in the referent field 944 // * passes that value to the pre-barrier. 945 // 946 // In the case of G1 this will record the value of the 947 // referent in an SATB buffer if marking is active. 948 // This will cause concurrent marking to mark the referent 949 // field as live. 950 Unimplemented(); 951 } 952 #endif // SERIALGC 953 954 // If G1 is not enabled then attempt to go through the accessor entry point 955 // Reference.get is an accessor 956 return generate_accessor_entry(); 957 } 958 959 // 960 // C++ Interpreter stub for calling a native method. 961 // This sets up a somewhat different looking stack for calling the native method 962 // than the typical interpreter frame setup but still has the pointer to 963 // an interpreter state. 964 // 965 966 address InterpreterGenerator::generate_native_entry(bool synchronized) { 967 // determine code generation flags 968 bool inc_counter = UseCompiler || CountCompiledCalls; 969 970 // rbx: methodOop 971 // rcx: receiver (unused) 972 // rsi/r13: previous interpreter state (if called from C++ interpreter) must preserve 973 // in any case. If called via c1/c2/call_stub rsi/r13 is junk (to use) but harmless 974 // to save/restore. 975 address entry_point = __ pc(); 976 977 const Address size_of_parameters(rbx, methodOopDesc::size_of_parameters_offset()); 978 const Address size_of_locals (rbx, methodOopDesc::size_of_locals_offset()); 979 const Address invocation_counter(rbx, methodOopDesc::invocation_counter_offset() + InvocationCounter::counter_offset()); 980 const Address access_flags (rbx, methodOopDesc::access_flags_offset()); 981 982 // rsi/r13 == state/locals rdi == prevstate 983 const Register locals = rdi; 984 985 // get parameter size (always needed) 986 __ load_unsigned_short(rcx, size_of_parameters); 987 988 // rbx: methodOop 989 // rcx: size of parameters 990 __ pop(rax); // get return address 991 // for natives the size of locals is zero 992 993 // compute beginning of parameters /locals 994 __ lea(locals, Address(rsp, rcx, Address::times_ptr, -wordSize)); 995 996 // initialize fixed part of activation frame 997 998 // Assumes rax = return address 999 1000 // allocate and initialize new interpreterState and method expression stack 1001 // IN(locals) -> locals 1002 // IN(state) -> previous frame manager state (NULL from stub/c1/c2) 1003 // destroys rax, rcx, rdx 1004 // OUT (state) -> new interpreterState 1005 // OUT(rsp) -> bottom of methods expression stack 1006 1007 // save sender_sp 1008 __ mov(rcx, sender_sp_on_entry); 1009 // start with NULL previous state 1010 __ movptr(state, (int32_t)NULL_WORD); 1011 generate_compute_interpreter_state(state, locals, rcx, true); 1012 1013 #ifdef ASSERT 1014 { Label L; 1015 __ movptr(rax, STATE(_stack_base)); 1016 #ifdef _LP64 1017 // duplicate the alignment rsp got after setting stack_base 1018 __ subptr(rax, frame::arg_reg_save_area_bytes); // windows 1019 __ andptr(rax, -16); // must be 16 byte boundary (see amd64 ABI) 1020 #endif // _LP64 1021 __ cmpptr(rax, rsp); 1022 __ jcc(Assembler::equal, L); 1023 __ stop("broken stack frame setup in interpreter"); 1024 __ bind(L); 1025 } 1026 #endif 1027 1028 if (inc_counter) __ movl(rcx, invocation_counter); // (pre-)fetch invocation count 1029 1030 const Register unlock_thread = LP64_ONLY(r15_thread) NOT_LP64(rax); 1031 NOT_LP64(__ movptr(unlock_thread, STATE(_thread));) // get thread 1032 // Since at this point in the method invocation the exception handler 1033 // would try to exit the monitor of synchronized methods which hasn't 1034 // been entered yet, we set the thread local variable 1035 // _do_not_unlock_if_synchronized to true. The remove_activation will 1036 // check this flag. 1037 1038 const Address do_not_unlock_if_synchronized(unlock_thread, 1039 in_bytes(JavaThread::do_not_unlock_if_synchronized_offset())); 1040 __ movbool(do_not_unlock_if_synchronized, true); 1041 1042 // make sure method is native & not abstract 1043 #ifdef ASSERT 1044 __ movl(rax, access_flags); 1045 { 1046 Label L; 1047 __ testl(rax, JVM_ACC_NATIVE); 1048 __ jcc(Assembler::notZero, L); 1049 __ stop("tried to execute non-native method as native"); 1050 __ bind(L); 1051 } 1052 { Label L; 1053 __ testl(rax, JVM_ACC_ABSTRACT); 1054 __ jcc(Assembler::zero, L); 1055 __ stop("tried to execute abstract method in interpreter"); 1056 __ bind(L); 1057 } 1058 #endif 1059 1060 1061 // increment invocation count & check for overflow 1062 Label invocation_counter_overflow; 1063 if (inc_counter) { 1064 generate_counter_incr(&invocation_counter_overflow, NULL, NULL); 1065 } 1066 1067 Label continue_after_compile; 1068 1069 __ bind(continue_after_compile); 1070 1071 bang_stack_shadow_pages(true); 1072 1073 // reset the _do_not_unlock_if_synchronized flag 1074 NOT_LP64(__ movl(rax, STATE(_thread));) // get thread 1075 __ movbool(do_not_unlock_if_synchronized, false); 1076 1077 1078 // check for synchronized native methods 1079 // 1080 // Note: This must happen *after* invocation counter check, since 1081 // when overflow happens, the method should not be locked. 1082 if (synchronized) { 1083 // potentially kills rax, rcx, rdx, rdi 1084 lock_method(); 1085 } else { 1086 // no synchronization necessary 1087 #ifdef ASSERT 1088 { Label L; 1089 __ movl(rax, access_flags); 1090 __ testl(rax, JVM_ACC_SYNCHRONIZED); 1091 __ jcc(Assembler::zero, L); 1092 __ stop("method needs synchronization"); 1093 __ bind(L); 1094 } 1095 #endif 1096 } 1097 1098 // start execution 1099 1100 // jvmti support 1101 __ notify_method_entry(); 1102 1103 // work registers 1104 const Register method = rbx; 1105 const Register thread = LP64_ONLY(r15_thread) NOT_LP64(rdi); 1106 const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp(); // rcx|rscratch1 1107 1108 // allocate space for parameters 1109 __ movptr(method, STATE(_method)); 1110 __ verify_oop(method); 1111 __ load_unsigned_short(t, Address(method, methodOopDesc::size_of_parameters_offset())); 1112 __ shll(t, 2); 1113 #ifdef _LP64 1114 __ subptr(rsp, t); 1115 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows 1116 __ andptr(rsp, -16); // must be 16 byte boundary (see amd64 ABI) 1117 #else 1118 __ addptr(t, 2*wordSize); // allocate two more slots for JNIEnv and possible mirror 1119 __ subptr(rsp, t); 1120 __ andptr(rsp, -(StackAlignmentInBytes)); // gcc needs 16 byte aligned stacks to do XMM intrinsics 1121 #endif // _LP64 1122 1123 // get signature handler 1124 Label pending_exception_present; 1125 1126 { Label L; 1127 __ movptr(t, Address(method, methodOopDesc::signature_handler_offset())); 1128 __ testptr(t, t); 1129 __ jcc(Assembler::notZero, L); 1130 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), method, false); 1131 __ movptr(method, STATE(_method)); 1132 __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD); 1133 __ jcc(Assembler::notEqual, pending_exception_present); 1134 __ verify_oop(method); 1135 __ movptr(t, Address(method, methodOopDesc::signature_handler_offset())); 1136 __ bind(L); 1137 } 1138 #ifdef ASSERT 1139 { 1140 Label L; 1141 __ push(t); 1142 __ get_thread(t); // get vm's javathread* 1143 __ cmpptr(t, STATE(_thread)); 1144 __ jcc(Assembler::equal, L); 1145 __ int3(); 1146 __ bind(L); 1147 __ pop(t); 1148 } 1149 #endif // 1150 1151 const Register from_ptr = InterpreterRuntime::SignatureHandlerGenerator::from(); 1152 // call signature handler 1153 assert(InterpreterRuntime::SignatureHandlerGenerator::to () == rsp, "adjust this code"); 1154 1155 // The generated handlers do not touch RBX (the method oop). 1156 // However, large signatures cannot be cached and are generated 1157 // each time here. The slow-path generator will blow RBX 1158 // sometime, so we must reload it after the call. 1159 __ movptr(from_ptr, STATE(_locals)); // get the from pointer 1160 __ call(t); 1161 __ movptr(method, STATE(_method)); 1162 __ verify_oop(method); 1163 1164 // result handler is in rax 1165 // set result handler 1166 __ movptr(STATE(_result_handler), rax); 1167 1168 1169 // get native function entry point 1170 { Label L; 1171 __ movptr(rax, Address(method, methodOopDesc::native_function_offset())); 1172 __ testptr(rax, rax); 1173 __ jcc(Assembler::notZero, L); 1174 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), method); 1175 __ movptr(method, STATE(_method)); 1176 __ verify_oop(method); 1177 __ movptr(rax, Address(method, methodOopDesc::native_function_offset())); 1178 __ bind(L); 1179 } 1180 1181 // pass mirror handle if static call 1182 { Label L; 1183 const int mirror_offset = Klass::java_mirror_offset_in_bytes(); 1184 __ movl(t, Address(method, methodOopDesc::access_flags_offset())); 1185 __ testl(t, JVM_ACC_STATIC); 1186 __ jcc(Assembler::zero, L); 1187 // get mirror 1188 __ movptr(t, Address(method, methodOopDesc:: constants_offset())); 1189 __ movptr(t, Address(t, constantPoolOopDesc::pool_holder_offset_in_bytes())); 1190 __ movptr(t, Address(t, mirror_offset)); 1191 // copy mirror into activation object 1192 __ movptr(STATE(_oop_temp), t); 1193 // pass handle to mirror 1194 #ifdef _LP64 1195 __ lea(c_rarg1, STATE(_oop_temp)); 1196 #else 1197 __ lea(t, STATE(_oop_temp)); 1198 __ movptr(Address(rsp, wordSize), t); 1199 #endif // _LP64 1200 __ bind(L); 1201 } 1202 #ifdef ASSERT 1203 { 1204 Label L; 1205 __ push(t); 1206 __ get_thread(t); // get vm's javathread* 1207 __ cmpptr(t, STATE(_thread)); 1208 __ jcc(Assembler::equal, L); 1209 __ int3(); 1210 __ bind(L); 1211 __ pop(t); 1212 } 1213 #endif // 1214 1215 // pass JNIEnv 1216 #ifdef _LP64 1217 __ lea(c_rarg0, Address(thread, JavaThread::jni_environment_offset())); 1218 #else 1219 __ movptr(thread, STATE(_thread)); // get thread 1220 __ lea(t, Address(thread, JavaThread::jni_environment_offset())); 1221 1222 __ movptr(Address(rsp, 0), t); 1223 #endif // _LP64 1224 1225 #ifdef ASSERT 1226 { 1227 Label L; 1228 __ push(t); 1229 __ get_thread(t); // get vm's javathread* 1230 __ cmpptr(t, STATE(_thread)); 1231 __ jcc(Assembler::equal, L); 1232 __ int3(); 1233 __ bind(L); 1234 __ pop(t); 1235 } 1236 #endif // 1237 1238 #ifdef ASSERT 1239 { Label L; 1240 __ movl(t, Address(thread, JavaThread::thread_state_offset())); 1241 __ cmpl(t, _thread_in_Java); 1242 __ jcc(Assembler::equal, L); 1243 __ stop("Wrong thread state in native stub"); 1244 __ bind(L); 1245 } 1246 #endif 1247 1248 // Change state to native (we save the return address in the thread, since it might not 1249 // be pushed on the stack when we do a a stack traversal). It is enough that the pc() 1250 // points into the right code segment. It does not have to be the correct return pc. 1251 1252 __ set_last_Java_frame(thread, noreg, rbp, __ pc()); 1253 1254 __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native); 1255 1256 __ call(rax); 1257 1258 // result potentially in rdx:rax or ST0 1259 __ movptr(method, STATE(_method)); 1260 NOT_LP64(__ movptr(thread, STATE(_thread));) // get thread 1261 1262 // The potential result is in ST(0) & rdx:rax 1263 // With C++ interpreter we leave any possible result in ST(0) until we are in result handler and then 1264 // we do the appropriate stuff for returning the result. rdx:rax must always be saved because just about 1265 // anything we do here will destroy it, st(0) is only saved if we re-enter the vm where it would 1266 // be destroyed. 1267 // It is safe to do these pushes because state is _thread_in_native and return address will be found 1268 // via _last_native_pc and not via _last_jave_sp 1269 1270 // Must save the value of ST(0)/xmm0 since it could be destroyed before we get to result handler 1271 { Label Lpush, Lskip; 1272 ExternalAddress float_handler(AbstractInterpreter::result_handler(T_FLOAT)); 1273 ExternalAddress double_handler(AbstractInterpreter::result_handler(T_DOUBLE)); 1274 __ cmpptr(STATE(_result_handler), float_handler.addr()); 1275 __ jcc(Assembler::equal, Lpush); 1276 __ cmpptr(STATE(_result_handler), double_handler.addr()); 1277 __ jcc(Assembler::notEqual, Lskip); 1278 __ bind(Lpush); 1279 __ subptr(rsp, 2*wordSize); 1280 if ( UseSSE < 2 ) { 1281 __ fstp_d(Address(rsp, 0)); 1282 } else { 1283 __ movdbl(Address(rsp, 0), xmm0); 1284 } 1285 __ bind(Lskip); 1286 } 1287 1288 // save rax:rdx for potential use by result handler. 1289 __ push(rax); 1290 #ifndef _LP64 1291 __ push(rdx); 1292 #endif // _LP64 1293 1294 // Either restore the MXCSR register after returning from the JNI Call 1295 // or verify that it wasn't changed. 1296 if (VM_Version::supports_sse()) { 1297 if (RestoreMXCSROnJNICalls) { 1298 __ ldmxcsr(ExternalAddress(StubRoutines::addr_mxcsr_std())); 1299 } 1300 else if (CheckJNICalls ) { 1301 __ call(RuntimeAddress(StubRoutines::x86::verify_mxcsr_entry())); 1302 } 1303 } 1304 1305 #ifndef _LP64 1306 // Either restore the x87 floating pointer control word after returning 1307 // from the JNI call or verify that it wasn't changed. 1308 if (CheckJNICalls) { 1309 __ call(RuntimeAddress(StubRoutines::x86::verify_fpu_cntrl_wrd_entry())); 1310 } 1311 #endif // _LP64 1312 1313 1314 // change thread state 1315 __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native_trans); 1316 if(os::is_MP()) { 1317 // Write serialization page so VM thread can do a pseudo remote membar. 1318 // We use the current thread pointer to calculate a thread specific 1319 // offset to write to within the page. This minimizes bus traffic 1320 // due to cache line collision. 1321 __ serialize_memory(thread, rcx); 1322 } 1323 1324 // check for safepoint operation in progress and/or pending suspend requests 1325 { Label Continue; 1326 1327 __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()), 1328 SafepointSynchronize::_not_synchronized); 1329 1330 // threads running native code and they are expected to self-suspend 1331 // when leaving the _thread_in_native state. We need to check for 1332 // pending suspend requests here. 1333 Label L; 1334 __ jcc(Assembler::notEqual, L); 1335 __ cmpl(Address(thread, JavaThread::suspend_flags_offset()), 0); 1336 __ jcc(Assembler::equal, Continue); 1337 __ bind(L); 1338 1339 // Don't use call_VM as it will see a possible pending exception and forward it 1340 // and never return here preventing us from clearing _last_native_pc down below. 1341 // Also can't use call_VM_leaf either as it will check to see if rsi & rdi are 1342 // preserved and correspond to the bcp/locals pointers. 1343 // 1344 1345 ((MacroAssembler*)_masm)->call_VM_leaf(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans), 1346 thread); 1347 __ increment(rsp, wordSize); 1348 1349 __ movptr(method, STATE(_method)); 1350 __ verify_oop(method); 1351 __ movptr(thread, STATE(_thread)); // get thread 1352 1353 __ bind(Continue); 1354 } 1355 1356 // change thread state 1357 __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_Java); 1358 1359 __ reset_last_Java_frame(thread, true, true); 1360 1361 // reset handle block 1362 __ movptr(t, Address(thread, JavaThread::active_handles_offset())); 1363 __ movptr(Address(t, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD); 1364 1365 // If result was an oop then unbox and save it in the frame 1366 { Label L; 1367 Label no_oop, store_result; 1368 ExternalAddress oop_handler(AbstractInterpreter::result_handler(T_OBJECT)); 1369 __ cmpptr(STATE(_result_handler), oop_handler.addr()); 1370 __ jcc(Assembler::notEqual, no_oop); 1371 #ifndef _LP64 1372 __ pop(rdx); 1373 #endif // _LP64 1374 __ pop(rax); 1375 __ testptr(rax, rax); 1376 __ jcc(Assembler::zero, store_result); 1377 // unbox 1378 __ movptr(rax, Address(rax, 0)); 1379 __ bind(store_result); 1380 __ movptr(STATE(_oop_temp), rax); 1381 // keep stack depth as expected by pushing oop which will eventually be discarded 1382 __ push(rax); 1383 #ifndef _LP64 1384 __ push(rdx); 1385 #endif // _LP64 1386 __ bind(no_oop); 1387 } 1388 1389 { 1390 Label no_reguard; 1391 __ cmpl(Address(thread, JavaThread::stack_guard_state_offset()), JavaThread::stack_guard_yellow_disabled); 1392 __ jcc(Assembler::notEqual, no_reguard); 1393 1394 __ pusha(); 1395 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages))); 1396 __ popa(); 1397 1398 __ bind(no_reguard); 1399 } 1400 1401 1402 // QQQ Seems like for native methods we simply return and the caller will see the pending 1403 // exception and do the right thing. Certainly the interpreter will, don't know about 1404 // compiled methods. 1405 // Seems that the answer to above is no this is wrong. The old code would see the exception 1406 // and forward it before doing the unlocking and notifying jvmdi that method has exited. 1407 // This seems wrong need to investigate the spec. 1408 1409 // handle exceptions (exception handling will handle unlocking!) 1410 { Label L; 1411 __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD); 1412 __ jcc(Assembler::zero, L); 1413 __ bind(pending_exception_present); 1414 1415 // There are potential results on the stack (rax/rdx, ST(0)) we ignore these and simply 1416 // return and let caller deal with exception. This skips the unlocking here which 1417 // seems wrong but seems to be what asm interpreter did. Can't find this in the spec. 1418 // Note: must preverve method in rbx 1419 // 1420 1421 // remove activation 1422 1423 __ movptr(t, STATE(_sender_sp)); 1424 __ leave(); // remove frame anchor 1425 __ pop(rdi); // get return address 1426 __ movptr(state, STATE(_prev_link)); // get previous state for return 1427 __ mov(rsp, t); // set sp to sender sp 1428 __ push(rdi); // push throwing pc 1429 // The skips unlocking!! This seems to be what asm interpreter does but seems 1430 // very wrong. Not clear if this violates the spec. 1431 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry())); 1432 __ bind(L); 1433 } 1434 1435 // do unlocking if necessary 1436 { Label L; 1437 __ movl(t, Address(method, methodOopDesc::access_flags_offset())); 1438 __ testl(t, JVM_ACC_SYNCHRONIZED); 1439 __ jcc(Assembler::zero, L); 1440 // the code below should be shared with interpreter macro assembler implementation 1441 { Label unlock; 1442 const Register monitor = NOT_LP64(rdx) LP64_ONLY(c_rarg1); 1443 // BasicObjectLock will be first in list, since this is a synchronized method. However, need 1444 // to check that the object has not been unlocked by an explicit monitorexit bytecode. 1445 __ movptr(monitor, STATE(_monitor_base)); 1446 __ subptr(monitor, frame::interpreter_frame_monitor_size() * wordSize); // address of initial monitor 1447 1448 __ movptr(t, Address(monitor, BasicObjectLock::obj_offset_in_bytes())); 1449 __ testptr(t, t); 1450 __ jcc(Assembler::notZero, unlock); 1451 1452 // Entry already unlocked, need to throw exception 1453 __ MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception)); 1454 __ should_not_reach_here(); 1455 1456 __ bind(unlock); 1457 __ unlock_object(monitor); 1458 // unlock can blow rbx so restore it for path that needs it below 1459 __ movptr(method, STATE(_method)); 1460 } 1461 __ bind(L); 1462 } 1463 1464 // jvmti support 1465 // Note: This must happen _after_ handling/throwing any exceptions since 1466 // the exception handler code notifies the runtime of method exits 1467 // too. If this happens before, method entry/exit notifications are 1468 // not properly paired (was bug - gri 11/22/99). 1469 __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI); 1470 1471 // restore potential result in rdx:rax, call result handler to restore potential result in ST0 & handle result 1472 #ifndef _LP64 1473 __ pop(rdx); 1474 #endif // _LP64 1475 __ pop(rax); 1476 __ movptr(t, STATE(_result_handler)); // get result handler 1477 __ call(t); // call result handler to convert to tosca form 1478 1479 // remove activation 1480 1481 __ movptr(t, STATE(_sender_sp)); 1482 1483 __ leave(); // remove frame anchor 1484 __ pop(rdi); // get return address 1485 __ movptr(state, STATE(_prev_link)); // get previous state for return (if c++ interpreter was caller) 1486 __ mov(rsp, t); // set sp to sender sp 1487 __ jmp(rdi); 1488 1489 // invocation counter overflow 1490 if (inc_counter) { 1491 // Handle overflow of counter and compile method 1492 __ bind(invocation_counter_overflow); 1493 generate_counter_overflow(&continue_after_compile); 1494 } 1495 1496 return entry_point; 1497 } 1498 1499 // Generate entries that will put a result type index into rcx 1500 void CppInterpreterGenerator::generate_deopt_handling() { 1501 1502 Label return_from_deopt_common; 1503 1504 // Generate entries that will put a result type index into rcx 1505 // deopt needs to jump to here to enter the interpreter (return a result) 1506 deopt_frame_manager_return_atos = __ pc(); 1507 1508 // rax is live here 1509 __ movl(rcx, AbstractInterpreter::BasicType_as_index(T_OBJECT)); // Result stub address array index 1510 __ jmp(return_from_deopt_common); 1511 1512 1513 // deopt needs to jump to here to enter the interpreter (return a result) 1514 deopt_frame_manager_return_btos = __ pc(); 1515 1516 // rax is live here 1517 __ movl(rcx, AbstractInterpreter::BasicType_as_index(T_BOOLEAN)); // Result stub address array index 1518 __ jmp(return_from_deopt_common); 1519 1520 // deopt needs to jump to here to enter the interpreter (return a result) 1521 deopt_frame_manager_return_itos = __ pc(); 1522 1523 // rax is live here 1524 __ movl(rcx, AbstractInterpreter::BasicType_as_index(T_INT)); // Result stub address array index 1525 __ jmp(return_from_deopt_common); 1526 1527 // deopt needs to jump to here to enter the interpreter (return a result) 1528 1529 deopt_frame_manager_return_ltos = __ pc(); 1530 // rax,rdx are live here 1531 __ movl(rcx, AbstractInterpreter::BasicType_as_index(T_LONG)); // Result stub address array index 1532 __ jmp(return_from_deopt_common); 1533 1534 // deopt needs to jump to here to enter the interpreter (return a result) 1535 1536 deopt_frame_manager_return_ftos = __ pc(); 1537 // st(0) is live here 1538 __ movl(rcx, AbstractInterpreter::BasicType_as_index(T_FLOAT)); // Result stub address array index 1539 __ jmp(return_from_deopt_common); 1540 1541 // deopt needs to jump to here to enter the interpreter (return a result) 1542 deopt_frame_manager_return_dtos = __ pc(); 1543 1544 // st(0) is live here 1545 __ movl(rcx, AbstractInterpreter::BasicType_as_index(T_DOUBLE)); // Result stub address array index 1546 __ jmp(return_from_deopt_common); 1547 1548 // deopt needs to jump to here to enter the interpreter (return a result) 1549 deopt_frame_manager_return_vtos = __ pc(); 1550 1551 __ movl(rcx, AbstractInterpreter::BasicType_as_index(T_VOID)); 1552 1553 // Deopt return common 1554 // an index is present in rcx that lets us move any possible result being 1555 // return to the interpreter's stack 1556 // 1557 // Because we have a full sized interpreter frame on the youngest 1558 // activation the stack is pushed too deep to share the tosca to 1559 // stack converters directly. We shrink the stack to the desired 1560 // amount and then push result and then re-extend the stack. 1561 // We could have the code in size_activation layout a short 1562 // frame for the top activation but that would look different 1563 // than say sparc (which needs a full size activation because 1564 // the windows are in the way. Really it could be short? QQQ 1565 // 1566 __ bind(return_from_deopt_common); 1567 1568 __ lea(state, Address(rbp, -(int)sizeof(BytecodeInterpreter))); 1569 1570 // setup rsp so we can push the "result" as needed. 1571 __ movptr(rsp, STATE(_stack)); // trim stack (is prepushed) 1572 __ addptr(rsp, wordSize); // undo prepush 1573 1574 ExternalAddress tosca_to_stack((address)CppInterpreter::_tosca_to_stack); 1575 // Address index(noreg, rcx, Address::times_ptr); 1576 __ movptr(rcx, ArrayAddress(tosca_to_stack, Address(noreg, rcx, Address::times_ptr))); 1577 // __ movl(rcx, Address(noreg, rcx, Address::times_ptr, int(AbstractInterpreter::_tosca_to_stack))); 1578 __ call(rcx); // call result converter 1579 1580 __ movl(STATE(_msg), (int)BytecodeInterpreter::deopt_resume); 1581 __ lea(rsp, Address(rsp, -wordSize)); // prepush stack (result if any already present) 1582 __ movptr(STATE(_stack), rsp); // inform interpreter of new stack depth (parameters removed, 1583 // result if any on stack already ) 1584 __ movptr(rsp, STATE(_stack_limit)); // restore expression stack to full depth 1585 } 1586 1587 // Generate the code to handle a more_monitors message from the c++ interpreter 1588 void CppInterpreterGenerator::generate_more_monitors() { 1589 1590 1591 Label entry, loop; 1592 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize; 1593 // 1. compute new pointers // rsp: old expression stack top 1594 __ movptr(rdx, STATE(_stack_base)); // rdx: old expression stack bottom 1595 __ subptr(rsp, entry_size); // move expression stack top limit 1596 __ subptr(STATE(_stack), entry_size); // update interpreter stack top 1597 __ subptr(STATE(_stack_limit), entry_size); // inform interpreter 1598 __ subptr(rdx, entry_size); // move expression stack bottom 1599 __ movptr(STATE(_stack_base), rdx); // inform interpreter 1600 __ movptr(rcx, STATE(_stack)); // set start value for copy loop 1601 __ jmp(entry); 1602 // 2. move expression stack contents 1603 __ bind(loop); 1604 __ movptr(rbx, Address(rcx, entry_size)); // load expression stack word from old location 1605 __ movptr(Address(rcx, 0), rbx); // and store it at new location 1606 __ addptr(rcx, wordSize); // advance to next word 1607 __ bind(entry); 1608 __ cmpptr(rcx, rdx); // check if bottom reached 1609 __ jcc(Assembler::notEqual, loop); // if not at bottom then copy next word 1610 // now zero the slot so we can find it. 1611 __ movptr(Address(rdx, BasicObjectLock::obj_offset_in_bytes()), (int32_t) NULL_WORD); 1612 __ movl(STATE(_msg), (int)BytecodeInterpreter::got_monitors); 1613 } 1614 1615 1616 // Initial entry to C++ interpreter from the call_stub. 1617 // This entry point is called the frame manager since it handles the generation 1618 // of interpreter activation frames via requests directly from the vm (via call_stub) 1619 // and via requests from the interpreter. The requests from the call_stub happen 1620 // directly thru the entry point. Requests from the interpreter happen via returning 1621 // from the interpreter and examining the message the interpreter has returned to 1622 // the frame manager. The frame manager can take the following requests: 1623 1624 // NO_REQUEST - error, should never happen. 1625 // MORE_MONITORS - need a new monitor. Shuffle the expression stack on down and 1626 // allocate a new monitor. 1627 // CALL_METHOD - setup a new activation to call a new method. Very similar to what 1628 // happens during entry during the entry via the call stub. 1629 // RETURN_FROM_METHOD - remove an activation. Return to interpreter or call stub. 1630 // 1631 // Arguments: 1632 // 1633 // rbx: methodOop 1634 // rcx: receiver - unused (retrieved from stack as needed) 1635 // rsi/r13: previous frame manager state (NULL from the call_stub/c1/c2) 1636 // 1637 // 1638 // Stack layout at entry 1639 // 1640 // [ return address ] <--- rsp 1641 // [ parameter n ] 1642 // ... 1643 // [ parameter 1 ] 1644 // [ expression stack ] 1645 // 1646 // 1647 // We are free to blow any registers we like because the call_stub which brought us here 1648 // initially has preserved the callee save registers already. 1649 // 1650 // 1651 1652 static address interpreter_frame_manager = NULL; 1653 1654 address InterpreterGenerator::generate_normal_entry(bool synchronized) { 1655 1656 // rbx: methodOop 1657 // rsi/r13: sender sp 1658 1659 // Because we redispatch "recursive" interpreter entries thru this same entry point 1660 // the "input" register usage is a little strange and not what you expect coming 1661 // from the call_stub. From the call stub rsi/rdi (current/previous) interpreter 1662 // state are NULL but on "recursive" dispatches they are what you'd expect. 1663 // rsi: current interpreter state (C++ interpreter) must preserve (null from call_stub/c1/c2) 1664 1665 1666 // A single frame manager is plenty as we don't specialize for synchronized. We could and 1667 // the code is pretty much ready. Would need to change the test below and for good measure 1668 // modify generate_interpreter_state to only do the (pre) sync stuff stuff for synchronized 1669 // routines. Not clear this is worth it yet. 1670 1671 if (interpreter_frame_manager) return interpreter_frame_manager; 1672 1673 address entry_point = __ pc(); 1674 1675 // Fast accessor methods share this entry point. 1676 // This works because frame manager is in the same codelet 1677 if (UseFastAccessorMethods && !synchronized) __ bind(fast_accessor_slow_entry_path); 1678 1679 Label dispatch_entry_2; 1680 __ movptr(rcx, sender_sp_on_entry); 1681 __ movptr(state, (int32_t)NULL_WORD); // no current activation 1682 1683 __ jmp(dispatch_entry_2); 1684 1685 const Register locals = rdi; 1686 1687 Label re_dispatch; 1688 1689 __ bind(re_dispatch); 1690 1691 // save sender sp (doesn't include return address 1692 __ lea(rcx, Address(rsp, wordSize)); 1693 1694 __ bind(dispatch_entry_2); 1695 1696 // save sender sp 1697 __ push(rcx); 1698 1699 const Address size_of_parameters(rbx, methodOopDesc::size_of_parameters_offset()); 1700 const Address size_of_locals (rbx, methodOopDesc::size_of_locals_offset()); 1701 const Address access_flags (rbx, methodOopDesc::access_flags_offset()); 1702 1703 // const Address monitor_block_top (rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize); 1704 // const Address monitor_block_bot (rbp, frame::interpreter_frame_initial_sp_offset * wordSize); 1705 // const Address monitor(rbp, frame::interpreter_frame_initial_sp_offset * wordSize - (int)sizeof(BasicObjectLock)); 1706 1707 // get parameter size (always needed) 1708 __ load_unsigned_short(rcx, size_of_parameters); 1709 1710 // rbx: methodOop 1711 // rcx: size of parameters 1712 __ load_unsigned_short(rdx, size_of_locals); // get size of locals in words 1713 1714 __ subptr(rdx, rcx); // rdx = no. of additional locals 1715 1716 // see if we've got enough room on the stack for locals plus overhead. 1717 generate_stack_overflow_check(); // C++ 1718 1719 // c++ interpreter does not use stack banging or any implicit exceptions 1720 // leave for now to verify that check is proper. 1721 bang_stack_shadow_pages(false); 1722 1723 1724 1725 // compute beginning of parameters (rdi) 1726 __ lea(locals, Address(rsp, rcx, Address::times_ptr, wordSize)); 1727 1728 // save sender's sp 1729 // __ movl(rcx, rsp); 1730 1731 // get sender's sp 1732 __ pop(rcx); 1733 1734 // get return address 1735 __ pop(rax); 1736 1737 // rdx - # of additional locals 1738 // allocate space for locals 1739 // explicitly initialize locals 1740 { 1741 Label exit, loop; 1742 __ testl(rdx, rdx); // (32bit ok) 1743 __ jcc(Assembler::lessEqual, exit); // do nothing if rdx <= 0 1744 __ bind(loop); 1745 __ push((int32_t)NULL_WORD); // initialize local variables 1746 __ decrement(rdx); // until everything initialized 1747 __ jcc(Assembler::greater, loop); 1748 __ bind(exit); 1749 } 1750 1751 1752 // Assumes rax = return address 1753 1754 // allocate and initialize new interpreterState and method expression stack 1755 // IN(locals) -> locals 1756 // IN(state) -> any current interpreter activation 1757 // destroys rax, rcx, rdx, rdi 1758 // OUT (state) -> new interpreterState 1759 // OUT(rsp) -> bottom of methods expression stack 1760 1761 generate_compute_interpreter_state(state, locals, rcx, false); 1762 1763 // Call interpreter 1764 1765 Label call_interpreter; 1766 __ bind(call_interpreter); 1767 1768 // c++ interpreter does not use stack banging or any implicit exceptions 1769 // leave for now to verify that check is proper. 1770 bang_stack_shadow_pages(false); 1771 1772 1773 // Call interpreter enter here if message is 1774 // set and we know stack size is valid 1775 1776 Label call_interpreter_2; 1777 1778 __ bind(call_interpreter_2); 1779 1780 { 1781 const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread); 1782 1783 #ifdef _LP64 1784 __ mov(c_rarg0, state); 1785 #else 1786 __ push(state); // push arg to interpreter 1787 __ movptr(thread, STATE(_thread)); 1788 #endif // _LP64 1789 1790 // We can setup the frame anchor with everything we want at this point 1791 // as we are thread_in_Java and no safepoints can occur until we go to 1792 // vm mode. We do have to clear flags on return from vm but that is it 1793 // 1794 __ movptr(Address(thread, JavaThread::last_Java_fp_offset()), rbp); 1795 __ movptr(Address(thread, JavaThread::last_Java_sp_offset()), rsp); 1796 1797 // Call the interpreter 1798 1799 RuntimeAddress normal(CAST_FROM_FN_PTR(address, BytecodeInterpreter::run)); 1800 RuntimeAddress checking(CAST_FROM_FN_PTR(address, BytecodeInterpreter::runWithChecks)); 1801 1802 __ call(JvmtiExport::can_post_interpreter_events() ? checking : normal); 1803 NOT_LP64(__ pop(rax);) // discard parameter to run 1804 // 1805 // state is preserved since it is callee saved 1806 // 1807 1808 // reset_last_Java_frame 1809 1810 NOT_LP64(__ movl(thread, STATE(_thread));) 1811 __ reset_last_Java_frame(thread, true, true); 1812 } 1813 1814 // examine msg from interpreter to determine next action 1815 1816 __ movl(rdx, STATE(_msg)); // Get new message 1817 1818 Label call_method; 1819 Label return_from_interpreted_method; 1820 Label throw_exception; 1821 Label bad_msg; 1822 Label do_OSR; 1823 1824 __ cmpl(rdx, (int32_t)BytecodeInterpreter::call_method); 1825 __ jcc(Assembler::equal, call_method); 1826 __ cmpl(rdx, (int32_t)BytecodeInterpreter::return_from_method); 1827 __ jcc(Assembler::equal, return_from_interpreted_method); 1828 __ cmpl(rdx, (int32_t)BytecodeInterpreter::do_osr); 1829 __ jcc(Assembler::equal, do_OSR); 1830 __ cmpl(rdx, (int32_t)BytecodeInterpreter::throwing_exception); 1831 __ jcc(Assembler::equal, throw_exception); 1832 __ cmpl(rdx, (int32_t)BytecodeInterpreter::more_monitors); 1833 __ jcc(Assembler::notEqual, bad_msg); 1834 1835 // Allocate more monitor space, shuffle expression stack.... 1836 1837 generate_more_monitors(); 1838 1839 __ jmp(call_interpreter); 1840 1841 // uncommon trap needs to jump to here to enter the interpreter (re-execute current bytecode) 1842 unctrap_frame_manager_entry = __ pc(); 1843 // 1844 // Load the registers we need. 1845 __ lea(state, Address(rbp, -(int)sizeof(BytecodeInterpreter))); 1846 __ movptr(rsp, STATE(_stack_limit)); // restore expression stack to full depth 1847 __ jmp(call_interpreter_2); 1848 1849 1850 1851 //============================================================================= 1852 // Returning from a compiled method into a deopted method. The bytecode at the 1853 // bcp has completed. The result of the bytecode is in the native abi (the tosca 1854 // for the template based interpreter). Any stack space that was used by the 1855 // bytecode that has completed has been removed (e.g. parameters for an invoke) 1856 // so all that we have to do is place any pending result on the expression stack 1857 // and resume execution on the next bytecode. 1858 1859 1860 generate_deopt_handling(); 1861 __ jmp(call_interpreter); 1862 1863 1864 // Current frame has caught an exception we need to dispatch to the 1865 // handler. We can get here because a native interpreter frame caught 1866 // an exception in which case there is no handler and we must rethrow 1867 // If it is a vanilla interpreted frame the we simply drop into the 1868 // interpreter and let it do the lookup. 1869 1870 Interpreter::_rethrow_exception_entry = __ pc(); 1871 // rax: exception 1872 // rdx: return address/pc that threw exception 1873 1874 Label return_with_exception; 1875 Label unwind_and_forward; 1876 1877 // restore state pointer. 1878 __ lea(state, Address(rbp, -(int)sizeof(BytecodeInterpreter))); 1879 1880 __ movptr(rbx, STATE(_method)); // get method 1881 #ifdef _LP64 1882 __ movptr(Address(r15_thread, Thread::pending_exception_offset()), rax); 1883 #else 1884 __ movl(rcx, STATE(_thread)); // get thread 1885 1886 // Store exception with interpreter will expect it 1887 __ movptr(Address(rcx, Thread::pending_exception_offset()), rax); 1888 #endif // _LP64 1889 1890 // is current frame vanilla or native? 1891 1892 __ movl(rdx, access_flags); 1893 __ testl(rdx, JVM_ACC_NATIVE); 1894 __ jcc(Assembler::zero, return_with_exception); // vanilla interpreted frame, handle directly 1895 1896 // We drop thru to unwind a native interpreted frame with a pending exception 1897 // We jump here for the initial interpreter frame with exception pending 1898 // We unwind the current acivation and forward it to our caller. 1899 1900 __ bind(unwind_and_forward); 1901 1902 // unwind rbp, return stack to unextended value and re-push return address 1903 1904 __ movptr(rcx, STATE(_sender_sp)); 1905 __ leave(); 1906 __ pop(rdx); 1907 __ mov(rsp, rcx); 1908 __ push(rdx); 1909 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry())); 1910 1911 // Return point from a call which returns a result in the native abi 1912 // (c1/c2/jni-native). This result must be processed onto the java 1913 // expression stack. 1914 // 1915 // A pending exception may be present in which case there is no result present 1916 1917 Label resume_interpreter; 1918 Label do_float; 1919 Label do_double; 1920 Label done_conv; 1921 1922 // The FPU stack is clean if UseSSE >= 2 but must be cleaned in other cases 1923 if (UseSSE < 2) { 1924 __ lea(state, Address(rbp, -(int)sizeof(BytecodeInterpreter))); 1925 __ movptr(rbx, STATE(_result._to_call._callee)); // get method just executed 1926 __ movl(rcx, Address(rbx, methodOopDesc::result_index_offset())); 1927 __ cmpl(rcx, AbstractInterpreter::BasicType_as_index(T_FLOAT)); // Result stub address array index 1928 __ jcc(Assembler::equal, do_float); 1929 __ cmpl(rcx, AbstractInterpreter::BasicType_as_index(T_DOUBLE)); // Result stub address array index 1930 __ jcc(Assembler::equal, do_double); 1931 #if !defined(_LP64) || defined(COMPILER1) || !defined(COMPILER2) 1932 __ empty_FPU_stack(); 1933 #endif // COMPILER2 1934 __ jmp(done_conv); 1935 1936 __ bind(do_float); 1937 #ifdef COMPILER2 1938 for (int i = 1; i < 8; i++) { 1939 __ ffree(i); 1940 } 1941 #endif // COMPILER2 1942 __ jmp(done_conv); 1943 __ bind(do_double); 1944 #ifdef COMPILER2 1945 for (int i = 1; i < 8; i++) { 1946 __ ffree(i); 1947 } 1948 #endif // COMPILER2 1949 __ jmp(done_conv); 1950 } else { 1951 __ MacroAssembler::verify_FPU(0, "generate_return_entry_for compiled"); 1952 __ jmp(done_conv); 1953 } 1954 1955 // Return point to interpreter from compiled/native method 1956 InternalAddress return_from_native_method(__ pc()); 1957 1958 __ bind(done_conv); 1959 1960 1961 // Result if any is in tosca. The java expression stack is in the state that the 1962 // calling convention left it (i.e. params may or may not be present) 1963 // Copy the result from tosca and place it on java expression stack. 1964 1965 // Restore rsi/r13 as compiled code may not preserve it 1966 1967 __ lea(state, Address(rbp, -(int)sizeof(BytecodeInterpreter))); 1968 1969 // restore stack to what we had when we left (in case i2c extended it) 1970 1971 __ movptr(rsp, STATE(_stack)); 1972 __ lea(rsp, Address(rsp, wordSize)); 1973 1974 // If there is a pending exception then we don't really have a result to process 1975 1976 #ifdef _LP64 1977 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD); 1978 #else 1979 __ movptr(rcx, STATE(_thread)); // get thread 1980 __ cmpptr(Address(rcx, Thread::pending_exception_offset()), (int32_t)NULL_WORD); 1981 #endif // _LP64 1982 __ jcc(Assembler::notZero, return_with_exception); 1983 1984 // get method just executed 1985 __ movptr(rbx, STATE(_result._to_call._callee)); 1986 1987 // callee left args on top of expression stack, remove them 1988 __ load_unsigned_short(rcx, Address(rbx, methodOopDesc::size_of_parameters_offset())); 1989 __ lea(rsp, Address(rsp, rcx, Address::times_ptr)); 1990 1991 __ movl(rcx, Address(rbx, methodOopDesc::result_index_offset())); 1992 ExternalAddress tosca_to_stack((address)CppInterpreter::_tosca_to_stack); 1993 // Address index(noreg, rax, Address::times_ptr); 1994 __ movptr(rcx, ArrayAddress(tosca_to_stack, Address(noreg, rcx, Address::times_ptr))); 1995 // __ movl(rcx, Address(noreg, rcx, Address::times_ptr, int(AbstractInterpreter::_tosca_to_stack))); 1996 __ call(rcx); // call result converter 1997 __ jmp(resume_interpreter); 1998 1999 // An exception is being caught on return to a vanilla interpreter frame. 2000 // Empty the stack and resume interpreter 2001 2002 __ bind(return_with_exception); 2003 2004 // Exception present, empty stack 2005 __ movptr(rsp, STATE(_stack_base)); 2006 __ jmp(resume_interpreter); 2007 2008 // Return from interpreted method we return result appropriate to the caller (i.e. "recursive" 2009 // interpreter call, or native) and unwind this interpreter activation. 2010 // All monitors should be unlocked. 2011 2012 __ bind(return_from_interpreted_method); 2013 2014 Label return_to_initial_caller; 2015 2016 __ movptr(rbx, STATE(_method)); // get method just executed 2017 __ cmpptr(STATE(_prev_link), (int32_t)NULL_WORD); // returning from "recursive" interpreter call? 2018 __ movl(rax, Address(rbx, methodOopDesc::result_index_offset())); // get result type index 2019 __ jcc(Assembler::equal, return_to_initial_caller); // back to native code (call_stub/c1/c2) 2020 2021 // Copy result to callers java stack 2022 ExternalAddress stack_to_stack((address)CppInterpreter::_stack_to_stack); 2023 // Address index(noreg, rax, Address::times_ptr); 2024 2025 __ movptr(rax, ArrayAddress(stack_to_stack, Address(noreg, rax, Address::times_ptr))); 2026 // __ movl(rax, Address(noreg, rax, Address::times_ptr, int(AbstractInterpreter::_stack_to_stack))); 2027 __ call(rax); // call result converter 2028 2029 Label unwind_recursive_activation; 2030 __ bind(unwind_recursive_activation); 2031 2032 // returning to interpreter method from "recursive" interpreter call 2033 // result converter left rax pointing to top of the java stack for method we are returning 2034 // to. Now all we must do is unwind the state from the completed call 2035 2036 __ movptr(state, STATE(_prev_link)); // unwind state 2037 __ leave(); // pop the frame 2038 __ mov(rsp, rax); // unwind stack to remove args 2039 2040 // Resume the interpreter. The current frame contains the current interpreter 2041 // state object. 2042 // 2043 2044 __ bind(resume_interpreter); 2045 2046 // state == interpreterState object for method we are resuming 2047 2048 __ movl(STATE(_msg), (int)BytecodeInterpreter::method_resume); 2049 __ lea(rsp, Address(rsp, -wordSize)); // prepush stack (result if any already present) 2050 __ movptr(STATE(_stack), rsp); // inform interpreter of new stack depth (parameters removed, 2051 // result if any on stack already ) 2052 __ movptr(rsp, STATE(_stack_limit)); // restore expression stack to full depth 2053 __ jmp(call_interpreter_2); // No need to bang 2054 2055 // interpreter returning to native code (call_stub/c1/c2) 2056 // convert result and unwind initial activation 2057 // rax - result index 2058 2059 __ bind(return_to_initial_caller); 2060 ExternalAddress stack_to_native((address)CppInterpreter::_stack_to_native_abi); 2061 // Address index(noreg, rax, Address::times_ptr); 2062 2063 __ movptr(rax, ArrayAddress(stack_to_native, Address(noreg, rax, Address::times_ptr))); 2064 __ call(rax); // call result converter 2065 2066 Label unwind_initial_activation; 2067 __ bind(unwind_initial_activation); 2068 2069 // RETURN TO CALL_STUB/C1/C2 code (result if any in rax/rdx ST(0)) 2070 2071 /* Current stack picture 2072 2073 [ incoming parameters ] 2074 [ extra locals ] 2075 [ return address to CALL_STUB/C1/C2] 2076 fp -> [ CALL_STUB/C1/C2 fp ] 2077 BytecodeInterpreter object 2078 expression stack 2079 sp -> 2080 2081 */ 2082 2083 // return restoring the stack to the original sender_sp value 2084 2085 __ movptr(rcx, STATE(_sender_sp)); 2086 __ leave(); 2087 __ pop(rdi); // get return address 2088 // set stack to sender's sp 2089 __ mov(rsp, rcx); 2090 __ jmp(rdi); // return to call_stub 2091 2092 // OSR request, adjust return address to make current frame into adapter frame 2093 // and enter OSR nmethod 2094 2095 __ bind(do_OSR); 2096 2097 Label remove_initial_frame; 2098 2099 // We are going to pop this frame. Is there another interpreter frame underneath 2100 // it or is it callstub/compiled? 2101 2102 // Move buffer to the expected parameter location 2103 __ movptr(rcx, STATE(_result._osr._osr_buf)); 2104 2105 __ movptr(rax, STATE(_result._osr._osr_entry)); 2106 2107 __ cmpptr(STATE(_prev_link), (int32_t)NULL_WORD); // returning from "recursive" interpreter call? 2108 __ jcc(Assembler::equal, remove_initial_frame); // back to native code (call_stub/c1/c2) 2109 2110 __ movptr(sender_sp_on_entry, STATE(_sender_sp)); // get sender's sp in expected register 2111 __ leave(); // pop the frame 2112 __ mov(rsp, sender_sp_on_entry); // trim any stack expansion 2113 2114 2115 // We know we are calling compiled so push specialized return 2116 // method uses specialized entry, push a return so we look like call stub setup 2117 // this path will handle fact that result is returned in registers and not 2118 // on the java stack. 2119 2120 __ pushptr(return_from_native_method.addr()); 2121 2122 __ jmp(rax); 2123 2124 __ bind(remove_initial_frame); 2125 2126 __ movptr(rdx, STATE(_sender_sp)); 2127 __ leave(); 2128 // get real return 2129 __ pop(rsi); 2130 // set stack to sender's sp 2131 __ mov(rsp, rdx); 2132 // repush real return 2133 __ push(rsi); 2134 // Enter OSR nmethod 2135 __ jmp(rax); 2136 2137 2138 2139 2140 // Call a new method. All we do is (temporarily) trim the expression stack 2141 // push a return address to bring us back to here and leap to the new entry. 2142 2143 __ bind(call_method); 2144 2145 // stack points to next free location and not top element on expression stack 2146 // method expects sp to be pointing to topmost element 2147 2148 __ movptr(rsp, STATE(_stack)); // pop args to c++ interpreter, set sp to java stack top 2149 __ lea(rsp, Address(rsp, wordSize)); 2150 2151 __ movptr(rbx, STATE(_result._to_call._callee)); // get method to execute 2152 2153 // don't need a return address if reinvoking interpreter 2154 2155 // Make it look like call_stub calling conventions 2156 2157 // Get (potential) receiver 2158 __ load_unsigned_short(rcx, size_of_parameters); // get size of parameters in words 2159 2160 ExternalAddress recursive(CAST_FROM_FN_PTR(address, RecursiveInterpreterActivation)); 2161 __ pushptr(recursive.addr()); // make it look good in the debugger 2162 2163 InternalAddress entry(entry_point); 2164 __ cmpptr(STATE(_result._to_call._callee_entry_point), entry.addr()); // returning to interpreter? 2165 __ jcc(Assembler::equal, re_dispatch); // yes 2166 2167 __ pop(rax); // pop dummy address 2168 2169 2170 // get specialized entry 2171 __ movptr(rax, STATE(_result._to_call._callee_entry_point)); 2172 // set sender SP 2173 __ mov(sender_sp_on_entry, rsp); 2174 2175 // method uses specialized entry, push a return so we look like call stub setup 2176 // this path will handle fact that result is returned in registers and not 2177 // on the java stack. 2178 2179 __ pushptr(return_from_native_method.addr()); 2180 2181 __ jmp(rax); 2182 2183 __ bind(bad_msg); 2184 __ stop("Bad message from interpreter"); 2185 2186 // Interpreted method "returned" with an exception pass it on... 2187 // Pass result, unwind activation and continue/return to interpreter/call_stub 2188 // We handle result (if any) differently based on return to interpreter or call_stub 2189 2190 Label unwind_initial_with_pending_exception; 2191 2192 __ bind(throw_exception); 2193 __ cmpptr(STATE(_prev_link), (int32_t)NULL_WORD); // returning from recursive interpreter call? 2194 __ jcc(Assembler::equal, unwind_initial_with_pending_exception); // no, back to native code (call_stub/c1/c2) 2195 __ movptr(rax, STATE(_locals)); // pop parameters get new stack value 2196 __ addptr(rax, wordSize); // account for prepush before we return 2197 __ jmp(unwind_recursive_activation); 2198 2199 __ bind(unwind_initial_with_pending_exception); 2200 2201 // We will unwind the current (initial) interpreter frame and forward 2202 // the exception to the caller. We must put the exception in the 2203 // expected register and clear pending exception and then forward. 2204 2205 __ jmp(unwind_and_forward); 2206 2207 interpreter_frame_manager = entry_point; 2208 return entry_point; 2209 } 2210 2211 address AbstractInterpreterGenerator::generate_method_entry(AbstractInterpreter::MethodKind kind) { 2212 // determine code generation flags 2213 bool synchronized = false; 2214 address entry_point = NULL; 2215 2216 switch (kind) { 2217 case Interpreter::zerolocals : break; 2218 case Interpreter::zerolocals_synchronized: synchronized = true; break; 2219 case Interpreter::native : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(false); break; 2220 case Interpreter::native_synchronized : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(true); break; 2221 case Interpreter::empty : entry_point = ((InterpreterGenerator*)this)->generate_empty_entry(); break; 2222 case Interpreter::accessor : entry_point = ((InterpreterGenerator*)this)->generate_accessor_entry(); break; 2223 case Interpreter::abstract : entry_point = ((InterpreterGenerator*)this)->generate_abstract_entry(); break; 2224 case Interpreter::method_handle : entry_point = ((InterpreterGenerator*)this)->generate_method_handle_entry(); break; 2225 2226 case Interpreter::java_lang_math_sin : // fall thru 2227 case Interpreter::java_lang_math_cos : // fall thru 2228 case Interpreter::java_lang_math_tan : // fall thru 2229 case Interpreter::java_lang_math_abs : // fall thru 2230 case Interpreter::java_lang_math_log : // fall thru 2231 case Interpreter::java_lang_math_log10 : // fall thru 2232 case Interpreter::java_lang_math_sqrt : entry_point = ((InterpreterGenerator*)this)->generate_math_entry(kind); break; 2233 case Interpreter::java_lang_ref_reference_get 2234 : entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break; 2235 default : ShouldNotReachHere(); break; 2236 } 2237 2238 if (entry_point) return entry_point; 2239 2240 return ((InterpreterGenerator*)this)->generate_normal_entry(synchronized); 2241 2242 } 2243 2244 InterpreterGenerator::InterpreterGenerator(StubQueue* code) 2245 : CppInterpreterGenerator(code) { 2246 generate_all(); // down here so it can be "virtual" 2247 } 2248 2249 // Deoptimization helpers for C++ interpreter 2250 2251 // How much stack a method activation needs in words. 2252 int AbstractInterpreter::size_top_interpreter_activation(methodOop method) { 2253 2254 const int stub_code = 4; // see generate_call_stub 2255 // Save space for one monitor to get into the interpreted method in case 2256 // the method is synchronized 2257 int monitor_size = method->is_synchronized() ? 2258 1*frame::interpreter_frame_monitor_size() : 0; 2259 2260 // total static overhead size. Account for interpreter state object, return 2261 // address, saved rbp and 2 words for a "static long no_params() method" issue. 2262 2263 const int overhead_size = sizeof(BytecodeInterpreter)/wordSize + 2264 ( frame::sender_sp_offset - frame::link_offset) + 2; 2265 2266 const int extra_stack = 0; //6815692//methodOopDesc::extra_stack_entries(); 2267 const int method_stack = (method->max_locals() + method->max_stack() + extra_stack) * 2268 Interpreter::stackElementWords(); 2269 return overhead_size + method_stack + stub_code; 2270 } 2271 2272 // returns the activation size. 2273 static int size_activation_helper(int extra_locals_size, int monitor_size) { 2274 return (extra_locals_size + // the addition space for locals 2275 2*BytesPerWord + // return address and saved rbp 2276 2*BytesPerWord + // "static long no_params() method" issue 2277 sizeof(BytecodeInterpreter) + // interpreterState 2278 monitor_size); // monitors 2279 } 2280 2281 void BytecodeInterpreter::layout_interpreterState(interpreterState to_fill, 2282 frame* caller, 2283 frame* current, 2284 methodOop method, 2285 intptr_t* locals, 2286 intptr_t* stack, 2287 intptr_t* stack_base, 2288 intptr_t* monitor_base, 2289 intptr_t* frame_bottom, 2290 bool is_top_frame 2291 ) 2292 { 2293 // What about any vtable? 2294 // 2295 to_fill->_thread = JavaThread::current(); 2296 // This gets filled in later but make it something recognizable for now 2297 to_fill->_bcp = method->code_base(); 2298 to_fill->_locals = locals; 2299 to_fill->_constants = method->constants()->cache(); 2300 to_fill->_method = method; 2301 to_fill->_mdx = NULL; 2302 to_fill->_stack = stack; 2303 if (is_top_frame && JavaThread::current()->popframe_forcing_deopt_reexecution() ) { 2304 to_fill->_msg = deopt_resume2; 2305 } else { 2306 to_fill->_msg = method_resume; 2307 } 2308 to_fill->_result._to_call._bcp_advance = 0; 2309 to_fill->_result._to_call._callee_entry_point = NULL; // doesn't matter to anyone 2310 to_fill->_result._to_call._callee = NULL; // doesn't matter to anyone 2311 to_fill->_prev_link = NULL; 2312 2313 to_fill->_sender_sp = caller->unextended_sp(); 2314 2315 if (caller->is_interpreted_frame()) { 2316 interpreterState prev = caller->get_interpreterState(); 2317 to_fill->_prev_link = prev; 2318 // *current->register_addr(GR_Iprev_state) = (intptr_t) prev; 2319 // Make the prev callee look proper 2320 prev->_result._to_call._callee = method; 2321 if (*prev->_bcp == Bytecodes::_invokeinterface) { 2322 prev->_result._to_call._bcp_advance = 5; 2323 } else { 2324 prev->_result._to_call._bcp_advance = 3; 2325 } 2326 } 2327 to_fill->_oop_temp = NULL; 2328 to_fill->_stack_base = stack_base; 2329 // Need +1 here because stack_base points to the word just above the first expr stack entry 2330 // and stack_limit is supposed to point to the word just below the last expr stack entry. 2331 // See generate_compute_interpreter_state. 2332 int extra_stack = 0; //6815692//methodOopDesc::extra_stack_entries(); 2333 to_fill->_stack_limit = stack_base - (method->max_stack() + extra_stack + 1); 2334 to_fill->_monitor_base = (BasicObjectLock*) monitor_base; 2335 2336 to_fill->_self_link = to_fill; 2337 assert(stack >= to_fill->_stack_limit && stack < to_fill->_stack_base, 2338 "Stack top out of range"); 2339 } 2340 2341 int AbstractInterpreter::layout_activation(methodOop method, 2342 int tempcount, // 2343 int popframe_extra_args, 2344 int moncount, 2345 int caller_actual_parameters, 2346 int callee_param_count, 2347 int callee_locals, 2348 frame* caller, 2349 frame* interpreter_frame, 2350 bool is_top_frame) { 2351 2352 assert(popframe_extra_args == 0, "FIX ME"); 2353 // NOTE this code must exactly mimic what InterpreterGenerator::generate_compute_interpreter_state() 2354 // does as far as allocating an interpreter frame. 2355 // If interpreter_frame!=NULL, set up the method, locals, and monitors. 2356 // The frame interpreter_frame, if not NULL, is guaranteed to be the right size, 2357 // as determined by a previous call to this method. 2358 // It is also guaranteed to be walkable even though it is in a skeletal state 2359 // NOTE: return size is in words not bytes 2360 // NOTE: tempcount is the current size of the java expression stack. For top most 2361 // frames we will allocate a full sized expression stack and not the curback 2362 // version that non-top frames have. 2363 2364 // Calculate the amount our frame will be adjust by the callee. For top frame 2365 // this is zero. 2366 2367 // NOTE: ia64 seems to do this wrong (or at least backwards) in that it 2368 // calculates the extra locals based on itself. Not what the callee does 2369 // to it. So it ignores last_frame_adjust value. Seems suspicious as far 2370 // as getting sender_sp correct. 2371 2372 int extra_locals_size = (callee_locals - callee_param_count) * BytesPerWord; 2373 int monitor_size = sizeof(BasicObjectLock) * moncount; 2374 2375 // First calculate the frame size without any java expression stack 2376 int short_frame_size = size_activation_helper(extra_locals_size, 2377 monitor_size); 2378 2379 // Now with full size expression stack 2380 int extra_stack = 0; //6815692//methodOopDesc::extra_stack_entries(); 2381 int full_frame_size = short_frame_size + (method->max_stack() + extra_stack) * BytesPerWord; 2382 2383 // and now with only live portion of the expression stack 2384 short_frame_size = short_frame_size + tempcount * BytesPerWord; 2385 2386 // the size the activation is right now. Only top frame is full size 2387 int frame_size = (is_top_frame ? full_frame_size : short_frame_size); 2388 2389 if (interpreter_frame != NULL) { 2390 #ifdef ASSERT 2391 assert(caller->unextended_sp() == interpreter_frame->interpreter_frame_sender_sp(), "Frame not properly walkable"); 2392 #endif 2393 2394 // MUCHO HACK 2395 2396 intptr_t* frame_bottom = (intptr_t*) ((intptr_t)interpreter_frame->sp() - (full_frame_size - frame_size)); 2397 2398 /* Now fillin the interpreterState object */ 2399 2400 // The state object is the first thing on the frame and easily located 2401 2402 interpreterState cur_state = (interpreterState) ((intptr_t)interpreter_frame->fp() - sizeof(BytecodeInterpreter)); 2403 2404 2405 // Find the locals pointer. This is rather simple on x86 because there is no 2406 // confusing rounding at the callee to account for. We can trivially locate 2407 // our locals based on the current fp(). 2408 // Note: the + 2 is for handling the "static long no_params() method" issue. 2409 // (too bad I don't really remember that issue well...) 2410 2411 intptr_t* locals; 2412 // If the caller is interpreted we need to make sure that locals points to the first 2413 // argument that the caller passed and not in an area where the stack might have been extended. 2414 // because the stack to stack to converter needs a proper locals value in order to remove the 2415 // arguments from the caller and place the result in the proper location. Hmm maybe it'd be 2416 // simpler if we simply stored the result in the BytecodeInterpreter object and let the c++ code 2417 // adjust the stack?? HMMM QQQ 2418 // 2419 if (caller->is_interpreted_frame()) { 2420 // locals must agree with the caller because it will be used to set the 2421 // caller's tos when we return. 2422 interpreterState prev = caller->get_interpreterState(); 2423 // stack() is prepushed. 2424 locals = prev->stack() + method->size_of_parameters(); 2425 // locals = caller->unextended_sp() + (method->size_of_parameters() - 1); 2426 if (locals != interpreter_frame->fp() + frame::sender_sp_offset + (method->max_locals() - 1) + 2) { 2427 // os::breakpoint(); 2428 } 2429 } else { 2430 // this is where a c2i would have placed locals (except for the +2) 2431 locals = interpreter_frame->fp() + frame::sender_sp_offset + (method->max_locals() - 1) + 2; 2432 } 2433 2434 intptr_t* monitor_base = (intptr_t*) cur_state; 2435 intptr_t* stack_base = (intptr_t*) ((intptr_t) monitor_base - monitor_size); 2436 /* +1 because stack is always prepushed */ 2437 intptr_t* stack = (intptr_t*) ((intptr_t) stack_base - (tempcount + 1) * BytesPerWord); 2438 2439 2440 BytecodeInterpreter::layout_interpreterState(cur_state, 2441 caller, 2442 interpreter_frame, 2443 method, 2444 locals, 2445 stack, 2446 stack_base, 2447 monitor_base, 2448 frame_bottom, 2449 is_top_frame); 2450 2451 // BytecodeInterpreter::pd_layout_interpreterState(cur_state, interpreter_return_address, interpreter_frame->fp()); 2452 } 2453 return frame_size/BytesPerWord; 2454 } 2455 2456 #endif // CC_INTERP (all)