/*
 * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateInterpreterGenerator.hpp"
#include "interpreter/templateTable.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

#define __ _masm->

// Global Register Names
static const Register rbcp    = LP64_ONLY(r13) NOT_LP64(rsi);
static const Register rlocals = LP64_ONLY(r14) NOT_LP64(rdi);

const int method_offset = frame::interpreter_frame_method_offset * wordSize;
const int bcp_offset    = frame::interpreter_frame_bcp_offset    * wordSize;
const int locals_offset = frame::interpreter_frame_locals_offset * wordSize;

//-----------------------------------------------------------------------------

address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();

#ifdef ASSERT
  {
    Label L;
    __ lea(rax, Address(rbp,
                        frame::interpreter_frame_monitor_block_top_offset *
                        wordSize));
    __ cmpptr(rax, rsp); // rax = maximal rsp for current rbp (stack
                         // grows negative)
    __ jcc(Assembler::aboveEqual, L); // check if frame is complete
    __ stop ("interpreter frame not set up");
    __ bind(L);
  }
#endif // ASSERT
  // Restore bcp under the assumption that the current frame is still
  // interpreted
  __ restore_bcp();

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // throw exception
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::throw_StackOverflowError));
  return entry;
}

address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(
        const char* name) {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // setup parameters
  // ??? convention: expect aberrant index in register ebx
  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  __ lea(rarg, ExternalAddress((address)name));
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::
                              throw_ArrayIndexOutOfBoundsException),
             rarg, rbx);
  return entry;
}

address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();

  // object is at TOS
  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  __ pop(rarg);

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();

  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::
                              throw_ClassCastException),
             rarg);
  return entry;
}

address TemplateInterpreterGenerator::generate_exception_handler_common(
        const char* name, const char* message, bool pass_oop) {
  assert(!pass_oop || message == NULL, "either oop or message but not both");
  address entry = __ pc();

  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  Register rarg2 = NOT_LP64(rbx) LP64_ONLY(c_rarg2);

  if (pass_oop) {
    // object is at TOS
    __ pop(rarg2);
  }
  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // setup parameters
  __ lea(rarg, ExternalAddress((address)name));
  if (pass_oop) {
    __ call_VM(rax, CAST_FROM_FN_PTR(address,
                                     InterpreterRuntime::
                                     create_klass_exception),
               rarg, rarg2);
  } else {
    // It's somewhat unfortunate that ExternalAddress can't take NULL,
    // because external_word_Relocation will assert.
    if (message != NULL) {
      __ lea(rarg2, ExternalAddress((address)message));
    } else {
      __ movptr(rarg2, NULL_WORD);
    }
    __ call_VM(rax,
               CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception),
               rarg, rarg2);
  }
  // throw exception
  __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
  return entry;
}


address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
  address entry = __ pc();
  // NULL last_sp until next java call
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
  __ dispatch_next(state);
  return entry;
}


address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
  address entry = __ pc();

#ifndef _LP64
#ifdef COMPILER2
  // The FPU stack is clean if UseSSE >= 2 but must be cleaned in other cases
  if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) {
    for (int i = 1; i < 8; i++) {
      __ ffree(i);
    }
  } else if (UseSSE < 2) {
    __ empty_FPU_stack();
  }
#endif // COMPILER2
  if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) {
    __ MacroAssembler::verify_FPU(1, "generate_return_entry_for compiled");
  } else {
    __ MacroAssembler::verify_FPU(0, "generate_return_entry_for compiled");
  }

  if (state == ftos) {
    __ MacroAssembler::verify_FPU(UseSSE >= 1 ? 0 : 1, "generate_return_entry_for in interpreter");
  } else if (state == dtos) {
    __ MacroAssembler::verify_FPU(UseSSE >= 2 ? 0 : 1, "generate_return_entry_for in interpreter");
  }
#endif // _LP64

  // Restore stack bottom in case i2c adjusted stack
  __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  // and NULL it as marker that esp is now tos until next java call
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);

  __ restore_bcp();
  __ restore_locals();

  if (state == atos) {
    Register mdp = rbx;
    Register tmp = rcx;
    __ profile_return_type(mdp, rax, tmp);
  }

  const Register cache = rbx;
  const Register index = rcx;
  __ get_cache_and_index_at_bcp(cache, index, 1, index_size);

  const Register flags = cache;
  __ movl(flags, Address(cache, index, Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
  __ andl(flags, ConstantPoolCacheEntry::parameter_size_mask);
  __ lea(rsp, Address(rsp, flags, Interpreter::stackElementScale()));
  __ dispatch_next(state, step);

  return entry;
}


address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step) {
  address entry = __ pc();

#ifndef _LP64
  if (state == ftos) {
    __ MacroAssembler::verify_FPU(UseSSE >= 1 ? 0 : 1, "generate_deopt_entry_for in interpreter");
  } else if (state == dtos) {
    __ MacroAssembler::verify_FPU(UseSSE >= 2 ? 0 : 1, "generate_deopt_entry_for in interpreter");
  }
#endif // _LP64

  // NULL last_sp until next java call
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
  __ restore_bcp();
  __ restore_locals();
  const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread));
#if INCLUDE_JVMCI
  // Check if we need to take lock at entry of synchronized method.
  if (UseJVMCICompiler) {
    Label L;
    __ cmpb(Address(thread, JavaThread::pending_monitorenter_offset()), 0);
    __ jcc(Assembler::zero, L);
    // Clear flag.
    __ movb(Address(thread, JavaThread::pending_monitorenter_offset()), 0);
    // Satisfy calling convention for lock_method().
    __ get_method(rbx);
    // Take lock.
    lock_method();
    __ bind(L);
  }
#endif
  // handle exceptions
  {
    Label L;
    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
    __ jcc(Assembler::zero, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }
  __ dispatch_next(state, step);
  return entry;
}

address TemplateInterpreterGenerator::generate_result_handler_for(
        BasicType type) {
  address entry = __ pc();
  switch (type) {
  case T_BOOLEAN: __ c2bool(rax);            break;
#ifndef _LP64
  case T_CHAR   : __ andptr(rax, 0xFFFF);    break;
#else
  case T_CHAR   : __ movzwl(rax, rax);       break;
#endif // _LP64
  case T_BYTE   : __ sign_extend_byte(rax);  break;
  case T_SHORT  : __ sign_extend_short(rax); break;
  case T_INT    : /* nothing to do */        break;
  case T_LONG   : /* nothing to do */        break;
  case T_VOID   : /* nothing to do */        break;
#ifndef _LP64
  case T_DOUBLE :
  case T_FLOAT  :
    { const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp();
      __ pop(t);                            // remove return address first
      // Must return a result for interpreter or compiler. In SSE
      // mode, results are returned in xmm0 and the FPU stack must
      // be empty.
      if (type == T_FLOAT && UseSSE >= 1) {
        // Load ST0
        __ fld_d(Address(rsp, 0));
        // Store as float and empty fpu stack
        __ fstp_s(Address(rsp, 0));
        // and reload
        __ movflt(xmm0, Address(rsp, 0));
      } else if (type == T_DOUBLE && UseSSE >= 2) {
        __ movdbl(xmm0, Address(rsp, 0));
      } else {
        // restore ST0
        __ fld_d(Address(rsp, 0));
      }
      // and pop the temp
      __ addptr(rsp, 2 * wordSize);
      __ push(t);                           // restore return address
    }
    break;
#else
  case T_FLOAT  : /* nothing to do */        break;
  case T_DOUBLE : /* nothing to do */        break;
#endif // _LP64

  case T_OBJECT :
    // retrieve result from frame
    __ movptr(rax, Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize));
    // and verify it
    __ verify_oop(rax);
    break;
  default       : ShouldNotReachHere();
  }
  __ ret(0);                                   // return from result handler
  return entry;
}

address TemplateInterpreterGenerator::generate_safept_entry_for(
        TosState state,
        address runtime_entry) {
  address entry = __ pc();
  __ push(state);
  __ call_VM(noreg, runtime_entry);
  __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
  return entry;
}



// Helpers for commoning out cases in the various type of method entries.
//


// increment invocation count & check for overflow
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test
//
// rbx: method
// rcx: invocation counter
//
void TemplateInterpreterGenerator::generate_counter_incr(
        Label* overflow,
        Label* profile_method,
        Label* profile_method_continue) {
  Label done;
  // Note: In tiered we increment either counters in Method* or in MDO
  // depending on whether we're profiling or not.
  if (TieredCompilation) {
    int increment = InvocationCounter::count_increment;
    Label no_mdo;
    if (ProfileInterpreter) {
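      // (Rough sketch of what increment_mask_and_jump does on both paths
      //  below: it adds 'increment' to the counter in memory, ands a scratch
      //  copy of the result with 'mask', and branches to 'overflow' when the
      //  masked bits are zero, i.e. at the notification frequency encoded in
      //  the mask.)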
      // Are we profiling?
      __ movptr(rax, Address(rbx, Method::method_data_offset()));
      __ testptr(rax, rax);
      __ jccb(Assembler::zero, no_mdo);
      // Increment counter in the MDO
      const Address mdo_invocation_counter(rax, in_bytes(MethodData::invocation_counter_offset()) +
                                                in_bytes(InvocationCounter::counter_offset()));
      const Address mask(rax, in_bytes(MethodData::invoke_mask_offset()));
      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rcx, false, Assembler::zero, overflow);
      __ jmp(done);
    }
    __ bind(no_mdo);
    // Increment counter in MethodCounters
    const Address invocation_counter(rax,
                  MethodCounters::invocation_counter_offset() +
                  InvocationCounter::counter_offset());
    __ get_method_counters(rbx, rax, done);
    const Address mask(rax, in_bytes(MethodCounters::invoke_mask_offset()));
    __ increment_mask_and_jump(invocation_counter, increment, mask, rcx,
                               false, Assembler::zero, overflow);
    __ bind(done);
  } else { // not TieredCompilation
    const Address backedge_counter(rax,
                  MethodCounters::backedge_counter_offset() +
                  InvocationCounter::counter_offset());
    const Address invocation_counter(rax,
                  MethodCounters::invocation_counter_offset() +
                  InvocationCounter::counter_offset());

    __ get_method_counters(rbx, rax, done);

    if (ProfileInterpreter) {
      __ incrementl(Address(rax,
              MethodCounters::interpreter_invocation_counter_offset()));
    }
    // Update standard invocation counters
    __ movl(rcx, invocation_counter);
    __ incrementl(rcx, InvocationCounter::count_increment);
    __ movl(invocation_counter, rcx); // save invocation count

    __ movl(rax, backedge_counter);   // load backedge counter
    __ andl(rax, InvocationCounter::count_mask_value); // mask out the status bits

    __ addl(rcx, rax);                // add both counters

    // profile_method is non-null only for interpreted methods, so
    // profile_method != NULL == !native_call

    if (ProfileInterpreter && profile_method != NULL) {
      // Test to see if we should create a method data oop
      __ movptr(rax, Address(rbx, Method::method_counters_offset()));
      __ cmp32(rcx, Address(rax, in_bytes(MethodCounters::interpreter_profile_limit_offset())));
      __ jcc(Assembler::less, *profile_method_continue);

      // if no method data exists, go to profile_method
      __ test_method_data_pointer(rax, *profile_method);
    }

    __ movptr(rax, Address(rbx, Method::method_counters_offset()));
    __ cmp32(rcx, Address(rax, in_bytes(MethodCounters::interpreter_invocation_limit_offset())));
    __ jcc(Assembler::aboveEqual, *overflow);
    __ bind(done);
  }
}

void TemplateInterpreterGenerator::generate_counter_overflow(Label& do_continue) {

  // Asm interpreter on entry
  // r14/rdi - locals
  // r13/rsi - bcp
  // rbx     - method
  // rdx     - cpool --- DOES NOT APPEAR TO BE TRUE
  // rbp     - interpreter frame

  // On return (i.e. jump to entry_point) [ back to invocation of interpreter ]
  // Everything as it was on entry
  // rdx is not restored. Doesn't appear to really be set.

  // InterpreterRuntime::frequency_counter_overflow takes two
  // arguments, the first (thread) is passed by call_VM, the second
  // indicates if the counter overflow occurs at a backwards branch
  // (NULL bcp). We pass zero for it. The call returns the address
  // of the verified entry point for the method or NULL if the
  // compilation did not complete (either went background or bailed
  // out).
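  // (Note that the returned entry point, if any, is not used here: the code
  //  below just restores the Method* and resumes at do_continue; a later
  //  invocation can then dispatch to the compiled code.)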
  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  __ movl(rarg, 0);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::frequency_counter_overflow),
             rarg);

  __ movptr(rbx, Address(rbp, method_offset));   // restore Method*
  // Preserve invariant that r13/r14 contain bcp/locals of sender frame
  // and jump to the interpreted entry.
  __ jmp(do_continue, relocInfo::none);
}

// See if we've got enough room on the stack for locals plus overhead.
// The expression stack grows down incrementally, so the normal guard
// page mechanism will work for that.
//
// NOTE: The additional locals are also always pushed (this wasn't
// obvious in generate_fixed_frame), so the guard should work for them
// too.
//
// Args:
//   rdx: number of additional locals this frame needs (what we must check)
//   rbx: Method*
//
// Kills:
//   rax
void TemplateInterpreterGenerator::generate_stack_overflow_check(void) {

  // monitor entry size: see picture of stack in frame_x86.hpp
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

  // total overhead size: entry_size + (saved rbp through expr stack
  // bottom). be sure to change this if you add/subtract anything
  // to/from the overhead area
  const int overhead_size =
    -(frame::interpreter_frame_initial_sp_offset * wordSize) + entry_size;

  const int page_size = os::vm_page_size();

  Label after_frame_check;

  // see if the frame is greater than one page in size. If so,
  // then we need to verify there is enough stack space remaining
  // for the additional locals.
  __ cmpl(rdx, (page_size - overhead_size) / Interpreter::stackElementSize);
  __ jcc(Assembler::belowEqual, after_frame_check);

  // compute rsp as if this were going to be the last frame on
  // the stack before the red zone

  Label after_frame_check_pop;
  const Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread);
#ifndef _LP64
  __ push(thread);
  __ get_thread(thread);
#endif

  const Address stack_base(thread, Thread::stack_base_offset());
  const Address stack_size(thread, Thread::stack_size_offset());

  // locals + overhead, in bytes
  __ mov(rax, rdx);
  __ shlptr(rax, Interpreter::logStackElementSize); // convert slot count to bytes
  __ addptr(rax, overhead_size);

#ifdef ASSERT
  Label stack_base_okay, stack_size_okay;
  // verify that thread stack base is non-zero
  __ cmpptr(stack_base, (int32_t)NULL_WORD);
  __ jcc(Assembler::notEqual, stack_base_okay);
  __ stop("stack base is zero");
  __ bind(stack_base_okay);
  // verify that thread stack size is non-zero
  __ cmpptr(stack_size, 0);
  __ jcc(Assembler::notEqual, stack_size_okay);
  __ stop("stack size is zero");
  __ bind(stack_size_okay);
#endif

  // Add stack base to locals and subtract stack size
  __ addptr(rax, stack_base);
  __ subptr(rax, stack_size);

  // Use the bigger size for banging.
  const int max_bang_size = (int)MAX2(JavaThread::stack_shadow_zone_size(),
                                      JavaThread::stack_guard_zone_size());

  // add in the red and yellow zone sizes
  __ addptr(rax, max_bang_size);

  // check against the current stack bottom
  __ cmpptr(rsp, rax);

  __ jcc(Assembler::above, after_frame_check_pop);
  NOT_LP64(__ pop(rsi));  // get saved bcp
  // Restore sender's sp as SP. This is necessary if the sender's
  // frame is an extended compiled frame (see gen_c2i_adapter())
  // and safer anyway in case of JSR292 adaptations.

  __ pop(rax); // return address must be moved if SP is changed
  __ mov(rsp, rbcp);
  __ push(rax);

  // Note: the restored frame is not necessarily interpreted.
  // Use the shared runtime version of the StackOverflowError.
  assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
  __ jump(ExternalAddress(StubRoutines::throw_StackOverflowError_entry()));
  // all done with frame size check
  __ bind(after_frame_check_pop);
  NOT_LP64(__ pop(rsi));

  // all done with frame size check
  __ bind(after_frame_check);
}

// Allocate monitor and lock method (asm interpreter)
//
// Args:
//      rbx: Method*
//      r14/rdi: locals
//
// Kills:
//      rax
//      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ...(param regs)
//      rscratch1, rscratch2 (scratch regs)
void TemplateInterpreterGenerator::lock_method() {
  // synchronize method
  const Address access_flags(rbx, Method::access_flags_offset());
  const Address monitor_block_top(
        rbp,
        frame::interpreter_frame_monitor_block_top_offset * wordSize);
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

#ifdef ASSERT
  {
    Label L;
    __ movl(rax, access_flags);
    __ testl(rax, JVM_ACC_SYNCHRONIZED);
    __ jcc(Assembler::notZero, L);
    __ stop("method doesn't need synchronization");
    __ bind(L);
  }
#endif // ASSERT

  // get synchronization object
  {
    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
    Label done;
    __ movl(rax, access_flags);
    __ testl(rax, JVM_ACC_STATIC);
    // get receiver (assume this is frequent case)
    __ movptr(rax, Address(rlocals, Interpreter::local_offset_in_bytes(0)));
    __ jcc(Assembler::zero, done);
    __ movptr(rax, Address(rbx, Method::const_offset()));
    __ movptr(rax, Address(rax, ConstMethod::constants_offset()));
    __ movptr(rax, Address(rax,
                           ConstantPool::pool_holder_offset_in_bytes()));
    __ movptr(rax, Address(rax, mirror_offset));

#ifdef ASSERT
    {
      Label L;
      __ testptr(rax, rax);
      __ jcc(Assembler::notZero, L);
      __ stop("synchronization object is NULL");
      __ bind(L);
    }
#endif // ASSERT

    __ bind(done);
  }

  // add space for monitor & lock
  __ subptr(rsp, entry_size);        // add space for a monitor entry
  __ movptr(monitor_block_top, rsp); // set new monitor block top
  // store object
  __ movptr(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax);
  const Register lockreg = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
  __ movptr(lockreg, rsp); // object address
  __ lock_object(lockreg);
}

// Generate a fixed interpreter frame. This is identical setup for
// interpreted methods and for native methods hence the shared code.
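//
// (Sketch of the resulting frame, derived from the pushes below, high
//  addresses first:
//      [ return address      ]
//      [ saved rbp           ] <-- rbp
//      [ sender sp           ]
//      [ last_sp (NULL)      ]
//      [ Method*             ]
//      [ mdp or 0            ]
//      [ ConstantPoolCache*  ]
//      [ locals pointer      ]
//      [ bcp (0 for natives) ]
//      [ expr. stack bottom  ] <-- rsp after setup
//  This should line up with the interpreter_frame_*_offset constants in
//  frame_x86.hpp.)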
//
// Args:
//      rax: return address
//      rbx: Method*
//      r14/rdi: pointer to locals
//      r13/rsi: sender sp
//      rdx: cp cache
void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  // initialize fixed part of activation frame
  __ push(rax);            // save return address
  __ enter();              // save old & set new rbp
  __ push(rbcp);           // set sender sp
  __ push((int)NULL_WORD); // leave last_sp as null
  __ movptr(rbcp, Address(rbx, Method::const_offset()));    // get ConstMethod*
  __ lea(rbcp, Address(rbcp, ConstMethod::codes_offset())); // get codebase
  __ push(rbx);            // save Method*
  if (ProfileInterpreter) {
    Label method_data_continue;
    __ movptr(rdx, Address(rbx, in_bytes(Method::method_data_offset())));
    __ testptr(rdx, rdx);
    __ jcc(Assembler::zero, method_data_continue);
    __ addptr(rdx, in_bytes(MethodData::data_offset()));
    __ bind(method_data_continue);
    __ push(rdx);          // set the mdp (method data pointer)
  } else {
    __ push(0);
  }

  __ movptr(rdx, Address(rbx, Method::const_offset()));
  __ movptr(rdx, Address(rdx, ConstMethod::constants_offset()));
  __ movptr(rdx, Address(rdx, ConstantPool::cache_offset_in_bytes()));
  __ push(rdx);     // set constant pool cache
  __ push(rlocals); // set locals pointer
  if (native_call) {
    __ push(0);     // no bcp
  } else {
    __ push(rbcp);  // set bcp
  }
  __ push(0); // reserve word for pointer to expression stack bottom
  __ movptr(Address(rsp, 0), rsp); // set expression stack bottom
}

// End of helpers

// Method entry for java.lang.ref.Reference.get.
address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
#if INCLUDE_ALL_GCS
  // Code: _aload_0, _getfield, _areturn
  // parameter size = 1
  //
  // The code that gets generated by this routine is split into 2 parts:
  //    1. The "intrinsified" code for G1 (or any SATB based GC),
  //    2. The slow path - which is an expansion of the regular method entry.
  //
  // Notes:-
  // * In the G1 code we do not check whether we need to block for
  //   a safepoint. If G1 is enabled then we must execute the specialized
  //   code for Reference.get (except when the Reference object is null)
  //   so that we can log the value in the referent field with an SATB
  //   update buffer.
  //   If the code for the getfield template is modified so that the
  //   G1 pre-barrier code is executed when the current method is
  //   Reference.get() then going through the normal method entry
  //   will be fine.
  // * The G1 code can, however, check the receiver object (the instance
  //   of java.lang.Reference) and jump to the slow path if null. If the
  //   Reference object is null then we obviously cannot fetch the referent
  //   and so we don't need to call the G1 pre-barrier. Thus we can use the
  //   regular method entry code to generate the NPE.
  //
  // rbx: Method*

  // r13: senderSP must preserve for slow path, set SP to it on fast path

  address entry = __ pc();

  const int referent_offset = java_lang_ref_Reference::referent_offset;
  guarantee(referent_offset > 0, "referent offset not initialized");

  if (UseG1GC) {
    Label slow_path;
    // rbx: method

    // Check if local 0 != NULL
    // If the receiver is null then it is OK to jump to the slow path.
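    // (No frame has been set up at this point, so rsp still points at the
    //  return address and local 0 -- the receiver -- sits just above it.)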
    __ movptr(rax, Address(rsp, wordSize));

    __ testptr(rax, rax);
    __ jcc(Assembler::zero, slow_path);

    // rax: local 0
    // rbx: method (but can be used as scratch now)
    // rdx: scratch
    // rdi: scratch

    // Preserve the sender sp in case the pre-barrier
    // calls the runtime
    NOT_LP64(__ push(rsi));

    // Generate the G1 pre-barrier code to log the value of
    // the referent field in an SATB buffer.

    // Load the value of the referent field.
    const Address field_address(rax, referent_offset);
    __ load_heap_oop(rax, field_address);

    const Register sender_sp = NOT_LP64(rsi) LP64_ONLY(r13);
    const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
    NOT_LP64(__ get_thread(thread));

    // Generate the G1 pre-barrier code to log the value of
    // the referent field in an SATB buffer.
    __ g1_write_barrier_pre(noreg /* obj */,
                            rax /* pre_val */,
                            thread /* thread */,
                            rbx /* tmp */,
                            true /* tosca_live */,
                            true /* expand_call */);

    // _areturn
    NOT_LP64(__ pop(rsi));      // get sender sp
    __ pop(rdi);                // get return address
    __ mov(rsp, sender_sp);     // set sp to sender sp
    __ jmp(rdi);
    __ ret(0);

    // generate a vanilla interpreter entry as the slow path
    __ bind(slow_path);
    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
    return entry;
  }
#endif // INCLUDE_ALL_GCS

  // If G1 is not enabled then attempt to go through the accessor entry point
  // Reference.get is an accessor
  return NULL;
}

// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the
// native method than the typical interpreter frame setup.
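// (In particular there is no expression stack, and no locals beyond the
//  parameters already on the stack; two zero-initialized slots are pushed
//  before generate_fixed_frame below, so they end up just above the saved
//  return address: one for the result handler and one oop temporary, used
//  e.g. for the mirror handle of static natives.)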
address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;

  // rbx: Method*
  // rbcp: sender sp

  address entry_point = __ pc();

  const Address constMethod       (rbx, Method::const_offset());
  const Address access_flags      (rbx, Method::access_flags_offset());
  const Address size_of_parameters(rcx, ConstMethod::
                                        size_of_parameters_offset());


  // get parameter size (always needed)
  __ movptr(rcx, constMethod);
  __ load_unsigned_short(rcx, size_of_parameters);

  // native calls don't need the stack size check since they have no
  // expression stack, the arguments are already on the stack, and
  // we only add a handful of words to the stack

  // rbx: Method*
  // rcx: size of parameters
  // rbcp: sender sp
  __ pop(rax); // get return address

  // for natives the size of locals is zero

  // compute beginning of parameters
  __ lea(rlocals, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));

  // add 2 zero-initialized slots for native calls
  // initialize result_handler slot
  __ push((int) NULL_WORD);
  // slot for oop temp
  // (static native method holder mirror/jni oop result)
  __ push((int) NULL_WORD);

  // initialize fixed part of activation frame
  generate_fixed_frame(true);

  // make sure method is native & not abstract
#ifdef ASSERT
  __ movl(rax, access_flags);
  {
    Label L;
    __ testl(rax, JVM_ACC_NATIVE);
    __ jcc(Assembler::notZero, L);
    __ stop("tried to execute non-native method as native");
    __ bind(L);
  }
  {
    Label L;
    __ testl(rax, JVM_ACC_ABSTRACT);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // Since at this point in the method invocation the exception handler
  // would try to exit the monitor of synchronized methods which hasn't
  // been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation will
  // check this flag.

  const Register thread1 = NOT_LP64(rax) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread1));
  const Address do_not_unlock_if_synchronized(thread1,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ movbool(do_not_unlock_if_synchronized, true);

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  bang_stack_shadow_pages(true);

  // reset the _do_not_unlock_if_synchronized flag
  NOT_LP64(__ get_thread(thread1));
  __ movbool(do_not_unlock_if_synchronized, false);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so the method is not locked if the counter overflows.
  if (synchronized) {
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    {
      Label L;
      __ movl(rax, access_flags);
      __ testl(rax, JVM_ACC_SYNCHRONIZED);
      __ jcc(Assembler::zero, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  {
    Label L;
    const Address monitor_block_top(rbp,
                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ movptr(rax, monitor_block_top);
    __ cmpptr(rax, rsp);
    __ jcc(Assembler::equal, L);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif

  // jvmti support
  __ notify_method_entry();

  // work registers
  const Register method = rbx;
  const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);
  const Register t      = NOT_LP64(rcx) LP64_ONLY(r11);

  // allocate space for parameters
  __ get_method(method);
  __ movptr(t, Address(method, Method::const_offset()));
  __ load_unsigned_short(t, Address(t, ConstMethod::size_of_parameters_offset()));

#ifndef _LP64
  __ shlptr(t, Interpreter::logStackElementSize);
  __ addptr(t, 2*wordSize); // allocate two more slots for JNIEnv and possible mirror
  __ subptr(rsp, t);
  __ andptr(rsp, -(StackAlignmentInBytes)); // gcc needs 16 byte aligned stacks to do XMM intrinsics
#else
  __ shll(t, Interpreter::logStackElementSize);

  __ subptr(rsp, t);
  __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
  __ andptr(rsp, -16); // must be 16 byte boundary (see amd64 ABI)
#endif // _LP64

  // get signature handler
  {
    Label L;
    __ movptr(t, Address(method, Method::signature_handler_offset()));
    __ testptr(t, t);
    __ jcc(Assembler::notZero, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               method);
    __ get_method(method);
    __ movptr(t, Address(method, Method::signature_handler_offset()));
    __ bind(L);
  }

  // call signature handler
  assert(InterpreterRuntime::SignatureHandlerGenerator::from() == rlocals,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::to() == rsp,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == NOT_LP64(t) LP64_ONLY(rscratch1),
         "adjust this code");

  // The generated handlers do not touch RBX (the method oop).
  // However, large signatures cannot be cached and are generated
  // each time here. The slow-path generator can do a GC on return,
  // so we must reload it after the call.
  __ call(t);
  __ get_method(method); // slow path can do a GC, reload RBX


  // result handler is in rax
  // set result handler
  __ movptr(Address(rbp,
                    (frame::interpreter_frame_result_handler_offset) * wordSize),
            rax);

  // pass mirror handle if static call
  {
    Label L;
    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
    __ movl(t, Address(method, Method::access_flags_offset()));
    __ testl(t, JVM_ACC_STATIC);
    __ jcc(Assembler::zero, L);
    // get mirror
    __ movptr(t, Address(method, Method::const_offset()));
    __ movptr(t, Address(t, ConstMethod::constants_offset()));
    __ movptr(t, Address(t, ConstantPool::pool_holder_offset_in_bytes()));
    __ movptr(t, Address(t, mirror_offset));
    // copy mirror into activation frame
    __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize),
              t);
    // pass handle to mirror
#ifndef _LP64
    __ lea(t, Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
    __ movptr(Address(rsp, wordSize), t);
#else
    __ lea(c_rarg1,
           Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
#endif // _LP64
    __ bind(L);
  }

  // get native function entry point
  {
    Label L;
    __ movptr(rax, Address(method, Method::native_function_offset()));
    ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
    __ cmpptr(rax, unsatisfied.addr());
    __ jcc(Assembler::notEqual, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               method);
    __ get_method(method);
    __ movptr(rax, Address(method, Method::native_function_offset()));
    __ bind(L);
  }

  // pass JNIEnv
#ifndef _LP64
  __ get_thread(thread);
  __ lea(t, Address(thread, JavaThread::jni_environment_offset()));
  __ movptr(Address(rsp, 0), t);

  // set_last_Java_frame_before_call
  // It is enough that the pc()
  // points into the right code segment. It does not have to be the correct return pc.
  __ set_last_Java_frame(thread, noreg, rbp, __ pc());
#else
  __ lea(c_rarg0, Address(r15_thread, JavaThread::jni_environment_offset()));

  // It is enough that the pc() points into the right code
  // segment. It does not have to be the correct return pc.
  __ set_last_Java_frame(rsp, rbp, (address) __ pc());
#endif // _LP64

  // change thread state
#ifdef ASSERT
  {
    Label L;
    __ movl(t, Address(thread, JavaThread::thread_state_offset()));
    __ cmpl(t, _thread_in_Java);
    __ jcc(Assembler::equal, L);
    __ stop("Wrong thread state in native stub");
    __ bind(L);
  }
#endif

  // Change state to native

  __ movl(Address(thread, JavaThread::thread_state_offset()),
          _thread_in_native);

  // Call the native method.
  __ call(rax);
  // 32: result potentially in rdx:rax or ST0
  // 64: result potentially in rax or xmm0

  // Verify or restore cpu control state after JNI call
  __ restore_cpu_control_state_after_jni();

  // NOTE: The order of these pushes is known to frame::interpreter_frame_result
  // in order to extract the result of a method call. If the order of these
  // pushes change or anything else is added to the stack then the code in
  // interpreter_frame_result must also change.

#ifndef _LP64
  // save potential result in ST(0) & rdx:rax
  // (if result handler is the T_FLOAT or T_DOUBLE handler, result must be in ST0 -
  // the check is necessary to avoid potential Intel FPU overflow problems by saving/restoring 'empty' FPU registers)
  // It is safe to do this push because state is _thread_in_native and return address will be found
  // via _last_native_pc and not via _last_java_sp

  // NOTE: the order of these push(es) is known to frame::interpreter_frame_result.
  // If the order changes or anything else is added to the stack the code in
  // interpreter_frame_result will have to be changed.

  { Label L;
    Label push_double;
    ExternalAddress float_handler(AbstractInterpreter::result_handler(T_FLOAT));
    ExternalAddress double_handler(AbstractInterpreter::result_handler(T_DOUBLE));
    __ cmpptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset + 1)*wordSize),
              float_handler.addr());
    __ jcc(Assembler::equal, push_double);
    __ cmpptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset + 1)*wordSize),
              double_handler.addr());
    __ jcc(Assembler::notEqual, L);
    __ bind(push_double);
    __ push_d(); // FP values are returned using the FPU, so push FPU contents (even if UseSSE > 0).
    __ bind(L);
  }
#else
  __ push(dtos);
#endif // _LP64

  __ push(ltos);

  // change thread state
  NOT_LP64(__ get_thread(thread));
  __ movl(Address(thread, JavaThread::thread_state_offset()),
          _thread_in_native_trans);

  if (os::is_MP()) {
    if (UseMembar) {
      // Force this write out before the read below
      __ membar(Assembler::Membar_mask_bits(
           Assembler::LoadLoad | Assembler::LoadStore |
           Assembler::StoreLoad | Assembler::StoreStore));
    } else {
      // Write serialization page so VM thread can do a pseudo remote membar.
      // We use the current thread pointer to calculate a thread specific
      // offset to write to within the page. This minimizes bus traffic
      // due to cache line collision.
      __ serialize_memory(thread, rcx);
    }
  }

#ifndef _LP64
  if (AlwaysRestoreFPU) {
    // Make sure the control word is correct.
    __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
  }
#endif // _LP64

  // check for safepoint operation in progress and/or pending suspend requests
  {
    Label Continue;
    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
             SafepointSynchronize::_not_synchronized);

    Label L;
    __ jcc(Assembler::notEqual, L);
    __ cmpl(Address(thread, JavaThread::suspend_flags_offset()), 0);
    __ jcc(Assembler::equal, Continue);
    __ bind(L);

    // Don't use call_VM as it will see a possible pending exception
    // and forward it and never return here preventing us from
    // clearing _last_native_pc down below. Also can't use
    // call_VM_leaf either as it will check to see if r13 & r14 are
    // preserved and correspond to the bcp/locals pointers. So we do a
    // runtime call by hand.
    //
#ifndef _LP64
    __ push(thread);
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
                                            JavaThread::check_special_condition_for_native_trans)));
    __ increment(rsp, wordSize);
    __ get_thread(thread);
#else
    __ mov(c_rarg0, r15_thread);
    __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
    __ andptr(rsp, -16); // align stack as required by ABI
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
    __ mov(rsp, r12); // restore sp
    __ reinit_heapbase();
#endif // _LP64
    __ bind(Continue);
  }

  // change thread state
  __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_Java);

  // reset_last_Java_frame
  __ reset_last_Java_frame(thread, true, true);

  // reset handle block
  __ movptr(t, Address(thread, JavaThread::active_handles_offset()));
  __ movl(Address(t, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);

  // If result is an oop unbox and store it in frame where gc will see it
  // and result handler will pick it up

  {
    Label no_oop, store_result;
    __ lea(t, ExternalAddress(AbstractInterpreter::result_handler(T_OBJECT)));
    __ cmpptr(t, Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize));
    __ jcc(Assembler::notEqual, no_oop);
    // retrieve result
    __ pop(ltos);
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, store_result);
    __ movptr(rax, Address(rax, 0));
    __ bind(store_result);
    __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize), rax);
    // keep stack depth as expected by pushing oop which will eventually be discarded
    __ push(ltos);
    __ bind(no_oop);
  }


  {
    Label no_reguard;
    __ cmpl(Address(thread, JavaThread::stack_guard_state_offset()),
            JavaThread::stack_guard_yellow_reserved_disabled);
    __ jcc(Assembler::notEqual, no_reguard);

    __ pusha(); // XXX only save smashed registers
#ifndef _LP64
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
    __ popa();
#else
    __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
    __ andptr(rsp, -16); // align stack as required by ABI
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
    __ mov(rsp, r12); // restore sp
    __ popa(); // XXX only restore smashed registers
    __ reinit_heapbase();
#endif // _LP64

    __ bind(no_reguard);
  }


  // The method register is junk from after the thread_in_native transition
  // until here. Also can't call_VM until the bcp has been
  // restored. Need bcp for throwing exception below so get it now.
  __ get_method(method);

  // restore to have legal interpreter frame, i.e., bci == 0 <=> code_base()
  __ movptr(rbcp, Address(method, Method::const_offset()));   // get ConstMethod*
  __ lea(rbcp, Address(rbcp, ConstMethod::codes_offset()));   // get codebase

  // handle exceptions (exception handling will handle unlocking!)
  {
    Label L;
    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
    __ jcc(Assembler::zero, L);
    // Note: At some point we may want to unify this with the code
    // used in call_VM_base(); i.e., we should use the
    // StubRoutines::forward_exception code. For now this doesn't work
    // here because the rsp is not correctly set at this point.
    __ MacroAssembler::call_VM(noreg,
                               CAST_FROM_FN_PTR(address,
                               InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  // do unlocking if necessary
  {
    Label L;
    __ movl(t, Address(method, Method::access_flags_offset()));
    __ testl(t, JVM_ACC_SYNCHRONIZED);
    __ jcc(Assembler::zero, L);
    // the code below should be shared with interpreter macro
    // assembler implementation
    {
      Label unlock;
      // BasicObjectLock will be first in list, since this is a
      // synchronized method. However, need to check that the object
      // has not been unlocked by an explicit monitorexit bytecode.
      const Address monitor(rbp,
                            (intptr_t)(frame::interpreter_frame_initial_sp_offset *
                                       wordSize - (int)sizeof(BasicObjectLock)));

      const Register regmon = NOT_LP64(rdx) LP64_ONLY(c_rarg1);

      // the monitor is expected in c_rarg1 for the slow unlock path
      __ lea(regmon, monitor); // address of first monitor

      __ movptr(t, Address(regmon, BasicObjectLock::obj_offset_in_bytes()));
      __ testptr(t, t);
      __ jcc(Assembler::notZero, unlock);

      // Entry already unlocked, need to throw exception
      __ MacroAssembler::call_VM(noreg,
                                 CAST_FROM_FN_PTR(address,
                   InterpreterRuntime::throw_illegal_monitor_state_exception));
      __ should_not_reach_here();

      __ bind(unlock);
      __ unlock_object(regmon);
    }
    __ bind(L);
  }

  // jvmti support
  // Note: This must happen _after_ handling/throwing any exceptions since
  //       the exception handler code notifies the runtime of method exits
  //       too. If this happens before, method entry/exit notifications are
  //       not properly paired (was bug - gri 11/22/99).
  __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);

  // restore potential result in edx:eax, call result handler to
  // restore potential result in ST0 & handle result

  __ pop(ltos);
  LP64_ONLY( __ pop(dtos));

  __ movptr(t, Address(rbp,
                       (frame::interpreter_frame_result_handler_offset) * wordSize));
  __ call(t);

  // remove activation
  __ movptr(t, Address(rbp,
                       frame::interpreter_frame_sender_sp_offset *
                       wordSize)); // get sender sp
  __ leave();                      // remove frame anchor
  __ pop(rdi);                     // get return address
  __ mov(rsp, t);                  // set sp to sender sp
  __ jmp(rdi);

  if (inc_counter) {
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(continue_after_compile);
  }

  return entry_point;
}

//
// Generic interpreted method entry to (asm) interpreter
//
address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;

  // ebx: Method*
  // rbcp: sender sp
  address entry_point = __ pc();

  const Address constMethod(rbx, Method::const_offset());
  const Address access_flags(rbx, Method::access_flags_offset());
  const Address size_of_parameters(rdx,
                                   ConstMethod::size_of_parameters_offset());
  const Address size_of_locals(rdx, ConstMethod::size_of_locals_offset());


  // get parameter size (always needed)
  __ movptr(rdx, constMethod);
  __ load_unsigned_short(rcx, size_of_parameters);

  // rbx: Method*
  // rcx: size of parameters
  // rbcp: sender_sp (could differ from sp+wordSize if we were called via c2i )

  __ load_unsigned_short(rdx, size_of_locals); // get size of locals in words
  __ subl(rdx, rcx);                           // rdx = no. of additional locals

  // YYY
  //   __ incrementl(rdx);
  //   __ andl(rdx, -2);

  // see if we've got enough room on the stack for locals plus overhead.
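  // (For example, a method with 2 parameters and 5 locals arrives here with
  //  rcx = 2 and rdx = 5, so rdx becomes 3: the caller pushed only the
  //  parameters, and the three remaining local slots still have to fit on
  //  the stack. generate_stack_overflow_check lets frames whose locals plus
  //  overhead fit within one guard page pass without an explicit limit test.)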
  generate_stack_overflow_check();

  // get return address
  __ pop(rax);

  // compute beginning of parameters
  __ lea(rlocals, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));

  // rdx - # of additional locals
  // allocate space for locals
  // explicitly initialize locals
  {
    Label exit, loop;
    __ testl(rdx, rdx);
    __ jcc(Assembler::lessEqual, exit); // do nothing if rdx <= 0
    __ bind(loop);
    __ push((int) NULL_WORD); // initialize local variables
    __ decrementl(rdx);       // until everything initialized
    __ jcc(Assembler::greater, loop);
    __ bind(exit);
  }

  // initialize fixed part of activation frame
  generate_fixed_frame(false);

  // make sure method is not native & not abstract
#ifdef ASSERT
  __ movl(rax, access_flags);
  {
    Label L;
    __ testl(rax, JVM_ACC_NATIVE);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute native method as non-native");
    __ bind(L);
  }
  {
    Label L;
    __ testl(rax, JVM_ACC_ABSTRACT);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // Since at this point in the method invocation the exception
  // handler would try to exit the monitor of synchronized methods
  // which hasn't been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation
  // will check this flag.

  const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread));
  const Address do_not_unlock_if_synchronized(thread,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ movbool(do_not_unlock_if_synchronized, true);

  __ profile_parameters_type(rax, rcx, rdx);
  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  Label profile_method;
  Label profile_method_continue;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow,
                          &profile_method,
                          &profile_method_continue);
    if (ProfileInterpreter) {
      __ bind(profile_method_continue);
    }
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  // check for synchronized interpreted methods
  bang_stack_shadow_pages(false);

  // reset the _do_not_unlock_if_synchronized flag
  NOT_LP64(__ get_thread(thread));
  __ movbool(do_not_unlock_if_synchronized, false);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so the method is not locked if the counter overflows.
  if (synchronized) {
    // Allocate monitor and lock method
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    {
      Label L;
      __ movl(rax, access_flags);
      __ testl(rax, JVM_ACC_SYNCHRONIZED);
      __ jcc(Assembler::zero, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  {
    Label L;
    const Address monitor_block_top (rbp,
                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ movptr(rax, monitor_block_top);
    __ cmpptr(rax, rsp);
    __ jcc(Assembler::equal, L);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif

  // jvmti support
  __ notify_method_entry();

  __ dispatch_next(vtos);

  // invocation counter overflow
  if (inc_counter) {
    if (ProfileInterpreter) {
      // We have decided to profile this method in the interpreter
      __ bind(profile_method);
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
      __ set_method_data_pointer_for_bcp();
      __ get_method(rbx);
      __ jmp(profile_method_continue);
    }
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(continue_after_compile);
  }

  return entry_point;
}

//-----------------------------------------------------------------------------
// Exceptions

void TemplateInterpreterGenerator::generate_throw_exception() {
  // Entry point in previous activation (i.e., if the caller was
  // interpreted)
  Interpreter::_rethrow_exception_entry = __ pc();
  // Restore sp to interpreter_frame_last_sp even though we are going
  // to empty the expression stack for the exception processing.
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
  // rax: exception
  // rdx: return address/pc that threw exception
  __ restore_bcp();    // r13/rsi points to call/send
  __ restore_locals();
  LP64_ONLY(__ reinit_heapbase());  // restore r12 as heapbase.
  // Entry point for exceptions thrown within interpreter code
  Interpreter::_throw_exception_entry = __ pc();
  // expression stack is undefined here
  // rax: exception
  // r13/rsi: exception bcp
  __ verify_oop(rax);
  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  LP64_ONLY(__ mov(c_rarg1, rax));

  // expression stack must be empty before entering the VM in case of
  // an exception
  __ empty_expression_stack();
  // find exception handler address and preserve exception oop
  __ call_VM(rdx,
             CAST_FROM_FN_PTR(address,
                          InterpreterRuntime::exception_handler_for_exception),
             rarg);
  // rax: exception handler entry point
  // rdx: preserved exception oop
  // r13/rsi: bcp for exception handler
  __ push_ptr(rdx); // push exception which is now the only value on the stack
  __ jmp(rax); // jump to exception handler (may be _remove_activation_entry!)

  // If the exception is not handled in the current frame the frame is
  // removed and the exception is rethrown (i.e. exception
  // continuation is _rethrow_exception).
  //
  // Note: At this point the bci is still the bci for the instruction
  //       which caused the exception and the expression stack is
  //       empty. Thus, for any VM calls at this point, GC will find a legal
  //       oop map (with empty expression stack).

  // In current activation
  // tos: exception
  // esi: exception bcp

  //
  // JVMTI PopFrame support
  //

  Interpreter::_remove_activation_preserving_args_entry = __ pc();
  __ empty_expression_stack();
  // Set the popframe_processing bit in pending_popframe_condition
  // indicating that we are currently handling popframe, so that
  // call_VMs that may happen later do not trigger new popframe
  // handling cycles.
  const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread));
  __ movl(rdx, Address(thread, JavaThread::popframe_condition_offset()));
  __ orl(rdx, JavaThread::popframe_processing_bit);
  __ movl(Address(thread, JavaThread::popframe_condition_offset()), rdx);

  {
    // Check to see whether we are returning to a deoptimized frame.
    // (The PopFrame call ensures that the caller of the popped frame is
    // either interpreted or compiled and deoptimizes it if compiled.)
    // In this case, we can't call dispatch_next() after the frame is
    // popped, but instead must save the incoming arguments and restore
    // them after deoptimization has occurred.
    //
    // Note that we don't compare the return PC against the
    // deoptimization blob's unpack entry because of the presence of
    // adapter frames in C2.
    Label caller_not_deoptimized;
    Register rarg = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
    __ movptr(rarg, Address(rbp, frame::return_addr_offset * wordSize));
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                               InterpreterRuntime::interpreter_contains), rarg);
    __ testl(rax, rax);
    __ jcc(Assembler::notZero, caller_not_deoptimized);

    // Compute size of arguments for saving when returning to
    // deoptimized caller
    __ get_method(rax);
    __ movptr(rax, Address(rax, Method::const_offset()));
    __ load_unsigned_short(rax, Address(rax, in_bytes(ConstMethod::
                                                size_of_parameters_offset())));
    __ shll(rax, Interpreter::logStackElementSize);
    __ restore_locals();
    __ subptr(rlocals, rax);
    __ addptr(rlocals, wordSize);
    // Save these arguments
    NOT_LP64(__ get_thread(thread));
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                                           Deoptimization::
                                           popframe_preserve_args),
                          thread, rax, rlocals);

    __ remove_activation(vtos, rdx,
                         /* throw_monitor_exception */ false,
                         /* install_monitor_exception */ false,
                         /* notify_jvmdi */ false);

    // Inform deoptimization that it is responsible for restoring
    // these arguments
    NOT_LP64(__ get_thread(thread));
    __ movl(Address(thread, JavaThread::popframe_condition_offset()),
            JavaThread::popframe_force_deopt_reexecution_bit);

    // Continue in deoptimization handler
    __ jmp(rdx);

    __ bind(caller_not_deoptimized);
  }

  __ remove_activation(vtos, rdx, /* rdx result (retaddr) is not used */
                       /* throw_monitor_exception */ false,
                       /* install_monitor_exception */ false,
                       /* notify_jvmdi */ false);

  // Finish with popframe handling
  // A previous I2C followed by a deoptimization might have moved the
  // outgoing arguments further up the stack.
#ifndef _LP64
  __ mov(rax, rsp);
  __ movptr(rbx, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ get_thread(thread);
  // PC must point into interpreter here
  __ set_last_Java_frame(thread, noreg, rbp, __ pc());
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), thread, rax, rbx);
  __ get_thread(thread);
#else
  __ mov(c_rarg1, rsp);
  __ movptr(c_rarg2, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  // PC must point into interpreter here
  __ set_last_Java_frame(noreg, rbp, __ pc());
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), r15_thread, c_rarg1, c_rarg2);
#endif
  __ reset_last_Java_frame(thread, true, true);

  // Restore the last_sp and null it out
  __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);

  __ restore_bcp();
  __ restore_locals();
  // The method data pointer was incremented already during
  // call profiling. We have to restore the mdp for the current bcp.
  if (ProfileInterpreter) {
    __ set_method_data_pointer_for_bcp();
  }

  // Clear the popframe condition flag
  NOT_LP64(__ get_thread(thread));
  __ movl(Address(thread, JavaThread::popframe_condition_offset()),
          JavaThread::popframe_inactive);
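  // Over a complete PopFrame request the condition flag has now cycled,
  // roughly: popframe_pending_bit (set by the JVMTI PopFrame call)
  // -> popframe_processing_bit (set on entry above)
  // -> popframe_force_deopt_reexecution_bit (only if the caller had been
  //    deoptimized)
  // -> popframe_inactive.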
#if INCLUDE_JVMTI
  {
    Label L_done;
    const Register local0 = rlocals;

    __ cmpb(Address(rbcp, 0), Bytecodes::_invokestatic);
    __ jcc(Assembler::notEqual, L_done);

    // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
    // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.

    __ get_method(rdx);
    __ movptr(rax, Address(local0, 0));
    __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), rax, rdx, rbcp);

    __ testptr(rax, rax);
    __ jcc(Assembler::zero, L_done);

    __ movptr(Address(local0, 0), rax);
    __ bind(L_done);
  }
#endif // INCLUDE_JVMTI

  __ dispatch_next(vtos);
  // end of PopFrame support

  Interpreter::_remove_activation_entry = __ pc();

  // preserve exception over this code sequence
  __ pop_ptr(rax);
  NOT_LP64(__ get_thread(thread));
  __ movptr(Address(thread, JavaThread::vm_result_offset()), rax);
  // remove the activation (without doing throws on illegalMonitorExceptions)
  __ remove_activation(vtos, rdx, false, true, false);
  // restore exception
  NOT_LP64(__ get_thread(thread));
  __ get_vm_result(rax, thread);

  // In between activations - previous activation type unknown yet
  // compute continuation point - the continuation point expects the
  // following registers set up:
  //
  // rax: exception
  // rdx: return address/pc that threw exception
  // rsp: expression stack of caller
  // rbp: ebp of caller
  __ push(rax);                                  // save exception
  __ push(rdx);                                  // save return address
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                          SharedRuntime::exception_handler_for_return_address),
                        thread, rdx);
  __ mov(rbx, rax);                              // save exception handler
  __ pop(rdx);                                   // restore return address
  __ pop(rax);                                   // restore exception
  // Note that an "issuing PC" is actually the next PC after the call
  __ jmp(rbx);                                   // jump to exception
                                                 // handler of caller
}


//
// JVMTI ForceEarlyReturn support
//
address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
  address entry = __ pc();

  __ restore_bcp();
  __ restore_locals();
  __ empty_expression_stack();
  __ load_earlyret_value(state);  // 32 bits returns value in rdx, so don't reuse

  const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread));
  __ movptr(rcx, Address(thread, JavaThread::jvmti_thread_state_offset()));
  Address cond_addr(rcx, JvmtiThreadState::earlyret_state_offset());

  // Clear the earlyret state
  __ movl(cond_addr, JvmtiThreadState::earlyret_inactive);

  __ remove_activation(state, rsi,
                       false, /* throw_monitor_exception */
                       false, /* install_monitor_exception */
                       true); /* notify_jvmdi */
  __ jmp(rsi);

  return entry;
} // end of ForceEarlyReturn support


//-----------------------------------------------------------------------------
// Helper for vtos entry point generation
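// Templates whose incoming TosState is vtos can nevertheless be reached
// with a value still cached in tosca (rax, rax:rdx, xmm0, ...). The helper
// below therefore emits one small prologue per incoming state that flushes
// the cached value onto the expression stack and then falls through to the
// shared vtos entry; bep/cep/sep share the int prologue, since byte, char
// and short values are all held as ints in tosca.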
void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
                                                         address& bep,
                                                         address& cep,
                                                         address& sep,
                                                         address& aep,
                                                         address& iep,
                                                         address& lep,
                                                         address& fep,
                                                         address& dep,
                                                         address& vep) {
  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
  Label L;
  aep = __ pc();  __ push_ptr();   __ jmp(L);
#ifndef _LP64
  fep = __ pc(); __ push(ftos); __ jmp(L);
  dep = __ pc(); __ push(dtos); __ jmp(L);
#else
  fep = __ pc();  __ push_f(xmm0); __ jmp(L);
  dep = __ pc();  __ push_d(xmm0); __ jmp(L);
#endif // _LP64
  lep = __ pc();  __ push_l();     __ jmp(L);
  bep = cep = sep =
  iep = __ pc();  __ push_i();
  vep = __ pc();
  __ bind(L);
  generate_and_dispatch(t);
}

//-----------------------------------------------------------------------------

// Non-product code
#ifndef PRODUCT

address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
  address entry = __ pc();

#ifndef _LP64
  // prepare expression stack
  __ pop(rcx);          // pop return address so expression stack is 'pure'
  __ push(state);       // save tosca

  // pass tosca registers as arguments & call tracer
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode), rcx, rax, rdx);
  __ mov(rcx, rax);     // make sure return address is not destroyed by pop(state)
  __ pop(state);        // restore tosca

  // return
  __ jmp(rcx);
#else
  __ push(state);
  __ push(c_rarg0);
  __ push(c_rarg1);
  __ push(c_rarg2);
  __ push(c_rarg3);
  __ mov(c_rarg2, rax);  // Pass itos
#ifdef _WIN64
  __ movflt(xmm3, xmm0); // Pass ftos
#endif
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode),
             c_rarg1, c_rarg2, c_rarg3);
  __ pop(c_rarg3);
  __ pop(c_rarg2);
  __ pop(c_rarg1);
  __ pop(c_rarg0);
  __ pop(state);
  __ ret(0);             // return from result handler
#endif // _LP64

  return entry;
}

void TemplateInterpreterGenerator::count_bytecode() {
  __ incrementl(ExternalAddress((address) &BytecodeCounter::_counter_value));
}

void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
  __ incrementl(ExternalAddress((address) &BytecodeHistogram::_counters[t->bytecode()]));
}

void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
  // _index caches the last bytecode in its high bits; shifting it down
  // leaves the previous bytecode in the low bits, and or-ing the current
  // bytecode into the high bits yields the pair index, whose counter is
  // then incremented.
  __ mov32(rbx, ExternalAddress((address) &BytecodePairHistogram::_index));
  __ shrl(rbx, BytecodePairHistogram::log2_number_of_codes);
  __ orl(rbx,
         ((int) t->bytecode()) <<
         BytecodePairHistogram::log2_number_of_codes);
  __ mov32(ExternalAddress((address) &BytecodePairHistogram::_index), rbx);
  __ lea(rscratch1, ExternalAddress((address) BytecodePairHistogram::_counters));
  __ incrementl(Address(rscratch1, rbx, Address::times_4));
}


void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
  // Call a little run-time stub to avoid blow-up for each bytecode.
  // The run-time stub saves the right registers, depending on
  // the tosca in-state for the given template.

  assert(Interpreter::trace_code(t->tos_in()) != NULL,
         "entry must have been generated");
#ifndef _LP64
  __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
#else
  __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
  __ andptr(rsp, -16); // align stack as required by ABI
  __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
  __ mov(rsp, r12); // restore sp
  __ reinit_heapbase();
#endif // _LP64
}


void TemplateInterpreterGenerator::stop_interpreter_at() {
  Label L;
  __ cmp32(ExternalAddress((address) &BytecodeCounter::_counter_value),
           StopInterpreterAt);
  __ jcc(Assembler::notEqual, L);
  __ int3();
  __ bind(L);
}
#endif // !PRODUCT