/*
 * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateInterpreterGenerator.hpp"
#include "interpreter/templateTable.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

#define __ _masm->

// Size of interpreter code. Increase if too small. The interpreter will
// fail with a guarantee ("not enough space for interpreter generation")
// if it is too small.
// Run with +PrintInterpreter to get the VM to print out the size.
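// (Sizing note, hedged: the budget below has to cover every codelet the
// generator emits; the larger JVMCI figure presumably makes room for the
// extra JVMCI-specific paths, e.g. the pending-monitorenter check in
// generate_deopt_entry_for.)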
// Max size with JVMTI
#ifdef AMD64
int TemplateInterpreter::InterpreterCodeSize = JVMCI_ONLY(268) NOT_JVMCI(256) * 1024;
#else
int TemplateInterpreter::InterpreterCodeSize = 224 * 1024;
#endif // AMD64

// Global Register Names
static const Register rbcp    = LP64_ONLY(r13) NOT_LP64(rsi);
static const Register rlocals = LP64_ONLY(r14) NOT_LP64(rdi);

const int method_offset = frame::interpreter_frame_method_offset * wordSize;
const int bcp_offset    = frame::interpreter_frame_bcp_offset    * wordSize;
const int locals_offset = frame::interpreter_frame_locals_offset * wordSize;


//-----------------------------------------------------------------------------

address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();

#ifdef ASSERT
  {
    Label L;
    __ lea(rax, Address(rbp,
                        frame::interpreter_frame_monitor_block_top_offset *
                        wordSize));
    __ cmpptr(rax, rsp); // rax = maximal rsp for current rbp (stack
                         // grows negative)
    __ jcc(Assembler::aboveEqual, L); // check if frame is complete
    __ stop("interpreter frame not set up");
    __ bind(L);
  }
#endif // ASSERT
  // Restore bcp under the assumption that the current frame is still
  // interpreted
  __ restore_bcp();

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // throw exception
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::throw_StackOverflowError));
  return entry;
}

address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(
        const char* name) {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // setup parameters
  // ??? convention: expect aberrant index in register ebx
  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  __ lea(rarg, ExternalAddress((address)name));
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::
                              throw_ArrayIndexOutOfBoundsException),
             rarg, rbx);
  return entry;
}

address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();

  // object is at TOS
  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  __ pop(rarg);

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();

  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::
                              throw_ClassCastException),
             rarg);
  return entry;
}

address TemplateInterpreterGenerator::generate_exception_handler_common(
        const char* name, const char* message, bool pass_oop) {
  assert(!pass_oop || message == NULL, "either oop or message but not both");
  address entry = __ pc();

  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  Register rarg2 = NOT_LP64(rbx) LP64_ONLY(c_rarg2);

  if (pass_oop) {
    // object is at TOS
    __ pop(rarg2);
  }
  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // setup parameters
  __ lea(rarg, ExternalAddress((address)name));
  if (pass_oop) {
    __ call_VM(rax, CAST_FROM_FN_PTR(address,
                                     InterpreterRuntime::
                                     create_klass_exception),
               rarg, rarg2);
  } else {
    __ lea(rarg2, ExternalAddress((address)message));
    __ call_VM(rax,
               CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception),
               rarg, rarg2);
  }
  // throw exception
  __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
  return entry;
}

address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
  address entry = __ pc();

#ifndef _LP64
#ifdef COMPILER2
  // The FPU stack is clean if UseSSE >= 2 but must be cleaned in other cases
  if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) {
    for (int i = 1; i < 8; i++) {
      __ ffree(i);
    }
  } else if (UseSSE < 2) {
    __ empty_FPU_stack();
  }
#endif // COMPILER2
  if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) {
    __ MacroAssembler::verify_FPU(1, "generate_return_entry_for compiled");
  } else {
    __ MacroAssembler::verify_FPU(0, "generate_return_entry_for compiled");
  }

  if (state == ftos) {
    __ MacroAssembler::verify_FPU(UseSSE >= 1 ? 0 : 1, "generate_return_entry_for in interpreter");
  } else if (state == dtos) {
    __ MacroAssembler::verify_FPU(UseSSE >= 2 ? 0 : 1, "generate_return_entry_for in interpreter");
  }
#endif // _LP64

  // Restore stack bottom in case i2c adjusted stack
  __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  // and NULL it as marker that esp is now tos until next java call
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);

  __ restore_bcp();
  __ restore_locals();

  if (state == atos) {
    Register mdp = rbx;
    Register tmp = rcx;
    __ profile_return_type(mdp, rax, tmp);
  }

  const Register cache = rbx;
  const Register index = rcx;
  __ get_cache_and_index_at_bcp(cache, index, 1, index_size);

  const Register flags = cache;
  __ movl(flags, Address(cache, index, Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
  __ andl(flags, ConstantPoolCacheEntry::parameter_size_mask);
  __ lea(rsp, Address(rsp, flags, Interpreter::stackElementScale()));

  const Register java_thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
  if (JvmtiExport::can_pop_frame()) {
    NOT_LP64(__ get_thread(java_thread));
    __ check_and_handle_popframe(java_thread);
  }
  if (JvmtiExport::can_force_early_return()) {
    NOT_LP64(__ get_thread(java_thread));
    __ check_and_handle_earlyret(java_thread);
  }

  __ dispatch_next(state, step);

  return entry;
}


address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step) {
  address entry = __ pc();

#ifndef _LP64
  if (state == ftos) {
    __ MacroAssembler::verify_FPU(UseSSE >= 1 ? 0 : 1, "generate_deopt_entry_for in interpreter");
  } else if (state == dtos) {
    __ MacroAssembler::verify_FPU(UseSSE >= 2 ? 0 : 1, "generate_deopt_entry_for in interpreter");
  }
#endif // _LP64

  // NULL last_sp until next java call
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
  __ restore_bcp();
  __ restore_locals();
  const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread));
#if INCLUDE_JVMCI
  // Check if we need to take lock at entry of synchronized method. This can
  // only occur on method entry so emit it only for vtos with step 0.
  if ((UseJVMCICompiler || UseAOT) && state == vtos && step == 0) {
    Label L;
    __ cmpb(Address(thread, JavaThread::pending_monitorenter_offset()), 0);
    __ jcc(Assembler::zero, L);
    // Clear flag.
    __ movb(Address(thread, JavaThread::pending_monitorenter_offset()), 0);
    // Satisfy calling convention for lock_method().
    __ get_method(rbx);
    // Take lock.
    lock_method();
    __ bind(L);
  } else {
#ifdef ASSERT
    if (UseJVMCICompiler) {
      Label L;
      __ cmpb(Address(r15_thread, JavaThread::pending_monitorenter_offset()), 0);
      __ jccb(Assembler::zero, L);
      __ stop("unexpected pending monitor in deopt entry");
      __ bind(L);
    }
#endif
  }
#endif
  // handle exceptions
  {
    Label L;
    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
    __ jcc(Assembler::zero, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }
  __ dispatch_next(state, step);
  return entry;
}

address TemplateInterpreterGenerator::generate_result_handler_for(
        BasicType type) {
  address entry = __ pc();
  switch (type) {
  case T_BOOLEAN: __ c2bool(rax);            break;
#ifndef _LP64
  case T_CHAR   : __ andptr(rax, 0xFFFF);    break;
#else
  case T_CHAR   : __ movzwl(rax, rax);       break;
#endif // _LP64
  case T_BYTE   : __ sign_extend_byte(rax);  break;
  case T_SHORT  : __ sign_extend_short(rax); break;
  case T_INT    : /* nothing to do */        break;
  case T_LONG   : /* nothing to do */        break;
  case T_VOID   : /* nothing to do */        break;
#ifndef _LP64
  case T_DOUBLE :
  case T_FLOAT  :
    { const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp();
      __ pop(t);                            // remove return address first
      // Must return a result for interpreter or compiler. In SSE
      // mode, results are returned in xmm0 and the FPU stack must
      // be empty.
      if (type == T_FLOAT && UseSSE >= 1) {
        // Load ST0
        __ fld_d(Address(rsp, 0));
        // Store as float and empty fpu stack
        __ fstp_s(Address(rsp, 0));
        // and reload
        __ movflt(xmm0, Address(rsp, 0));
      } else if (type == T_DOUBLE && UseSSE >= 2) {
        __ movdbl(xmm0, Address(rsp, 0));
      } else {
        // restore ST0
        __ fld_d(Address(rsp, 0));
      }
      // and pop the temp
      __ addptr(rsp, 2 * wordSize);
      __ push(t);                           // restore return address
    }
    break;
#else
  case T_FLOAT  : /* nothing to do */        break;
  case T_DOUBLE : /* nothing to do */        break;
#endif // _LP64

  case T_VALUETYPE: // fall through (value types are handled with oops)
  case T_OBJECT :
    // retrieve result from frame
    __ movptr(rax, Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize));
    // and verify it
    __ verify_oop(rax);
    break;
  default       : ShouldNotReachHere();
  }
  __ ret(0);                                   // return from result handler
  return entry;
}

address TemplateInterpreterGenerator::generate_safept_entry_for(
        TosState state,
        address runtime_entry) {
  address entry = __ pc();
  __ push(state);
  __ call_VM(noreg, runtime_entry);
  __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
  return entry;
}



// Helpers for commoning out cases in the various type of method entries.
//


// increment invocation count & check for overflow
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test
//
// rbx: method
// rcx: invocation counter
//
void TemplateInterpreterGenerator::generate_counter_incr(
        Label* overflow,
        Label* profile_method,
        Label* profile_method_continue) {
  Label done;
  // Note: In tiered we increment either counters in Method* or in MDO depending if we're profiling or not.
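  // (Reader's note, hedged: increment_mask_and_jump adds the increment to
  // the counter word, ands the sum with the supplied mask, and branches to
  // `overflow` when the masked result is zero -- on this path the compile
  // threshold is encoded in the mask rather than compared explicitly.)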
  if (TieredCompilation) {
    int increment = InvocationCounter::count_increment;
    Label no_mdo;
    if (ProfileInterpreter) {
      // Are we profiling?
      __ movptr(rax, Address(rbx, Method::method_data_offset()));
      __ testptr(rax, rax);
      __ jccb(Assembler::zero, no_mdo);
      // Increment counter in the MDO
      const Address mdo_invocation_counter(rax, in_bytes(MethodData::invocation_counter_offset()) +
                                                in_bytes(InvocationCounter::counter_offset()));
      const Address mask(rax, in_bytes(MethodData::invoke_mask_offset()));
      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rcx, false, Assembler::zero, overflow);
      __ jmp(done);
    }
    __ bind(no_mdo);
    // Increment counter in MethodCounters
    const Address invocation_counter(rax,
                  MethodCounters::invocation_counter_offset() +
                  InvocationCounter::counter_offset());
    __ get_method_counters(rbx, rax, done);
    const Address mask(rax, in_bytes(MethodCounters::invoke_mask_offset()));
    __ increment_mask_and_jump(invocation_counter, increment, mask, rcx,
                               false, Assembler::zero, overflow);
    __ bind(done);
  } else { // not TieredCompilation
    const Address backedge_counter(rax,
                  MethodCounters::backedge_counter_offset() +
                  InvocationCounter::counter_offset());
    const Address invocation_counter(rax,
                  MethodCounters::invocation_counter_offset() +
                  InvocationCounter::counter_offset());

    __ get_method_counters(rbx, rax, done);

    if (ProfileInterpreter) {
      __ incrementl(Address(rax,
              MethodCounters::interpreter_invocation_counter_offset()));
    }
    // Update standard invocation counters
    __ movl(rcx, invocation_counter);
    __ incrementl(rcx, InvocationCounter::count_increment);
    __ movl(invocation_counter, rcx); // save invocation count

    __ movl(rax, backedge_counter);   // load backedge counter
    __ andl(rax, InvocationCounter::count_mask_value); // mask out the status bits

    __ addl(rcx, rax);                // add both counters

    // profile_method is non-null only for interpreted method so
    // profile_method != NULL == !native_call

    if (ProfileInterpreter && profile_method != NULL) {
      // Test to see if we should create a method data oop
      __ movptr(rax, Address(rbx, Method::method_counters_offset()));
      __ cmp32(rcx, Address(rax, in_bytes(MethodCounters::interpreter_profile_limit_offset())));
      __ jcc(Assembler::less, *profile_method_continue);

      // if no method data exists, go to profile_method
      __ test_method_data_pointer(rax, *profile_method);
    }

    __ movptr(rax, Address(rbx, Method::method_counters_offset()));
    __ cmp32(rcx, Address(rax, in_bytes(MethodCounters::interpreter_invocation_limit_offset())));
    __ jcc(Assembler::aboveEqual, *overflow);
    __ bind(done);
  }
}

void TemplateInterpreterGenerator::generate_counter_overflow(Label& do_continue) {

  // Asm interpreter on entry
  // r14/rdi - locals
  // r13/rsi - bcp
  // rbx - method
  // rdx - cpool --- DOES NOT APPEAR TO BE TRUE
  // rbp - interpreter frame

  // On return (i.e. jump to entry_point) [ back to invocation of interpreter ]
  // Everything as it was on entry
  // rdx is not restored. Doesn't appear to really be set.

  // InterpreterRuntime::frequency_counter_overflow takes two
  // arguments, the first (thread) is passed by call_VM, the second
  // indicates if the counter overflow occurs at a backwards branch
  // (NULL bcp). We pass zero for it. The call returns the address
  // of the verified entry point for the method or NULL if the
  // compilation did not complete (either went background or bailed
  // out).
  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  __ movl(rarg, 0);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::frequency_counter_overflow),
             rarg);

  __ movptr(rbx, Address(rbp, method_offset));   // restore Method*
  // Preserve invariant that r13/r14 contain bcp/locals of sender frame
  // and jump to the interpreted entry.
  __ jmp(do_continue, relocInfo::none);
}

// See if we've got enough room on the stack for locals plus overhead below
// JavaThread::stack_overflow_limit(). If not, throw a StackOverflowError
// without going through the signal handler, i.e., reserved and yellow zones
// will not be made usable. The shadow zone must suffice to handle the
// overflow.
// The expression stack grows down incrementally, so the normal guard
// page mechanism will work for that.
//
// NOTE: The additional locals are also always pushed (this wasn't
// obvious in generate_fixed_frame), so the guard should work for them
// too.
//
// Args:
//      rdx: number of additional locals this frame needs (what we must check)
//      rbx: Method*
//
// Kills:
//      rax
void TemplateInterpreterGenerator::generate_stack_overflow_check(void) {

  // monitor entry size: see picture of stack in frame_x86.hpp
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

  // total overhead size: entry_size + (saved rbp through expr stack
  // bottom). be sure to change this if you add/subtract anything
  // to/from the overhead area
  const int overhead_size =
    -(frame::interpreter_frame_initial_sp_offset * wordSize) + entry_size;

  const int page_size = os::vm_page_size();

  Label after_frame_check;

  // see if the frame is greater than one page in size. If so,
  // then we need to verify there is enough stack space remaining
  // for the additional locals.
  __ cmpl(rdx, (page_size - overhead_size) / Interpreter::stackElementSize);
  __ jcc(Assembler::belowEqual, after_frame_check);

  // compute rsp as if this were going to be the last frame on
  // the stack before the red zone

  Label after_frame_check_pop;
  const Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread);
#ifndef _LP64
  __ push(thread);
  __ get_thread(thread);
#endif

  const Address stack_limit(thread, JavaThread::stack_overflow_limit_offset());

  // locals + overhead, in bytes
  __ mov(rax, rdx);
  __ shlptr(rax, Interpreter::logStackElementSize); // Convert parameter count to bytes.
  __ addptr(rax, overhead_size);

#ifdef ASSERT
  Label limit_okay;
  // Verify that thread stack overflow limit is non-zero.
  __ cmpptr(stack_limit, (int32_t)NULL_WORD);
  __ jcc(Assembler::notEqual, limit_okay);
  __ stop("stack overflow limit is zero");
  __ bind(limit_okay);
#endif

  // Add locals/frame size to stack limit.
  __ addptr(rax, stack_limit);

  // Check against the current stack bottom.
  __ cmpptr(rsp, rax);

  __ jcc(Assembler::above, after_frame_check_pop);
  NOT_LP64(__ pop(rsi));  // get saved bcp

  // Restore sender's sp as SP. This is necessary if the sender's
  // frame is an extended compiled frame (see gen_c2i_adapter())
  // and safer anyway in case of JSR292 adaptations.

  __ pop(rax); // return address must be moved if SP is changed
  __ mov(rsp, rbcp);
  __ push(rax);

  // Note: the restored frame is not necessarily interpreted.
  // Use the shared runtime version of the StackOverflowError.
  assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
  __ jump(ExternalAddress(StubRoutines::throw_StackOverflowError_entry()));
  // all done with frame size check
  __ bind(after_frame_check_pop);
  NOT_LP64(__ pop(rsi));

  // all done with frame size check
  __ bind(after_frame_check);
}

// Allocate monitor and lock method (asm interpreter)
//
// Args:
//      rbx: Method*
//      r14/rdi: locals
//
// Kills:
//      rax
//      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ...(param regs)
//      rscratch1, rscratch2 (scratch regs)
void TemplateInterpreterGenerator::lock_method() {
  // synchronize method
  const Address access_flags(rbx, Method::access_flags_offset());
  const Address monitor_block_top(
        rbp,
        frame::interpreter_frame_monitor_block_top_offset * wordSize);
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

#ifdef ASSERT
  {
    Label L;
    __ movl(rax, access_flags);
    __ testl(rax, JVM_ACC_SYNCHRONIZED);
    __ jcc(Assembler::notZero, L);
    __ stop("method doesn't need synchronization");
    __ bind(L);
  }
#endif // ASSERT

  // get synchronization object
  {
    Label done;
    __ movl(rax, access_flags);
    __ testl(rax, JVM_ACC_STATIC);
    // get receiver (assume this is frequent case)
    __ movptr(rax, Address(rlocals, Interpreter::local_offset_in_bytes(0)));
    __ jcc(Assembler::zero, done);
    __ load_mirror(rax, rbx);

#ifdef ASSERT
    {
      Label L;
      __ testptr(rax, rax);
      __ jcc(Assembler::notZero, L);
      __ stop("synchronization object is NULL");
      __ bind(L);
    }
#endif // ASSERT

    __ bind(done);
  }

  // add space for monitor & lock
  __ subptr(rsp, entry_size); // add space for a monitor entry
  __ movptr(monitor_block_top, rsp);  // set new monitor block top
  // store object
  __ movptr(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax);
  const Register lockreg = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
  __ movptr(lockreg, rsp); // object address
  __ lock_object(lockreg);
}

// Generate a fixed interpreter frame. This is identical setup for
// interpreted methods and for native methods hence the shared code.
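//
// A sketch of the resulting layout, derived from the pushes below
// (frame_x86.hpp holds the authoritative picture):
//
//   [ return address       ]
//   [ saved rbp            ] <-- new rbp
//   [ sender sp            ]
//   [ last_sp (NULL)       ]
//   [ Method*              ]
//   [ mirror (GC root)     ]
//   [ mdp or 0             ]
//   [ ConstantPoolCache*   ]
//   [ locals pointer       ]
//   [ bcp (0 if native)    ]
//   [ expr stack bottom    ] <-- rsp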
//
// Args:
//      rax: return address
//      rbx: Method*
//      r14/rdi: pointer to locals
//      r13/rsi: sender sp
//      rdx: cp cache
void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  // initialize fixed part of activation frame
  __ push(rax);            // save return address
  __ enter();              // save old & set new rbp
  __ push(rbcp);           // set sender sp
  __ push((int)NULL_WORD); // leave last_sp as null
  __ movptr(rbcp, Address(rbx, Method::const_offset()));    // get ConstMethod*
  __ lea(rbcp, Address(rbcp, ConstMethod::codes_offset())); // get codebase
  __ push(rbx);            // save Method*
  // Get mirror and store it in the frame as GC root for this Method*
  __ load_mirror(rdx, rbx);
  __ push(rdx);
  if (ProfileInterpreter) {
    Label method_data_continue;
    __ movptr(rdx, Address(rbx, in_bytes(Method::method_data_offset())));
    __ testptr(rdx, rdx);
    __ jcc(Assembler::zero, method_data_continue);
    __ addptr(rdx, in_bytes(MethodData::data_offset()));
    __ bind(method_data_continue);
    __ push(rdx);          // set the mdp (method data pointer)
  } else {
    __ push(0);
  }

  __ movptr(rdx, Address(rbx, Method::const_offset()));
  __ movptr(rdx, Address(rdx, ConstMethod::constants_offset()));
  __ movptr(rdx, Address(rdx, ConstantPool::cache_offset_in_bytes()));
  __ push(rdx);     // set constant pool cache
  __ push(rlocals); // set locals pointer
  if (native_call) {
    __ push(0);     // no bcp
  } else {
    __ push(rbcp);  // set bcp
  }
  __ push(0);       // reserve word for pointer to expression stack bottom
  __ movptr(Address(rsp, 0), rsp); // set expression stack bottom
}

// End of helpers

// Method entry for java.lang.ref.Reference.get.
address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
#if INCLUDE_ALL_GCS
  // Code: _aload_0, _getfield, _areturn
  // parameter size = 1
  //
  // The code that gets generated by this routine is split into 2 parts:
  //    1. The "intrinsified" code for G1 (or any SATB based GC),
  //    2. The slow path - which is an expansion of the regular method entry.
  //
  // Notes:-
  // * In the G1 code we do not check whether we need to block for
  //   a safepoint. If G1 is enabled then we must execute the specialized
  //   code for Reference.get (except when the Reference object is null)
  //   so that we can log the value in the referent field with an SATB
  //   update buffer.
  //   If the code for the getfield template is modified so that the
  //   G1 pre-barrier code is executed when the current method is
  //   Reference.get() then going through the normal method entry
  //   will be fine.
  // * The G1 code can, however, check the receiver object (the instance
  //   of java.lang.Reference) and jump to the slow path if null. If the
  //   Reference object is null then we obviously cannot fetch the referent
  //   and so we don't need to call the G1 pre-barrier. Thus we can use the
  //   regular method entry code to generate the NPE.
  //
  // rbx: Method*

  // r13: senderSP must preserve for slow path, set SP to it on fast path

  address entry = __ pc();

  const int referent_offset = java_lang_ref_Reference::referent_offset;
  guarantee(referent_offset > 0, "referent offset not initialized");

  if (UseG1GC) {
    Label slow_path;
    // rbx: method

    // Check if local 0 != NULL
    // If the receiver is null then it is OK to jump to the slow path.
    __ movptr(rax, Address(rsp, wordSize));

    __ testptr(rax, rax);
    __ jcc(Assembler::zero, slow_path);

    // rax: local 0
    // rbx: method (but can be used as scratch now)
    // rdx: scratch
    // rdi: scratch

    // Preserve the sender sp in case the pre-barrier
    // calls the runtime
    NOT_LP64(__ push(rsi));

    // Generate the G1 pre-barrier code to log the value of
    // the referent field in an SATB buffer.

    // Load the value of the referent field.
    const Address field_address(rax, referent_offset);
    __ load_heap_oop(rax, field_address);

    const Register sender_sp = NOT_LP64(rsi) LP64_ONLY(r13);
    const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
    NOT_LP64(__ get_thread(thread));

    // Generate the G1 pre-barrier code to log the value of
    // the referent field in an SATB buffer.
    __ g1_write_barrier_pre(noreg /* obj */,
                            rax /* pre_val */,
                            thread /* thread */,
                            rbx /* tmp */,
                            true /* tosca_live */,
                            true /* expand_call */);

    // _areturn
    NOT_LP64(__ pop(rsi));      // get sender sp
    __ pop(rdi);                // get return address
    __ mov(rsp, sender_sp);     // set sp to sender sp
    __ jmp(rdi);
    __ ret(0);

    // generate a vanilla interpreter entry as the slow path
    __ bind(slow_path);
    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
    return entry;
  }
#endif // INCLUDE_ALL_GCS

  // If G1 is not enabled then attempt to go through the accessor entry point
  // Reference.get is an accessor
  return NULL;
}

void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
  // Quick & dirty stack overflow checking: bang the stack & handle trap.
  // Note that we do the banging after the frame is setup, since the exception
  // handling code expects to find a valid interpreter frame on the stack.
  // Doing the banging earlier fails if the caller frame is not an interpreter
  // frame.
  // (Also, the exception throwing code expects to unlock any synchronized
  // method receiver, so do the banging after locking the receiver.)

  // Bang each page in the shadow zone. We can't assume it's been done for
  // an interpreter frame with greater than a page of locals, so each page
  // needs to be checked. Only true for non-native.
  if (UseStackBanging) {
    const int page_size = os::vm_page_size();
    const int n_shadow_pages = ((int)JavaThread::stack_shadow_zone_size()) / page_size;
    const int start_page = native_call ? n_shadow_pages : 1;
    for (int pages = start_page; pages <= n_shadow_pages; pages++) {
      __ bang_stack_with_offset(pages*page_size);
    }
  }
}

// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the
// native method than the typical interpreter frame setup.
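//
// In outline (derived from the code below): build the fixed frame plus two
// zero-initialized slots for the result handler and an oop temp, run the
// signature handler to shuffle the Java arguments into native ABI
// locations, transition the thread to _thread_in_native, call the native
// function, transition back with a safepoint/suspend check, unbox an oop
// result if there is one, then unlock if needed and tear down the
// activation.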
address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;

  // rbx: Method*
  // rbcp: sender sp

  address entry_point = __ pc();

  const Address constMethod       (rbx, Method::const_offset());
  const Address access_flags      (rbx, Method::access_flags_offset());
  const Address size_of_parameters(rcx, ConstMethod::
                                        size_of_parameters_offset());


  // get parameter size (always needed)
  __ movptr(rcx, constMethod);
  __ load_unsigned_short(rcx, size_of_parameters);

  // native calls don't need the stack size check since they have no
  // expression stack and the arguments are already on the stack and
  // we only add a handful of words to the stack

  // rbx: Method*
  // rcx: size of parameters
  // rbcp: sender sp
  __ pop(rax); // get return address

  // for natives the size of locals is zero

  // compute beginning of parameters
  __ lea(rlocals, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));

  // add 2 zero-initialized slots for native calls
  // initialize result_handler slot
  __ push((int) NULL_WORD);
  // slot for oop temp
  // (static native method holder mirror/jni oop result)
  __ push((int) NULL_WORD);

  // initialize fixed part of activation frame
  generate_fixed_frame(true);

  // make sure method is native & not abstract
#ifdef ASSERT
  __ movl(rax, access_flags);
  {
    Label L;
    __ testl(rax, JVM_ACC_NATIVE);
    __ jcc(Assembler::notZero, L);
    __ stop("tried to execute non-native method as native");
    __ bind(L);
  }
  {
    Label L;
    __ testl(rax, JVM_ACC_ABSTRACT);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // Since at this point in the method invocation the exception handler
  // would try to exit the monitor of synchronized methods which hasn't
  // been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation will
  // check this flag.

  const Register thread1 = NOT_LP64(rax) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread1));
  const Address do_not_unlock_if_synchronized(thread1,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ movbool(do_not_unlock_if_synchronized, true);

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  bang_stack_shadow_pages(true);

  // reset the _do_not_unlock_if_synchronized flag
  NOT_LP64(__ get_thread(thread1));
  __ movbool(do_not_unlock_if_synchronized, false);

  // check for synchronized methods
  // Must happen AFTER the invocation_counter check and stack overflow check,
  // so the method is not locked if the counter overflows.
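  // (Flow note: if the counter did overflow, control left via
  // invocation_counter_overflow above and re-enters at
  // continue_after_compile, so the lock below is still taken on the path
  // that eventually executes the method.)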
  if (synchronized) {
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    {
      Label L;
      __ movl(rax, access_flags);
      __ testl(rax, JVM_ACC_SYNCHRONIZED);
      __ jcc(Assembler::zero, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  {
    Label L;
    const Address monitor_block_top(rbp,
                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ movptr(rax, monitor_block_top);
    __ cmpptr(rax, rsp);
    __ jcc(Assembler::equal, L);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif

  // jvmti support
  __ notify_method_entry();

  // work registers
  const Register method = rbx;
  const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);
  const Register t      = NOT_LP64(rcx) LP64_ONLY(r11);

  // allocate space for parameters
  __ get_method(method);
  __ movptr(t, Address(method, Method::const_offset()));
  __ load_unsigned_short(t, Address(t, ConstMethod::size_of_parameters_offset()));

#ifndef _LP64
  __ shlptr(t, Interpreter::logStackElementSize); // Convert parameter count to bytes.
  __ addptr(t, 2*wordSize); // allocate two more slots for JNIEnv and possible mirror
  __ subptr(rsp, t);
  __ andptr(rsp, -(StackAlignmentInBytes)); // gcc needs 16 byte aligned stacks to do XMM intrinsics
#else
  __ shll(t, Interpreter::logStackElementSize);

  __ subptr(rsp, t);
  __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
  __ andptr(rsp, -16); // must be 16 byte boundary (see amd64 ABI)
#endif // _LP64

  // get signature handler
  {
    Label L;
    __ movptr(t, Address(method, Method::signature_handler_offset()));
    __ testptr(t, t);
    __ jcc(Assembler::notZero, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               method);
    __ get_method(method);
    __ movptr(t, Address(method, Method::signature_handler_offset()));
    __ bind(L);
  }

  // call signature handler
  assert(InterpreterRuntime::SignatureHandlerGenerator::from() == rlocals,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::to() == rsp,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == NOT_LP64(t) LP64_ONLY(rscratch1),
         "adjust this code");

  // The generated handlers do not touch RBX (the method oop).
  // However, large signatures cannot be cached and are generated
  // each time here. The slow-path generator can do a GC on return,
  // so we must reload it after the call.
  __ call(t);
  __ get_method(method); // slow path can do a GC, reload RBX


  // result handler is in rax
  // set result handler
  __ movptr(Address(rbp,
                    (frame::interpreter_frame_result_handler_offset) * wordSize),
            rax);

  // pass mirror handle if static call
  {
    Label L;
    __ movl(t, Address(method, Method::access_flags_offset()));
    __ testl(t, JVM_ACC_STATIC);
    __ jcc(Assembler::zero, L);
    // get mirror
    __ load_mirror(t, method);
    // copy mirror into activation frame
    __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize),
              t);
    // pass handle to mirror
#ifndef _LP64
    __ lea(t, Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
    __ movptr(Address(rsp, wordSize), t);
#else
    __ lea(c_rarg1,
           Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
#endif // _LP64
    __ bind(L);
  }

  // get native function entry point
  {
    Label L;
    __ movptr(rax, Address(method, Method::native_function_offset()));
    ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
    __ cmpptr(rax, unsatisfied.addr());
    __ jcc(Assembler::notEqual, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               method);
    __ get_method(method);
    __ movptr(rax, Address(method, Method::native_function_offset()));
    __ bind(L);
  }

  // pass JNIEnv
#ifndef _LP64
  __ get_thread(thread);
  __ lea(t, Address(thread, JavaThread::jni_environment_offset()));
  __ movptr(Address(rsp, 0), t);

  // set_last_Java_frame_before_call
  // It is enough that the pc()
  // points into the right code segment. It does not have to be the correct return pc.
  __ set_last_Java_frame(thread, noreg, rbp, __ pc());
#else
  __ lea(c_rarg0, Address(r15_thread, JavaThread::jni_environment_offset()));

  // It is enough that the pc() points into the right code
  // segment. It does not have to be the correct return pc.
  __ set_last_Java_frame(rsp, rbp, (address) __ pc());
#endif // _LP64

  // change thread state
#ifdef ASSERT
  {
    Label L;
    __ movl(t, Address(thread, JavaThread::thread_state_offset()));
    __ cmpl(t, _thread_in_Java);
    __ jcc(Assembler::equal, L);
    __ stop("Wrong thread state in native stub");
    __ bind(L);
  }
#endif

  // Change state to native

  __ movl(Address(thread, JavaThread::thread_state_offset()),
          _thread_in_native);

  // Call the native method.
  __ call(rax);
  // 32: result potentially in rdx:rax or ST0
  // 64: result potentially in rax or xmm0

  // Verify or restore cpu control state after JNI call
  __ restore_cpu_control_state_after_jni();

  // NOTE: The order of these pushes is known to frame::interpreter_frame_result
  // in order to extract the result of a method call. If the order of these
  // pushes change or anything else is added to the stack then the code in
  // interpreter_frame_result must also change.
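
  // (Note on the thread-state protocol, hedged: while the thread is
  // _thread_in_native the VM treats it as safepoint-safe, which is why
  // last_Java_frame was set before the call. The _thread_in_native_trans
  // store below, forced visible by the membar or serialization page write,
  // is what lets the VM stop this thread in
  // check_special_condition_for_native_trans during a safepoint.)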

#ifndef _LP64
  // save potential result in ST(0) & rdx:rax
  // (if result handler is the T_FLOAT or T_DOUBLE handler, result must be in ST0 -
  // the check is necessary to avoid potential Intel FPU overflow problems by saving/restoring 'empty' FPU registers)
  // It is safe to do this push because state is _thread_in_native and return address will be found
  // via _last_native_pc and not via _last_java_sp

  // NOTE: the order of these push(es) is known to frame::interpreter_frame_result.
  // If the order changes or anything else is added to the stack the code in
  // interpreter_frame_result will have to be changed.

  { Label L;
    Label push_double;
    ExternalAddress float_handler(AbstractInterpreter::result_handler(T_FLOAT));
    ExternalAddress double_handler(AbstractInterpreter::result_handler(T_DOUBLE));
    __ cmpptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset + 1)*wordSize),
              float_handler.addr());
    __ jcc(Assembler::equal, push_double);
    __ cmpptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset + 1)*wordSize),
              double_handler.addr());
    __ jcc(Assembler::notEqual, L);
    __ bind(push_double);
    __ push_d(); // FP values are returned using the FPU, so push FPU contents (even if UseSSE > 0).
    __ bind(L);
  }
#else
  __ push(dtos);
#endif // _LP64

  __ push(ltos);

  // change thread state
  NOT_LP64(__ get_thread(thread));
  __ movl(Address(thread, JavaThread::thread_state_offset()),
          _thread_in_native_trans);

  if (os::is_MP()) {
    if (UseMembar) {
      // Force this write out before the read below
      __ membar(Assembler::Membar_mask_bits(
           Assembler::LoadLoad | Assembler::LoadStore |
           Assembler::StoreLoad | Assembler::StoreStore));
    } else {
      // Write serialization page so VM thread can do a pseudo remote membar.
      // We use the current thread pointer to calculate a thread specific
      // offset to write to within the page. This minimizes bus traffic
      // due to cache line collision.
      __ serialize_memory(thread, rcx);
    }
  }

#ifndef _LP64
  if (AlwaysRestoreFPU) {
    // Make sure the control word is correct.
    __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
  }
#endif // _LP64

  // check for safepoint operation in progress and/or pending suspend requests
  {
    Label Continue;
    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
             SafepointSynchronize::_not_synchronized);

    Label L;
    __ jcc(Assembler::notEqual, L);
    __ cmpl(Address(thread, JavaThread::suspend_flags_offset()), 0);
    __ jcc(Assembler::equal, Continue);
    __ bind(L);

    // Don't use call_VM as it will see a possible pending exception
    // and forward it and never return here preventing us from
    // clearing _last_native_pc down below. Also can't use
    // call_VM_leaf either as it will check to see if r13 & r14 are
    // preserved and correspond to the bcp/locals pointers. So we do a
    // runtime call by hand.
    //
#ifndef _LP64
    __ push(thread);
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
                                            JavaThread::check_special_condition_for_native_trans)));
    __ increment(rsp, wordSize);
    __ get_thread(thread);
#else
    __ mov(c_rarg0, r15_thread);
    __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
    __ andptr(rsp, -16); // align stack as required by ABI
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
    __ mov(rsp, r12); // restore sp
    __ reinit_heapbase();
#endif // _LP64
    __ bind(Continue);
  }

  // change thread state
  __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_Java);

  // reset_last_Java_frame
  __ reset_last_Java_frame(thread, true);

  if (CheckJNICalls) {
    // clear_pending_jni_exception_check
    __ movptr(Address(thread, JavaThread::pending_jni_exception_check_fn_offset()), NULL_WORD);
  }

  // reset handle block
  __ movptr(t, Address(thread, JavaThread::active_handles_offset()));
  __ movl(Address(t, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);

  // If result is an oop unbox and store it in frame where gc will see it
  // and result handler will pick it up

  {
    Label no_oop, not_weak, store_result;
    __ lea(t, ExternalAddress(AbstractInterpreter::result_handler(T_OBJECT)));
    __ cmpptr(t, Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize));
    __ jcc(Assembler::notEqual, no_oop);
    // retrieve result
    __ pop(ltos);
    // Unbox oop result, e.g. JNIHandles::resolve value.
    __ resolve_jobject(rax /* value */,
                       thread /* thread */,
                       t /* tmp */);
    __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize), rax);
    // keep stack depth as expected by pushing oop which will eventually be discarded
    __ push(ltos);
    __ bind(no_oop);
  }


  {
    Label no_reguard;
    __ cmpl(Address(thread, JavaThread::stack_guard_state_offset()),
            JavaThread::stack_guard_yellow_reserved_disabled);
    __ jcc(Assembler::notEqual, no_reguard);

    __ pusha(); // XXX only save smashed registers
#ifndef _LP64
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
    __ popa();
#else
    __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
    __ andptr(rsp, -16); // align stack as required by ABI
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
    __ mov(rsp, r12); // restore sp
    __ popa(); // XXX only restore smashed registers
    __ reinit_heapbase();
#endif // _LP64

    __ bind(no_reguard);
  }


  // The method register is junk from after the thread_in_native transition
  // until here. Also can't call_VM until the bcp has been
  // restored. Need bcp for throwing exception below so get it now.
  __ get_method(method);

  // restore to have legal interpreter frame, i.e., bci == 0 <=> code_base()
  __ movptr(rbcp, Address(method, Method::const_offset()));   // get ConstMethod*
  __ lea(rbcp, Address(rbcp, ConstMethod::codes_offset()));   // get codebase

  // handle exceptions (exception handling will handle unlocking!)
  {
    Label L;
    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
    __ jcc(Assembler::zero, L);
    // Note: At some point we may want to unify this with the code
    // used in call_VM_base(); i.e., we should use the
    // StubRoutines::forward_exception code. For now this doesn't work
    // here because the rsp is not correctly set at this point.
    __ MacroAssembler::call_VM(noreg,
                               CAST_FROM_FN_PTR(address,
                               InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  // do unlocking if necessary
  {
    Label L;
    __ movl(t, Address(method, Method::access_flags_offset()));
    __ testl(t, JVM_ACC_SYNCHRONIZED);
    __ jcc(Assembler::zero, L);
    // the code below should be shared with interpreter macro
    // assembler implementation
    {
      Label unlock;
      // BasicObjectLock will be first in list, since this is a
      // synchronized method. However, need to check that the object
      // has not been unlocked by an explicit monitorexit bytecode.
      const Address monitor(rbp,
                            (intptr_t)(frame::interpreter_frame_initial_sp_offset *
                                       wordSize - (int)sizeof(BasicObjectLock)));

      const Register regmon = NOT_LP64(rdx) LP64_ONLY(c_rarg1);

      // monitor expected in c_rarg1 for slow unlock path
      __ lea(regmon, monitor); // address of first monitor

      __ movptr(t, Address(regmon, BasicObjectLock::obj_offset_in_bytes()));
      __ testptr(t, t);
      __ jcc(Assembler::notZero, unlock);

      // Entry already unlocked, need to throw exception
      __ MacroAssembler::call_VM(noreg,
                                 CAST_FROM_FN_PTR(address,
                   InterpreterRuntime::throw_illegal_monitor_state_exception));
      __ should_not_reach_here();

      __ bind(unlock);
      __ unlock_object(regmon);
    }
    __ bind(L);
  }

  // jvmti support
  // Note: This must happen _after_ handling/throwing any exceptions since
  //       the exception handler code notifies the runtime of method exits
  //       too. If this happens before, method entry/exit notifications are
  //       not properly paired (was bug - gri 11/22/99).
  __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);

  // restore potential result in edx:eax, call result handler to
  // restore potential result in ST0 & handle result

  __ pop(ltos);
  LP64_ONLY( __ pop(dtos));

  __ movptr(t, Address(rbp,
                       (frame::interpreter_frame_result_handler_offset) * wordSize));
  __ call(t);

  // remove activation
  __ movptr(t, Address(rbp,
                       frame::interpreter_frame_sender_sp_offset *
                       wordSize)); // get sender sp
  __ leave();                      // remove frame anchor
  __ pop(rdi);                     // get return address
  __ mov(rsp, t);                  // set sp to sender sp
  __ jmp(rdi);

  if (inc_counter) {
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(continue_after_compile);
  }

  return entry_point;
}

// Abstract method entry
// Attempt to execute abstract method. Throw exception
address TemplateInterpreterGenerator::generate_abstract_entry(void) {

  address entry_point = __ pc();

  // abstract method entry

  // pop return address, reset last_sp to NULL
  __ empty_expression_stack();
  __ restore_bcp();    // rsi must be correct for exception handler   (was destroyed)
  __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)

  // throw exception
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
  // the call_VM checks for exception, so we should never return here.
  __ should_not_reach_here();

  return entry_point;
}

//
// Generic interpreted method entry to (asm) interpreter
//
address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;

  // ebx: Method*
  // rbcp: sender sp
  address entry_point = __ pc();

  const Address constMethod(rbx, Method::const_offset());
  const Address access_flags(rbx, Method::access_flags_offset());
  const Address size_of_parameters(rdx,
                                   ConstMethod::size_of_parameters_offset());
  const Address size_of_locals(rdx, ConstMethod::size_of_locals_offset());


  // get parameter size (always needed)
  __ movptr(rdx, constMethod);
  __ load_unsigned_short(rcx, size_of_parameters);

  // rbx: Method*
  // rcx: size of parameters
  // rbcp: sender_sp (could differ from sp+wordSize if we were called via c2i )

  __ load_unsigned_short(rdx, size_of_locals); // get size of locals in words
  __ subl(rdx, rcx);                           // rdx = no. of additional locals

  // YYY
  //   __ incrementl(rdx);
  //   __ andl(rdx, -2);

  // see if we've got enough room on the stack for locals plus overhead.
  generate_stack_overflow_check();

  // get return address
  __ pop(rax);

  // compute beginning of parameters
  __ lea(rlocals, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));

  // rdx - # of additional locals
  // allocate space for locals
  // explicitly initialize locals
  {
    Label exit, loop;
    __ testl(rdx, rdx);
    __ jcc(Assembler::lessEqual, exit); // do nothing if rdx <= 0
    __ bind(loop);
    __ push((int) NULL_WORD); // initialize local variables
    __ decrementl(rdx);       // until everything initialized
    __ jcc(Assembler::greater, loop);
    __ bind(exit);
  }

  // initialize fixed part of activation frame
  generate_fixed_frame(false);

  // make sure method is not native & not abstract
#ifdef ASSERT
  __ movl(rax, access_flags);
  {
    Label L;
    __ testl(rax, JVM_ACC_NATIVE);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute native method as non-native");
    __ bind(L);
  }
  {
    Label L;
    __ testl(rax, JVM_ACC_ABSTRACT);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // Since at this point in the method invocation the exception
  // handler would try to exit the monitor of synchronized methods
  // which hasn't been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation
  // will check this flag.

  const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread));
  const Address do_not_unlock_if_synchronized(thread,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ movbool(do_not_unlock_if_synchronized, true);

  __ profile_parameters_type(rax, rcx, rdx);
  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  Label profile_method;
  Label profile_method_continue;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow,
                          &profile_method,
                          &profile_method_continue);
    if (ProfileInterpreter) {
      __ bind(profile_method_continue);
    }
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  // bang the stack shadow pages
  bang_stack_shadow_pages(false);

  // reset the _do_not_unlock_if_synchronized flag
  NOT_LP64(__ get_thread(thread));
  __ movbool(do_not_unlock_if_synchronized, false);

  // check for synchronized methods
  // Must happen AFTER the invocation_counter check and stack overflow check,
  // so the method is not locked if the counter overflows.
  if (synchronized) {
    // Allocate monitor and lock method
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    {
      Label L;
      __ movl(rax, access_flags);
      __ testl(rax, JVM_ACC_SYNCHRONIZED);
      __ jcc(Assembler::zero, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  {
    Label L;
    const Address monitor_block_top (rbp,
                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ movptr(rax, monitor_block_top);
    __ cmpptr(rax, rsp);
    __ jcc(Assembler::equal, L);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif

  // jvmti support
  __ notify_method_entry();

  __ dispatch_next(vtos);

  // invocation counter overflow
  if (inc_counter) {
    if (ProfileInterpreter) {
      // We have decided to profile this method in the interpreter
      __ bind(profile_method);
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
      __ set_method_data_pointer_for_bcp();
      __ get_method(rbx);
      __ jmp(profile_method_continue);
    }
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(continue_after_compile);
  }

  return entry_point;
}

//-----------------------------------------------------------------------------
// Exceptions

void TemplateInterpreterGenerator::generate_throw_exception() {
  // Entry point in previous activation (i.e., if the caller was
  // interpreted)
  Interpreter::_rethrow_exception_entry = __ pc();
  // Restore sp to interpreter_frame_last_sp even though we are going
  // to empty the expression stack for the exception processing.
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
  // rax: exception
  // rdx: return address/pc that threw exception
  __ restore_bcp();    // r13/rsi points to call/send
  __ restore_locals();
  LP64_ONLY(__ reinit_heapbase());  // restore r12 as heapbase.
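  // Note that control falls through from the rethrow entry into the throw
  // entry below; both paths share the handler lookup and dispatch code that
  // follows.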
  // Entry point for exceptions thrown within interpreter code
  Interpreter::_throw_exception_entry = __ pc();
  // expression stack is undefined here
  // rax: exception
  // r13/rsi: exception bcp
  __ verify_oop(rax);
  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  LP64_ONLY(__ mov(c_rarg1, rax));

  // expression stack must be empty before entering the VM in case of
  // an exception
  __ empty_expression_stack();
  // find exception handler address and preserve exception oop
  __ call_VM(rdx,
             CAST_FROM_FN_PTR(address,
                          InterpreterRuntime::exception_handler_for_exception),
             rarg);
  // rax: exception handler entry point
  // rdx: preserved exception oop
  // r13/rsi: bcp for exception handler
  __ push_ptr(rdx); // push exception which is now the only value on the stack
  __ jmp(rax);      // jump to exception handler (may be _remove_activation_entry!)

  // If the exception is not handled in the current frame the frame is
  // removed and the exception is rethrown (i.e. exception
  // continuation is _rethrow_exception).
  //
  // Note: At this point the bci is still the bci for the instruction
  // which caused the exception and the expression stack is
  // empty. Thus, for any VM calls at this point, GC will find a legal
  // oop map (with empty expression stack).

  // In current activation
  // tos: exception
  // esi: exception bcp

  //
  // JVMTI PopFrame support
  //

  Interpreter::_remove_activation_preserving_args_entry = __ pc();
  __ empty_expression_stack();
  // Set the popframe_processing bit in pending_popframe_condition
  // indicating that we are currently handling popframe, so that
  // call_VMs that may happen later do not trigger new popframe
  // handling cycles.
  const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread));
  __ movl(rdx, Address(thread, JavaThread::popframe_condition_offset()));
  __ orl(rdx, JavaThread::popframe_processing_bit);
  __ movl(Address(thread, JavaThread::popframe_condition_offset()), rdx);

  {
    // Check to see whether we are returning to a deoptimized frame.
    // (The PopFrame call ensures that the caller of the popped frame is
    // either interpreted or compiled and deoptimizes it if compiled.)
    // In this case, we can't call dispatch_next() after the frame is
    // popped, but instead must save the incoming arguments and restore
    // them after deoptimization has occurred.
    //
    // Note that we don't compare the return PC against the
    // deoptimization blob's unpack entry because of the presence of
    // adapter frames in C2.
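    // (Hedged reading: InterpreterRuntime::interpreter_contains(pc) returns
    // nonzero when the caller's saved return pc lies inside interpreter
    // code, i.e. the caller is still an interpreted frame and was not
    // deoptimized.)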
    Label caller_not_deoptimized;
    Register rarg = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
    __ movptr(rarg, Address(rbp, frame::return_addr_offset * wordSize));
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                               InterpreterRuntime::interpreter_contains), rarg);
    __ testl(rax, rax);
    __ jcc(Assembler::notZero, caller_not_deoptimized);

    // Compute size of arguments for saving when returning to
    // deoptimized caller
    __ get_method(rax);
    __ movptr(rax, Address(rax, Method::const_offset()));
    __ load_unsigned_short(rax, Address(rax, in_bytes(ConstMethod::
                                                size_of_parameters_offset())));
    __ shll(rax, Interpreter::logStackElementSize);
    __ restore_locals();
    __ subptr(rlocals, rax);
    __ addptr(rlocals, wordSize);
    // Save these arguments
    NOT_LP64(__ get_thread(thread));
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                                           Deoptimization::
                                           popframe_preserve_args),
                          thread, rax, rlocals);

    __ remove_activation(vtos, rdx,
                         /* throw_monitor_exception */ false,
                         /* install_monitor_exception */ false,
                         /* notify_jvmdi */ false);

    // Inform deoptimization that it is responsible for restoring
    // these arguments
    NOT_LP64(__ get_thread(thread));
    __ movl(Address(thread, JavaThread::popframe_condition_offset()),
            JavaThread::popframe_force_deopt_reexecution_bit);

    // Continue in deoptimization handler
    __ jmp(rdx);

    __ bind(caller_not_deoptimized);
  }

  __ remove_activation(vtos, rdx, /* rdx result (retaddr) is not used */
                       /* throw_monitor_exception */ false,
                       /* install_monitor_exception */ false,
                       /* notify_jvmdi */ false);

  // Finish with popframe handling.
  // A previous I2C followed by a deoptimization might have moved the
  // outgoing arguments further up the stack. PopFrame expects any
  // mutations to those outgoing arguments to be preserved, and other
  // constraints require this frame to look exactly as though it had
  // previously invoked an interpreted activation, with no space between
  // the top of the expression stack (current last_sp) and the top of
  // stack. Rather than forcing deopt to maintain this invariant at all
  // times, we call a small fixup routine that moves the mutated
  // arguments onto the top of our expression stack when necessary.
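  // Because InterpreterRuntime::popframe_move_outgoing_args is a VM-leaf
  // call made with a PC inside the interpreter, the last Java frame has to
  // be set up by hand around it (set_last_Java_frame below, reset after).
  // Hedged sketch of the call shape (64-bit path, commentary only):
  //
  //   popframe_move_outgoing_args(thread,
  //                               rsp,       // current top of stack
  //                               last_sp);  // interpreter_frame_last_sp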
#ifndef _LP64
  __ mov(rax, rsp);
  __ movptr(rbx, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ get_thread(thread);
  // PC must point into interpreter here
  __ set_last_Java_frame(thread, noreg, rbp, __ pc());
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), thread, rax, rbx);
  __ get_thread(thread);
#else
  __ mov(c_rarg1, rsp);
  __ movptr(c_rarg2, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  // PC must point into interpreter here
  __ set_last_Java_frame(noreg, rbp, __ pc());
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), r15_thread, c_rarg1, c_rarg2);
#endif
  __ reset_last_Java_frame(thread, true);

  // Restore the last_sp and null it out
  __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);

  __ restore_bcp();
  __ restore_locals();
  // The method data pointer was incremented already during
  // call profiling. We have to restore the mdp for the current bcp.
  if (ProfileInterpreter) {
    __ set_method_data_pointer_for_bcp();
  }

  // Clear the popframe condition flag
  NOT_LP64(__ get_thread(thread));
  __ movl(Address(thread, JavaThread::popframe_condition_offset()),
          JavaThread::popframe_inactive);

#if INCLUDE_JVMTI
  {
    Label L_done;
    const Register local0 = rlocals;

    __ cmpb(Address(rbcp, 0), Bytecodes::_invokestatic);
    __ jcc(Assembler::notEqual, L_done);

    // The member name argument must be restored if _invokestatic is
    // re-executed after a PopFrame call. Detect such a case in the
    // InterpreterRuntime function and return the member name
    // argument, or NULL.
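    //
    // Hedged sketch (commentary only) of the restore performed below:
    //
    //   Method* m     = current method;                 // get_method(rdx)
    //   oop candidate = *(oop*)local0;                  // first local slot
    //   oop mn = member_name_arg_or_null(candidate, m, bcp);
    //   if (mn != NULL) *(oop*)local0 = mn;             // re-install it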

    __ get_method(rdx);
    __ movptr(rax, Address(local0, 0));
    __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), rax, rdx, rbcp);

    __ testptr(rax, rax);
    __ jcc(Assembler::zero, L_done);

    __ movptr(Address(local0, 0), rax); // store the MemberName back into local 0
    __ bind(L_done);
  }
#endif // INCLUDE_JVMTI

  __ dispatch_next(vtos);
  // end of PopFrame support

  Interpreter::_remove_activation_entry = __ pc();

  // preserve exception over this code sequence
  __ pop_ptr(rax);
  NOT_LP64(__ get_thread(thread));
  __ movptr(Address(thread, JavaThread::vm_result_offset()), rax);
  // remove the activation (without doing throws on illegalMonitorExceptions)
  __ remove_activation(vtos, rdx, false, true, false);
  // restore exception
  NOT_LP64(__ get_thread(thread));
  __ get_vm_result(rax, thread);

  // In between activations - previous activation type unknown yet
  // compute continuation point - the continuation point expects the
  // following registers set up:
  //
  // rax: exception
  // rdx: return address/pc that threw exception
  // rsp: expression stack of caller
  // rbp: ebp of caller
  __ push(rax);                                  // save exception
  __ push(rdx);                                  // save return address
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                          SharedRuntime::exception_handler_for_return_address),
                        thread, rdx);
  __ mov(rbx, rax);                              // save exception handler
  __ pop(rdx);                                   // restore return address
  __ pop(rax);                                   // restore exception
  // Note that an "issuing PC" is actually the next PC after the call
  __ jmp(rbx);                                   // jump to exception
                                                 // handler of caller
}


//
// JVMTI ForceEarlyReturn support
//
address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
  address entry = __ pc();

  __ restore_bcp();
  __ restore_locals();
  __ empty_expression_stack();
  __ load_earlyret_value(state);  // 32-bit path returns the value in rdx, so don't reuse it

  const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread));
  __ movptr(rcx, Address(thread, JavaThread::jvmti_thread_state_offset()));
  Address cond_addr(rcx, JvmtiThreadState::earlyret_state_offset());

  // Clear the earlyret state
  __ movl(cond_addr, JvmtiThreadState::earlyret_inactive);

  __ remove_activation(state, rsi,
                       false, /* throw_monitor_exception */
                       false, /* install_monitor_exception */
                       true); /* notify_jvmdi */
  __ jmp(rsi);

  return entry;
} // end of ForceEarlyReturn support


//-----------------------------------------------------------------------------
// Helper for vtos entry point generation

void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
                                                         address& bep,
                                                         address& cep,
                                                         address& sep,
                                                         address& aep,
                                                         address& iep,
                                                         address& lep,
                                                         address& fep,
                                                         address& dep,
                                                         address& qep,
                                                         address& vep) {
  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
  Label L;
  aep = __ pc(); __ push_ptr();   __ jmp(L);
  qep = __ pc(); __ push_ptr();   __ jmp(L);
#ifndef _LP64
  fep = __ pc(); __ push(ftos);   __ jmp(L);
  dep = __ pc(); __ push(dtos);   __ jmp(L);
#else
  fep = __ pc(); __ push_f(xmm0); __ jmp(L);
  dep = __ pc(); __ push_d(xmm0); __ jmp(L);
#endif // _LP64
  lep = __ pc(); __ push_l();     __ jmp(L);
  bep = cep = sep =
  iep = __ pc(); __ push_i();
  vep = __ pc();
  __ bind(L);
  generate_and_dispatch(t);
}
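
// Commentary on set_vtos_entry_points (not emitted code): every template
// whose tos_in() is vtos gets one entry per top-of-stack state; each entry
// spills the incoming tos value onto the expression stack and joins the
// common vtos path at L. byte, char and short share the int entry
// (bep = cep = sep = iep) since all four are materialized in rax as itos.
// As a hedged sketch, a caller arriving at fep with a float in xmm0
// (64-bit) executes:
//
//   push_f(xmm0);             // spill ftos to the expression stack
//   jmp(L);                   // join the common vtos path
//   ...
//   generate_and_dispatch(t); // at L, now in vtos state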

//-----------------------------------------------------------------------------

// Non-product code
#ifndef PRODUCT

address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
  address entry = __ pc();

#ifndef _LP64
  // prepare expression stack
  __ pop(rcx);        // pop return address so expression stack is 'pure'
  __ push(state);     // save tosca

  // pass tosca registers as arguments & call tracer
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode), rcx, rax, rdx);
  __ mov(rcx, rax);   // make sure return address is not destroyed by pop(state)
  __ pop(state);      // restore tosca

  // return
  __ jmp(rcx);
#else
  __ push(state);
  __ push(c_rarg0);
  __ push(c_rarg1);
  __ push(c_rarg2);
  __ push(c_rarg3);
  __ mov(c_rarg2, rax);  // Pass itos
#ifdef _WIN64
  __ movflt(xmm3, xmm0); // Pass ftos
#endif
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode),
             c_rarg1, c_rarg2, c_rarg3);
  __ pop(c_rarg3);
  __ pop(c_rarg2);
  __ pop(c_rarg1);
  __ pop(c_rarg0);
  __ pop(state);
  __ ret(0);             // return from result handler
#endif // _LP64

  return entry;
}

void TemplateInterpreterGenerator::count_bytecode() {
  __ incrementl(ExternalAddress((address) &BytecodeCounter::_counter_value));
}

void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
  __ incrementl(ExternalAddress((address) &BytecodeHistogram::_counters[t->bytecode()]));
}

void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
  __ mov32(rbx, ExternalAddress((address) &BytecodePairHistogram::_index));
  __ shrl(rbx, BytecodePairHistogram::log2_number_of_codes);
  __ orl(rbx,
         ((int) t->bytecode()) <<
         BytecodePairHistogram::log2_number_of_codes);
  __ mov32(ExternalAddress((address) &BytecodePairHistogram::_index), rbx);
  __ lea(rscratch1, ExternalAddress((address) BytecodePairHistogram::_counters));
  __ incrementl(Address(rscratch1, rbx, Address::times_4));
}


void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
  // Call a little run-time stub to avoid blow-up for each bytecode.
  // The run-time stub saves the right registers, depending on
  // the tosca in-state for the given template.

  assert(Interpreter::trace_code(t->tos_in()) != NULL,
         "entry must have been generated");
#ifndef _LP64
  __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
#else
  __ mov(r12, rsp);    // remember sp (can only use r12 if not using call_VM)
  __ andptr(rsp, -16); // align stack as required by ABI
  __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
  __ mov(rsp, r12);    // restore sp
  __ reinit_heapbase();
#endif // _LP64
}


void TemplateInterpreterGenerator::stop_interpreter_at() {
  Label L;
  __ cmp32(ExternalAddress((address) &BytecodeCounter::_counter_value),
           StopInterpreterAt);
  __ jcc(Assembler::notEqual, L);
  __ int3();
  __ bind(L);
}
#endif // !PRODUCT
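
// Non-product commentary: count_bytecode() bumps BytecodeCounter's global
// counter, histogram_bytecode()/histogram_bytecode_pair() feed the bytecode
// (pair) histograms, and stop_interpreter_at() plants an int3 breakpoint
// once the counter reaches StopInterpreterAt. A hypothetical debugging run
// (these are develop flags, so this assumes a suitable non-product build):
//
//   java -XX:+CountBytecodes -XX:StopInterpreterAt=100000 ...
//
// would trap into the debugger at the 100000th interpreted bytecode.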