/*
 * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateInterpreterGenerator.hpp"
#include "interpreter/templateTable.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

#define __ Disassembler::hook<InterpreterMacroAssembler>(__FILE__, __LINE__, _masm)->

// Size of interpreter code.  Increase if too small.  The interpreter will
// fail with a guarantee ("not enough space for interpreter generation")
// if it is too small.
// Run with +PrintInterpreter to get the VM to print out the size.
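// (PrintInterpreter is a diagnostic flag, so on a product build this is
// roughly: java -XX:+UnlockDiagnosticVMOptions -XX:+PrintInterpreter -version)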
// Max size with JVMTI
#ifdef AMD64
int TemplateInterpreter::InterpreterCodeSize = JVMCI_ONLY(268) NOT_JVMCI(256) * 1024;
#else
int TemplateInterpreter::InterpreterCodeSize = 224 * 1024;
#endif // AMD64

// Global Register Names
static const Register rbcp    = LP64_ONLY(r13) NOT_LP64(rsi);
static const Register rlocals = LP64_ONLY(r14) NOT_LP64(rdi);

const int method_offset = frame::interpreter_frame_method_offset * wordSize;
const int bcp_offset    = frame::interpreter_frame_bcp_offset    * wordSize;
const int locals_offset = frame::interpreter_frame_locals_offset * wordSize;


//-----------------------------------------------------------------------------

address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();

#ifdef ASSERT
  {
    Label L;
    __ lea(rax, Address(rbp,
                        frame::interpreter_frame_monitor_block_top_offset *
                        wordSize));
    __ cmpptr(rax, rsp); // rax = maximal rsp for current rbp (stack
                         // grows negative)
    __ jcc(Assembler::aboveEqual, L); // check if frame is complete
    __ stop ("interpreter frame not set up");
    __ bind(L);
  }
#endif // ASSERT
  // Restore bcp under the assumption that the current frame is still
  // interpreted
  __ restore_bcp();

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // throw exception
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::throw_StackOverflowError));
  return entry;
}

address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler() {
  address entry = __ pc();
  // The expression stack must be empty before entering the VM if an
  // exception happened.
  __ empty_expression_stack();

  // Setup parameters.
  // Convention: the aberrant index is expected in register ebx/rbx.
  // Pass the array to create more detailed exceptions.
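  // (Callers, e.g. index_check_without_pop() in templateTable_x86, are
  // assumed to leave the array oop in rax (c_rarg1 on LP64) and the index
  // in rbx before jumping here.)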
  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::
                              throw_ArrayIndexOutOfBoundsException),
             rarg, rbx);
  return entry;
}

address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();

  // object is at TOS
  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  __ pop(rarg);

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();

  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::
                              throw_ClassCastException),
             rarg);
  return entry;
}

address TemplateInterpreterGenerator::generate_exception_handler_common(
        const char* name, const char* message, bool pass_oop) {
  assert(!pass_oop || message == NULL, "either oop or message but not both");
  address entry = __ pc();

  Register rarg  = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  Register rarg2 = NOT_LP64(rbx) LP64_ONLY(c_rarg2);

  if (pass_oop) {
    // object is at TOS
    __ pop(rarg2);
  }
  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // setup parameters
  __ lea(rarg, ExternalAddress((address)name));
  if (pass_oop) {
    __ call_VM(rax, CAST_FROM_FN_PTR(address,
                                     InterpreterRuntime::
                                     create_klass_exception),
               rarg, rarg2);
  } else {
    __ lea(rarg2, ExternalAddress((address)message));
    __ call_VM(rax,
               CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception),
               rarg, rarg2);
  }
  // throw exception
  __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
  return entry;
}

address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
  address entry = __ pc();

#ifndef _LP64
#ifdef COMPILER2
  // The FPU stack is clean if UseSSE >= 2 but must be cleaned in other cases
  if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) {
    for (int i = 1; i < 8; i++) {
      __ ffree(i);
    }
  } else if (UseSSE < 2) {
    __ empty_FPU_stack();
  }
#endif // COMPILER2
  if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) {
    __ MacroAssembler::verify_FPU(1, "generate_return_entry_for compiled");
  } else {
    __ MacroAssembler::verify_FPU(0, "generate_return_entry_for compiled");
  }

  if (state == ftos) {
    __ MacroAssembler::verify_FPU(UseSSE >= 1 ? 0 : 1, "generate_return_entry_for in interpreter");
  } else if (state == dtos) {
    __ MacroAssembler::verify_FPU(UseSSE >= 2 ? 0 : 1, "generate_return_entry_for in interpreter");
  }
#endif // _LP64

  // Restore stack bottom in case i2c adjusted stack
  __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  // and NULL it as marker that esp is now tos until next java call
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);

  __ restore_bcp();
  __ restore_locals();

  if (state == atos) {
    Register mdp = rbx;
    Register tmp = rcx;
    __ profile_return_type(mdp, rax, tmp);
  }

  const Register cache = rbx;
  const Register index = rcx;
  __ get_cache_and_index_at_bcp(cache, index, 1, index_size);

  const Register flags = cache;
  __ movl(flags, Address(cache, index, Address::times_ptr,
                         ConstantPoolCache::base_offset() +
                         ConstantPoolCacheEntry::flags_offset()));
  __ andl(flags, ConstantPoolCacheEntry::parameter_size_mask);
  __ lea(rsp, Address(rsp, flags, Interpreter::stackElementScale()));

  const Register java_thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
  if (JvmtiExport::can_pop_frame()) {
    NOT_LP64(__ get_thread(java_thread));
    __ check_and_handle_popframe(java_thread);
  }
  if (JvmtiExport::can_force_early_return()) {
    NOT_LP64(__ get_thread(java_thread));
    __ check_and_handle_earlyret(java_thread);
  }

  __ dispatch_next(state, step);

  return entry;
}


address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step, address continuation) {
  address entry = __ pc();

#ifndef _LP64
  if (state == ftos) {
    __ MacroAssembler::verify_FPU(UseSSE >= 1 ? 0 : 1, "generate_deopt_entry_for in interpreter");
  } else if (state == dtos) {
    __ MacroAssembler::verify_FPU(UseSSE >= 2 ? 0 : 1, "generate_deopt_entry_for in interpreter");
  }
#endif // _LP64

  // NULL last_sp until next java call
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
  __ restore_bcp();
  __ restore_locals();
  const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread));
#if INCLUDE_JVMCI
  // Check if we need to take lock at entry of synchronized method.  This can
  // only occur on method entry so emit it only for vtos with step 0.
  if ((EnableJVMCI || UseAOT) && state == vtos && step == 0) {
    Label L;
    __ cmpb(Address(thread, JavaThread::pending_monitorenter_offset()), 0);
    __ jcc(Assembler::zero, L);
    // Clear flag.
    __ movb(Address(thread, JavaThread::pending_monitorenter_offset()), 0);
    // Satisfy calling convention for lock_method().
    __ get_method(rbx);
    // Take lock.
    lock_method();
    __ bind(L);
  } else {
#ifdef ASSERT
    if (EnableJVMCI) {
      Label L;
      __ cmpb(Address(r15_thread, JavaThread::pending_monitorenter_offset()), 0);
      __ jcc(Assembler::zero, L);
      __ stop("unexpected pending monitor in deopt entry");
      __ bind(L);
    }
#endif
  }
#endif
  // handle exceptions
  {
    Label L;
    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
    __ jcc(Assembler::zero, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }
  if (continuation == NULL) {
    __ dispatch_next(state, step);
  } else {
    __ jump_to_entry(continuation);
  }
  return entry;
}

address TemplateInterpreterGenerator::generate_result_handler_for(
        BasicType type) {
  address entry = __ pc();
  switch (type) {
  case T_BOOLEAN: __ c2bool(rax);            break;
#ifndef _LP64
  case T_CHAR   : __ andptr(rax, 0xFFFF);    break;
#else
  case T_CHAR   : __ movzwl(rax, rax);       break;
#endif // _LP64
  case T_BYTE   : __ sign_extend_byte(rax);  break;
  case T_SHORT  : __ sign_extend_short(rax); break;
  case T_INT    : /* nothing to do */        break;
  case T_LONG   : /* nothing to do */        break;
  case T_VOID   : /* nothing to do */        break;
#ifndef _LP64
  case T_DOUBLE :
  case T_FLOAT  :
    { const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp();
      __ pop(t);                            // remove return address first
      // Must return a result for interpreter or compiler. In SSE
      // mode, results are returned in xmm0 and the FPU stack must
      // be empty.
      if (type == T_FLOAT && UseSSE >= 1) {
        // Load ST0
        __ fld_d(Address(rsp, 0));
        // Store as float and empty fpu stack
        __ fstp_s(Address(rsp, 0));
        // and reload
        __ movflt(xmm0, Address(rsp, 0));
      } else if (type == T_DOUBLE && UseSSE >= 2 ) {
        __ movdbl(xmm0, Address(rsp, 0));
      } else {
        // restore ST0
        __ fld_d(Address(rsp, 0));
      }
      // and pop the temp
      __ addptr(rsp, 2 * wordSize);
      __ push(t);                           // restore return address
    }
    break;
#else
  case T_FLOAT  : /* nothing to do */        break;
  case T_DOUBLE : /* nothing to do */        break;
#endif // _LP64

  case T_OBJECT :
    // retrieve result from frame
    __ movptr(rax, Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize));
    // and verify it
    __ verify_oop(rax);
    break;
  default       : ShouldNotReachHere();
  }
  __ ret(0);                                   // return from result handler
  return entry;
}

address TemplateInterpreterGenerator::generate_safept_entry_for(
        TosState state,
        address runtime_entry) {
  address entry = __ pc();
  __ push(state);
  __ call_VM(noreg, runtime_entry);
  __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
  return entry;
}



// Helpers for commoning out cases in the various types of method entries.
//


// increment invocation count & check for overflow
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test
//
// rbx: method
// rcx: invocation counter
//
void TemplateInterpreterGenerator::generate_counter_incr(
        Label* overflow,
        Label* profile_method,
        Label* profile_method_continue) {
  Label done;
  // Note: In tiered we increment either counters in Method* or in MDO
  // depending on whether we're profiling or not.
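  // Roughly, each increment_mask_and_jump below performs
  //   counter += increment; if ((counter & mask) == 0) goto *overflow;
  // i.e. the compile-threshold test is folded into a masked compare
  // (a sketch of MacroAssembler::increment_mask_and_jump; see
  // macroAssembler_x86 for the authoritative version).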
  if (TieredCompilation) {
    int increment = InvocationCounter::count_increment;
    Label no_mdo;
    if (ProfileInterpreter) {
      // Are we profiling?
      __ movptr(rax, Address(rbx, Method::method_data_offset()));
      __ testptr(rax, rax);
      __ jccb(Assembler::zero, no_mdo);
      // Increment counter in the MDO
      const Address mdo_invocation_counter(rax, in_bytes(MethodData::invocation_counter_offset()) +
                                                in_bytes(InvocationCounter::counter_offset()));
      const Address mask(rax, in_bytes(MethodData::invoke_mask_offset()));
      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rcx, false, Assembler::zero, overflow);
      __ jmp(done);
    }
    __ bind(no_mdo);
    // Increment counter in MethodCounters
    const Address invocation_counter(rax,
                                     MethodCounters::invocation_counter_offset() +
                                     InvocationCounter::counter_offset());
    __ get_method_counters(rbx, rax, done);
    const Address mask(rax, in_bytes(MethodCounters::invoke_mask_offset()));
    __ increment_mask_and_jump(invocation_counter, increment, mask, rcx,
                               false, Assembler::zero, overflow);
    __ bind(done);
  } else { // not TieredCompilation
    const Address backedge_counter(rax,
                                   MethodCounters::backedge_counter_offset() +
                                   InvocationCounter::counter_offset());
    const Address invocation_counter(rax,
                                     MethodCounters::invocation_counter_offset() +
                                     InvocationCounter::counter_offset());

    __ get_method_counters(rbx, rax, done);

    if (ProfileInterpreter) {
      __ incrementl(Address(rax,
                            MethodCounters::interpreter_invocation_counter_offset()));
    }
    // Update standard invocation counters
    __ movl(rcx, invocation_counter);
    __ incrementl(rcx, InvocationCounter::count_increment);
    __ movl(invocation_counter, rcx); // save invocation count

    __ movl(rax, backedge_counter);   // load backedge counter
    __ andl(rax, InvocationCounter::count_mask_value); // mask out the status bits

    __ addl(rcx, rax);                // add both counters

    // profile_method is non-null only for interpreted method so
    // profile_method != NULL == !native_call

    if (ProfileInterpreter && profile_method != NULL) {
      // Test to see if we should create a method data oop
      __ movptr(rax, Address(rbx, Method::method_counters_offset()));
      __ cmp32(rcx, Address(rax, in_bytes(MethodCounters::interpreter_profile_limit_offset())));
      __ jcc(Assembler::less, *profile_method_continue);

      // if no method data exists, go to profile_method
      __ test_method_data_pointer(rax, *profile_method);
    }

    __ movptr(rax, Address(rbx, Method::method_counters_offset()));
    __ cmp32(rcx, Address(rax, in_bytes(MethodCounters::interpreter_invocation_limit_offset())));
    __ jcc(Assembler::aboveEqual, *overflow);
    __ bind(done);
  }
}

void TemplateInterpreterGenerator::generate_counter_overflow(Label& do_continue) {

  // Asm interpreter on entry
  // r14/rdi - locals
  // r13/rsi - bcp
  // rbx     - method
  // rdx     - cpool --- DOES NOT APPEAR TO BE TRUE
  // rbp     - interpreter frame

  // On return (i.e. jump to entry_point) [ back to invocation of interpreter ]
  // Everything as it was on entry
  // rdx is not restored. Doesn't appear to really be set.

  // InterpreterRuntime::frequency_counter_overflow takes two
  // arguments: the first (thread) is passed by call_VM, the second
  // indicates if the counter overflow occurs at a backwards branch
  // (NULL bcp).  We pass zero for it.  The call returns the address
  // of the verified entry point for the method or NULL if the
  // compilation did not complete (either went background or bailed
  // out).
  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  __ movl(rarg, 0);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::frequency_counter_overflow),
             rarg);

  __ movptr(rbx, Address(rbp, method_offset));   // restore Method*
  // Preserve invariant that r13/r14 contain bcp/locals of sender frame
  // and jump to the interpreted entry.
  __ jmp(do_continue, relocInfo::none);
}

// See if we've got enough room on the stack for locals plus overhead below
// JavaThread::stack_overflow_limit(). If not, throw a StackOverflowError
// without going through the signal handler, i.e., reserved and yellow zones
// will not be made usable. The shadow zone must suffice to handle the
// overflow.
// The expression stack grows down incrementally, so the normal guard
// page mechanism will work for that.
//
// NOTE: Since the additional locals are also always pushed (this wasn't
// obvious in generate_fixed_frame), the guard should work for them too.
//
// Args:
//      rdx: number of additional locals this frame needs (what we must check)
//      rbx: Method*
//
// Kills:
//      rax
void TemplateInterpreterGenerator::generate_stack_overflow_check(void) {

  // monitor entry size: see picture of stack in frame_x86.hpp
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

  // total overhead size: entry_size + (saved rbp through expr stack
  // bottom).  be sure to change this if you add/subtract anything
  // to/from the overhead area
  const int overhead_size =
    -(frame::interpreter_frame_initial_sp_offset * wordSize) + entry_size;

  const int page_size = os::vm_page_size();

  Label after_frame_check;

  // see if the frame is greater than one page in size. If so,
  // then we need to verify there is enough stack space remaining
  // for the additional locals.
  __ cmpl(rdx, (page_size - overhead_size) / Interpreter::stackElementSize);
  __ jcc(Assembler::belowEqual, after_frame_check);

  // compute rsp as if this were going to be the last frame on
  // the stack before the red zone

  Label after_frame_check_pop;
  const Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread);
#ifndef _LP64
  __ push(thread);
  __ get_thread(thread);
#endif

  const Address stack_limit(thread, JavaThread::stack_overflow_limit_offset());

  // locals + overhead, in bytes
  __ mov(rax, rdx);
  __ shlptr(rax, Interpreter::logStackElementSize); // Convert parameter count to bytes.
  __ addptr(rax, overhead_size);

#ifdef ASSERT
  Label limit_okay;
  // Verify that thread stack overflow limit is non-zero.
  __ cmpptr(stack_limit, (int32_t)NULL_WORD);
  __ jcc(Assembler::notEqual, limit_okay);
  __ stop("stack overflow limit is zero");
  __ bind(limit_okay);
#endif

  // Add locals/frame size to stack limit.
  __ addptr(rax, stack_limit);

  // Check against the current stack bottom.
  __ cmpptr(rsp, rax);

  __ jcc(Assembler::above, after_frame_check_pop);
  NOT_LP64(__ pop(rsi));  // get saved bcp

  // Restore sender's sp as SP. This is necessary if the sender's
  // frame is an extended compiled frame (see gen_c2i_adapter())
  // and safer anyway in case of JSR292 adaptations.

  __ pop(rax); // return address must be moved if SP is changed
  __ mov(rsp, rbcp);
  __ push(rax);

  // Note: the restored frame is not necessarily interpreted.
  // Use the shared runtime version of the StackOverflowError.
  assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
  __ jump(ExternalAddress(StubRoutines::throw_StackOverflowError_entry()));
  // all done with frame size check
  __ bind(after_frame_check_pop);
  NOT_LP64(__ pop(rsi));

  // all done with frame size check
  __ bind(after_frame_check);
}

// Allocate monitor and lock method (asm interpreter)
//
// Args:
//      rbx: Method*
//      r14/rdi: locals
//
// Kills:
//      rax
//      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ...(param regs)
//      rscratch1, rscratch2 (scratch regs)
void TemplateInterpreterGenerator::lock_method() {
  // synchronize method
  const Address access_flags(rbx, Method::access_flags_offset());
  const Address monitor_block_top(
        rbp,
        frame::interpreter_frame_monitor_block_top_offset * wordSize);
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

#ifdef ASSERT
  {
    Label L;
    __ movl(rax, access_flags);
    __ testl(rax, JVM_ACC_SYNCHRONIZED);
    __ jcc(Assembler::notZero, L);
    __ stop("method doesn't need synchronization");
    __ bind(L);
  }
#endif // ASSERT

  // get synchronization object
  {
    Label done;
    __ movl(rax, access_flags);
    __ testl(rax, JVM_ACC_STATIC);
    // get receiver (assume this is frequent case)
    __ movptr(rax, Address(rlocals, Interpreter::local_offset_in_bytes(0)));
    __ jcc(Assembler::zero, done);
    __ load_mirror(rax, rbx);

#ifdef ASSERT
    {
      Label L;
      __ testptr(rax, rax);
      __ jcc(Assembler::notZero, L);
      __ stop("synchronization object is NULL");
      __ bind(L);
    }
#endif // ASSERT

    __ bind(done);
    __ resolve(IS_NOT_NULL, rax);
  }

  // add space for monitor & lock
  __ subptr(rsp, entry_size);        // add space for a monitor entry
  __ movptr(monitor_block_top, rsp); // set new monitor block top
  // store object
  __ movptr(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax);
  const Register lockreg = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
  __ movptr(lockreg, rsp); // object address
  __ lock_object(lockreg);
}

// Generate a fixed interpreter frame. This is identical setup for
// interpreted methods and for native methods hence the shared code.
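//
// The pushes below produce roughly this layout (a sketch derived from the
// code; symbolic offsets are defined in frame_x86.hpp):
//      [ return address       ]
//      [ saved rbp            ] <-- rbp
//      [ sender sp            ]
//      [ last_sp (NULL)       ]
//      [ Method*              ]
//      [ mirror               ]
//      [ mdp (or 0)           ]
//      [ ConstantPoolCache*   ]
//      [ locals pointer       ]
//      [ bcp (0 if native)    ]
//      [ expr. stack bottom   ] <-- rsp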
//
// Args:
//      rax: return address
//      rbx: Method*
//      r14/rdi: pointer to locals
//      r13/rsi: sender sp
//      rdx: cp cache
void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  // initialize fixed part of activation frame
  __ push(rax);            // save return address
  __ enter();              // save old & set new rbp
  __ push(rbcp);           // set sender sp
  __ push((int)NULL_WORD); // leave last_sp as null
  __ movptr(rbcp, Address(rbx, Method::const_offset()));    // get ConstMethod*
  __ lea(rbcp, Address(rbcp, ConstMethod::codes_offset())); // get codebase
  __ push(rbx);            // save Method*
  // Get mirror and store it in the frame as GC root for this Method*
  __ load_mirror(rdx, rbx);
  __ push(rdx);
  if (ProfileInterpreter) {
    Label method_data_continue;
    __ movptr(rdx, Address(rbx, in_bytes(Method::method_data_offset())));
    __ testptr(rdx, rdx);
    __ jcc(Assembler::zero, method_data_continue);
    __ addptr(rdx, in_bytes(MethodData::data_offset()));
    __ bind(method_data_continue);
    __ push(rdx);          // set the mdp (method data pointer)
  } else {
    __ push(0);
  }

  __ movptr(rdx, Address(rbx, Method::const_offset()));
  __ movptr(rdx, Address(rdx, ConstMethod::constants_offset()));
  __ movptr(rdx, Address(rdx, ConstantPool::cache_offset_in_bytes()));
  __ push(rdx);     // set constant pool cache
  __ push(rlocals); // set locals pointer
  if (native_call) {
    __ push(0);     // no bcp
  } else {
    __ push(rbcp);  // set bcp
  }
  __ push(0); // reserve word for pointer to expression stack bottom
  __ movptr(Address(rsp, 0), rsp); // set expression stack bottom
}

// End of helpers

// Method entry for java.lang.ref.Reference.get.
address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
  // Code: _aload_0, _getfield, _areturn
  // parameter size = 1
  //
  // The code that gets generated by this routine is split into 2 parts:
  //    1. The "intrinsified" code performing an ON_WEAK_OOP_REF load,
  //    2. The slow path - which is an expansion of the regular method entry.
  //
  // Notes:-
  // * An intrinsic is always executed, where an ON_WEAK_OOP_REF load is performed.
  // * We may jump to the slow path iff the receiver is null. If the
  //   Reference object is null then we no longer perform an ON_WEAK_OOP_REF load.
  //   Thus we can use the regular method entry code to generate the NPE.
  //
  // rbx: Method*

  // r13: senderSP must preserve for slow path, set SP to it on fast path

  address entry = __ pc();

  const int referent_offset = java_lang_ref_Reference::referent_offset;
  guarantee(referent_offset > 0, "referent offset not initialized");

  Label slow_path;
  // rbx: method

  // Check if local 0 != NULL
  // If the receiver is null then it is OK to jump to the slow path.
  __ movptr(rax, Address(rsp, wordSize));

  __ testptr(rax, rax);
  __ jcc(Assembler::zero, slow_path);

  // rax: local 0
  // rbx: method (but can be used as scratch now)
  // rdx: scratch
  // rdi: scratch

  // Preserve the sender sp in case the load barrier
  // calls the runtime
  NOT_LP64(__ push(rsi));

  // Load the value of the referent field.
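  // In Java terms this fast path is roughly "return this.referent;", with
  // the load routed through the GC barrier (ON_WEAK_OOP_REF) so that the
  // collector can observe the weak read.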
  const Address field_address(rax, referent_offset);
  __ load_heap_oop(rax, field_address, /*tmp1*/ rbx, /*tmp_thread*/ rdx, ON_WEAK_OOP_REF);

  // _areturn
  const Register sender_sp = NOT_LP64(rsi) LP64_ONLY(r13);
  NOT_LP64(__ pop(rsi));  // get sender sp
  __ pop(rdi);            // get return address
  __ mov(rsp, sender_sp); // set sp to sender sp
  __ jmp(rdi);
  __ ret(0);

  // generate a vanilla interpreter entry as the slow path
  __ bind(slow_path);
  __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
  return entry;
}

void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
  // Quick & dirty stack overflow checking: bang the stack & handle trap.
  // Note that we do the banging after the frame is setup, since the exception
  // handling code expects to find a valid interpreter frame on the stack.
  // Doing the banging earlier fails if the caller frame is not an interpreter
  // frame.
  // (Also, the exception throwing code expects to unlock any synchronized
  // method receiver, so do the banging after locking the receiver.)

  // Bang each page in the shadow zone. We can't assume it's been done for
  // an interpreter frame with greater than a page of locals, so each page
  // needs to be checked.  Only true for non-native.
  if (UseStackBanging) {
    const int page_size = os::vm_page_size();
    const int n_shadow_pages = ((int)JavaThread::stack_shadow_zone_size()) / page_size;
    const int start_page = native_call ? n_shadow_pages : 1;
    for (int pages = start_page; pages <= n_shadow_pages; pages++) {
      __ bang_stack_with_offset(pages*page_size);
    }
  }
}

// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the
// native method than the typical interpreter frame setup.
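// In outline, the stub below builds the fixed frame, locks the receiver or
// mirror if the method is synchronized, transitions the thread from
// _thread_in_Java to _thread_in_native, calls the native function, then
// transitions back through _thread_in_native_trans (with a safepoint and
// suspend check), unlocks if necessary, unboxes any oop result, and runs
// the result handler.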
address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;

  // rbx: Method*
  // rbcp: sender sp

  address entry_point = __ pc();

  const Address constMethod       (rbx, Method::const_offset());
  const Address access_flags      (rbx, Method::access_flags_offset());
  const Address size_of_parameters(rcx, ConstMethod::
                                        size_of_parameters_offset());


  // get parameter size (always needed)
  __ movptr(rcx, constMethod);
  __ load_unsigned_short(rcx, size_of_parameters);

  // native calls don't need the stack size check since they have no
  // expression stack and the arguments are already on the stack and
  // we only add a handful of words to the stack

  // rbx: Method*
  // rcx: size of parameters
  // rbcp: sender sp
  __ pop(rax); // get return address

  // for natives the size of locals is zero

  // compute beginning of parameters
  __ lea(rlocals, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));

  // add 2 zero-initialized slots for native calls
  // initialize result_handler slot
  __ push((int) NULL_WORD);
  // slot for oop temp
  // (static native method holder mirror/jni oop result)
  __ push((int) NULL_WORD);

  // initialize fixed part of activation frame
  generate_fixed_frame(true);

  // make sure method is native & not abstract
#ifdef ASSERT
  __ movl(rax, access_flags);
  {
    Label L;
    __ testl(rax, JVM_ACC_NATIVE);
    __ jcc(Assembler::notZero, L);
    __ stop("tried to execute non-native method as native");
    __ bind(L);
  }
  {
    Label L;
    __ testl(rax, JVM_ACC_ABSTRACT);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // Since at this point in the method invocation the exception handler
  // would try to exit the monitor of synchronized methods which hasn't
  // been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation will
  // check this flag.

  const Register thread1 = NOT_LP64(rax) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread1));
  const Address do_not_unlock_if_synchronized(thread1,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ movbool(do_not_unlock_if_synchronized, true);

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  bang_stack_shadow_pages(true);

  // reset the _do_not_unlock_if_synchronized flag
  NOT_LP64(__ get_thread(thread1));
  __ movbool(do_not_unlock_if_synchronized, false);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so the method is not locked if the counter overflows.
  if (synchronized) {
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    {
      Label L;
      __ movl(rax, access_flags);
      __ testl(rax, JVM_ACC_SYNCHRONIZED);
      __ jcc(Assembler::zero, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  {
    Label L;
    const Address monitor_block_top(rbp,
                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ movptr(rax, monitor_block_top);
    __ cmpptr(rax, rsp);
    __ jcc(Assembler::equal, L);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif

  // jvmti support
  __ notify_method_entry();

  // work registers
  const Register method = rbx;
  const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);
  const Register t      = NOT_LP64(rcx) LP64_ONLY(r11);

  // allocate space for parameters
  __ get_method(method);
  __ movptr(t, Address(method, Method::const_offset()));
  __ load_unsigned_short(t, Address(t, ConstMethod::size_of_parameters_offset()));

#ifndef _LP64
  __ shlptr(t, Interpreter::logStackElementSize); // Convert parameter count to bytes.
  __ addptr(t, 2*wordSize); // allocate two more slots for JNIEnv and possible mirror
  __ subptr(rsp, t);
  __ andptr(rsp, -(StackAlignmentInBytes)); // gcc needs 16 byte aligned stacks to do XMM intrinsics
#else
  __ shll(t, Interpreter::logStackElementSize);

  __ subptr(rsp, t);
  __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
  __ andptr(rsp, -16); // must be 16 byte boundary (see amd64 ABI)
#endif // _LP64

  // get signature handler
  {
    Label L;
    __ movptr(t, Address(method, Method::signature_handler_offset()));
    __ testptr(t, t);
    __ jcc(Assembler::notZero, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               method);
    __ get_method(method);
    __ movptr(t, Address(method, Method::signature_handler_offset()));
    __ bind(L);
  }

  // call signature handler
  assert(InterpreterRuntime::SignatureHandlerGenerator::from() == rlocals,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::to() == rsp,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == NOT_LP64(t) LP64_ONLY(rscratch1),
         "adjust this code");

  // The generated handlers do not touch RBX (the method oop).
  // However, large signatures cannot be cached and are generated
  // each time here.  The slow-path generator can do a GC on return,
  // so we must reload it after the call.
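  // As the asserts above record, the handler copies the Java arguments from
  // the locals area (rlocals) into the C ABI locations reserved above
  // (argument registers and/or outgoing stack slots at rsp) and returns the
  // address of the appropriate result handler in rax.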
  __ call(t);
  __ get_method(method); // slow path can do a GC, reload RBX


  // result handler is in rax
  // set result handler
  __ movptr(Address(rbp,
                    (frame::interpreter_frame_result_handler_offset) * wordSize),
            rax);

  // pass mirror handle if static call
  {
    Label L;
    __ movl(t, Address(method, Method::access_flags_offset()));
    __ testl(t, JVM_ACC_STATIC);
    __ jcc(Assembler::zero, L);
    // get mirror
    __ load_mirror(t, method, rax);
    // copy mirror into activation frame
    __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize),
              t);
    // pass handle to mirror
#ifndef _LP64
    __ lea(t, Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
    __ movptr(Address(rsp, wordSize), t);
#else
    __ lea(c_rarg1,
           Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
#endif // _LP64
    __ bind(L);
  }

  // get native function entry point
  {
    Label L;
    __ movptr(rax, Address(method, Method::native_function_offset()));
    ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
    __ cmpptr(rax, unsatisfied.addr());
    __ jcc(Assembler::notEqual, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               method);
    __ get_method(method);
    __ movptr(rax, Address(method, Method::native_function_offset()));
    __ bind(L);
  }

  // pass JNIEnv
#ifndef _LP64
  __ get_thread(thread);
  __ lea(t, Address(thread, JavaThread::jni_environment_offset()));
  __ movptr(Address(rsp, 0), t);

  // set_last_Java_frame_before_call
  // It is enough that the pc() points into the right code segment.
  // It does not have to be the correct return pc.
  __ set_last_Java_frame(thread, noreg, rbp, __ pc());
#else
  __ lea(c_rarg0, Address(r15_thread, JavaThread::jni_environment_offset()));

  // It is enough that the pc() points into the right code
  // segment. It does not have to be the correct return pc.
  __ set_last_Java_frame(rsp, rbp, (address) __ pc());
#endif // _LP64

  // change thread state
#ifdef ASSERT
  {
    Label L;
    __ movl(t, Address(thread, JavaThread::thread_state_offset()));
    __ cmpl(t, _thread_in_Java);
    __ jcc(Assembler::equal, L);
    __ stop("Wrong thread state in native stub");
    __ bind(L);
  }
#endif

  // Change state to native

  __ movl(Address(thread, JavaThread::thread_state_offset()),
          _thread_in_native);

  // Call the native method.
  __ call(rax);
  // 32: result potentially in rdx:rax or ST0
  // 64: result potentially in rax or xmm0

  // Verify or restore cpu control state after JNI call
  __ restore_cpu_control_state_after_jni();

  // NOTE: The order of these pushes is known to frame::interpreter_frame_result
  // in order to extract the result of a method call. If the order of these
  // pushes change or anything else is added to the stack then the code in
  // interpreter_frame_result must also change.

#ifndef _LP64
  // save potential result in ST(0) & rdx:rax
  // (if result handler is the T_FLOAT or T_DOUBLE handler, result must be in ST0 -
  // the check is necessary to avoid potential Intel FPU overflow problems by saving/restoring 'empty' FPU registers)
  // It is safe to do this push because state is _thread_in_native and return address will be found
  // via _last_native_pc and not via _last_java_sp

  // NOTE: the order of these push(es) is known to frame::interpreter_frame_result.
  // If the order changes or anything else is added to the stack the code in
  // interpreter_frame_result will have to be changed.

  { Label L;
    Label push_double;
    ExternalAddress float_handler(AbstractInterpreter::result_handler(T_FLOAT));
    ExternalAddress double_handler(AbstractInterpreter::result_handler(T_DOUBLE));
    __ cmpptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset + 1)*wordSize),
              float_handler.addr());
    __ jcc(Assembler::equal, push_double);
    __ cmpptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset + 1)*wordSize),
              double_handler.addr());
    __ jcc(Assembler::notEqual, L);
    __ bind(push_double);
    __ push_d(); // FP values are returned using the FPU, so push FPU contents (even if UseSSE > 0).
    __ bind(L);
  }
#else
  __ push(dtos);
#endif // _LP64

  __ push(ltos);

  // change thread state
  NOT_LP64(__ get_thread(thread));
  __ movl(Address(thread, JavaThread::thread_state_offset()),
          _thread_in_native_trans);

  if (os::is_MP()) {
    if (UseMembar) {
      // Force this write out before the read below
      __ membar(Assembler::Membar_mask_bits(
                  Assembler::LoadLoad | Assembler::LoadStore |
                  Assembler::StoreLoad | Assembler::StoreStore));
    } else {
      // Write serialization page so VM thread can do a pseudo remote membar.
      // We use the current thread pointer to calculate a thread specific
      // offset to write to within the page. This minimizes bus traffic
      // due to cache line collision.
      __ serialize_memory(thread, rcx);
    }
  }

#ifndef _LP64
  if (AlwaysRestoreFPU) {
    // Make sure the control word is correct.
    __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
  }
#endif // _LP64

  // check for safepoint operation in progress and/or pending suspend requests
  {
    Label Continue;
    Label slow_path;

#ifndef _LP64
    __ safepoint_poll(slow_path, thread, noreg);
#else
    __ safepoint_poll(slow_path, r15_thread, rscratch1);
#endif

    __ cmpl(Address(thread, JavaThread::suspend_flags_offset()), 0);
    __ jcc(Assembler::equal, Continue);
    __ bind(slow_path);

    // Don't use call_VM as it will see a possible pending exception
    // and forward it and never return here preventing us from
    // clearing _last_native_pc down below.  Also can't use
    // call_VM_leaf either as it will check to see if r13 & r14 are
    // preserved and correspond to the bcp/locals pointers.  So we do a
    // runtime call by hand.
    //
#ifndef _LP64
    __ push(thread);
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
                                            JavaThread::check_special_condition_for_native_trans)));
    __ increment(rsp, wordSize);
    __ get_thread(thread);
#else
    __ mov(c_rarg0, r15_thread);
    __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
    __ andptr(rsp, -16); // align stack as required by ABI
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
    __ mov(rsp, r12); // restore sp
    __ reinit_heapbase();
#endif // _LP64
    __ bind(Continue);
  }

  // change thread state
  __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_Java);

  // reset_last_Java_frame
  __ reset_last_Java_frame(thread, true);

  if (CheckJNICalls) {
    // clear_pending_jni_exception_check
    __ movptr(Address(thread, JavaThread::pending_jni_exception_check_fn_offset()), NULL_WORD);
  }

  // reset handle block
  __ movptr(t, Address(thread, JavaThread::active_handles_offset()));
  __ movl(Address(t, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);

  // If result is an oop unbox and store it in frame where gc will see it
  // and result handler will pick it up

  {
    Label no_oop;
    __ lea(t, ExternalAddress(AbstractInterpreter::result_handler(T_OBJECT)));
    __ cmpptr(t, Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize));
    __ jcc(Assembler::notEqual, no_oop);
    // retrieve result
    __ pop(ltos);
    // Unbox oop result, e.g. JNIHandles::resolve value.
    __ resolve_jobject(rax /* value */,
                       thread /* thread */,
                       t /* tmp */);
    __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize), rax);
    // keep stack depth as expected by pushing oop which will eventually be discarded
    __ push(ltos);
    __ bind(no_oop);
  }


  {
    Label no_reguard;
    __ cmpl(Address(thread, JavaThread::stack_guard_state_offset()),
            JavaThread::stack_guard_yellow_reserved_disabled);
    __ jcc(Assembler::notEqual, no_reguard);

    __ pusha(); // XXX only save smashed registers
#ifndef _LP64
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
    __ popa();
#else
    __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
    __ andptr(rsp, -16); // align stack as required by ABI
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
    __ mov(rsp, r12); // restore sp
    __ popa(); // XXX only restore smashed registers
    __ reinit_heapbase();
#endif // _LP64

    __ bind(no_reguard);
  }


  // The method register is junk from after the thread_in_native transition
  // until here.  Also can't call_VM until the bcp has been
  // restored.  Need bcp for throwing exception below so get it now.
  __ get_method(method);

  // restore to have legal interpreter frame, i.e., bci == 0 <=> code_base()
  __ movptr(rbcp, Address(method, Method::const_offset()));   // get ConstMethod*
  __ lea(rbcp, Address(rbcp, ConstMethod::codes_offset()));   // get codebase

  // handle exceptions (exception handling will handle unlocking!)
  {
    Label L;
    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
    __ jcc(Assembler::zero, L);
    // Note: At some point we may want to unify this with the code
    // used in call_VM_base(); i.e., we should use the
    // StubRoutines::forward_exception code. For now this doesn't work
    // here because the rsp is not correctly set at this point.
    __ MacroAssembler::call_VM(noreg,
                               CAST_FROM_FN_PTR(address,
                               InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  // do unlocking if necessary
  {
    Label L;
    __ movl(t, Address(method, Method::access_flags_offset()));
    __ testl(t, JVM_ACC_SYNCHRONIZED);
    __ jcc(Assembler::zero, L);
    // the code below should be shared with interpreter macro
    // assembler implementation
    {
      Label unlock;
      // BasicObjectLock will be first in list, since this is a
      // synchronized method. However, need to check that the object
      // has not been unlocked by an explicit monitorexit bytecode.
      const Address monitor(rbp,
                            (intptr_t)(frame::interpreter_frame_initial_sp_offset *
                                       wordSize - (int)sizeof(BasicObjectLock)));

      const Register regmon = NOT_LP64(rdx) LP64_ONLY(c_rarg1);

      // monitor expected in c_rarg1 for slow unlock path
      __ lea(regmon, monitor); // address of first monitor

      __ movptr(t, Address(regmon, BasicObjectLock::obj_offset_in_bytes()));
      __ testptr(t, t);
      __ jcc(Assembler::notZero, unlock);

      // Entry already unlocked, need to throw exception
      __ MacroAssembler::call_VM(noreg,
                                 CAST_FROM_FN_PTR(address,
                                                  InterpreterRuntime::throw_illegal_monitor_state_exception));
      __ should_not_reach_here();

      __ bind(unlock);
      __ unlock_object(regmon);
    }
    __ bind(L);
  }

  // jvmti support
  // Note: This must happen _after_ handling/throwing any exceptions since
  //       the exception handler code notifies the runtime of method exits
  //       too. If this happens before, method entry/exit notifications are
  //       not properly paired (was bug - gri 11/22/99).
  __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);

  // restore potential result in edx:eax, call result handler to
  // restore potential result in ST0 & handle result

  __ pop(ltos);
  LP64_ONLY( __ pop(dtos));

  __ movptr(t, Address(rbp,
                       (frame::interpreter_frame_result_handler_offset) * wordSize));
  __ call(t);

  // remove activation
  __ movptr(t, Address(rbp,
                       frame::interpreter_frame_sender_sp_offset *
                       wordSize)); // get sender sp
  __ leave();  // remove frame anchor
  __ pop(rdi); // get return address
  __ mov(rsp, t); // set sp to sender sp
  __ jmp(rdi);

  if (inc_counter) {
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(continue_after_compile);
  }

  return entry_point;
}

// Abstract method entry
// Attempt to execute abstract method. Throw exception.
address TemplateInterpreterGenerator::generate_abstract_entry(void) {

  address entry_point = __ pc();

  // abstract method entry

  // pop return address, reset last_sp to NULL
  __ empty_expression_stack();
  __ restore_bcp();    // rsi must be correct for exception handler   (was destroyed)
  __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)

  // throw exception
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodErrorWithMethod), rbx);
  // the call_VM checks for exception, so we should never return here.
  __ should_not_reach_here();

  return entry_point;
}

//
// Generic interpreted method entry to (asm) interpreter
//
address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;

  // ebx: Method*
  // rbcp: sender sp
  address entry_point = __ pc();

  const Address constMethod(rbx, Method::const_offset());
  const Address access_flags(rbx, Method::access_flags_offset());
  const Address size_of_parameters(rdx,
                                   ConstMethod::size_of_parameters_offset());
  const Address size_of_locals(rdx, ConstMethod::size_of_locals_offset());


  // get parameter size (always needed)
  __ movptr(rdx, constMethod);
  __ load_unsigned_short(rcx, size_of_parameters);

  // rbx: Method*
  // rcx: size of parameters
  // rbcp: sender_sp (could differ from sp+wordSize if we were called via c2i)

  __ load_unsigned_short(rdx, size_of_locals); // get size of locals in words
  __ subl(rdx, rcx); // rdx = no. of additional locals

  // YYY
  //   __ incrementl(rdx);
  //   __ andl(rdx, -2);

  // see if we've got enough room on the stack for locals plus overhead.
  generate_stack_overflow_check();

  // get return address
  __ pop(rax);

  // compute beginning of parameters
  __ lea(rlocals, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));

  // rdx - # of additional locals
  // allocate space for locals
  // explicitly initialize locals
  {
    Label exit, loop;
    __ testl(rdx, rdx);
    __ jcc(Assembler::lessEqual, exit); // do nothing if rdx <= 0
    __ bind(loop);
    __ push((int) NULL_WORD); // initialize local variables
    __ decrementl(rdx);       // until everything initialized
    __ jcc(Assembler::greater, loop);
    __ bind(exit);
  }

  // initialize fixed part of activation frame
  generate_fixed_frame(false);

  // make sure method is not native & not abstract
#ifdef ASSERT
  __ movl(rax, access_flags);
  {
    Label L;
    __ testl(rax, JVM_ACC_NATIVE);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute native method as non-native");
    __ bind(L);
  }
  {
    Label L;
    __ testl(rax, JVM_ACC_ABSTRACT);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // Since at this point in the method invocation the exception
  // handler would try to exit the monitor of synchronized methods
  // which hasn't been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation
  // will check this flag.

  const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread));
  const Address do_not_unlock_if_synchronized(thread,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ movbool(do_not_unlock_if_synchronized, true);

  __ profile_parameters_type(rax, rcx, rdx);
  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  Label profile_method;
  Label profile_method_continue;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow,
                          &profile_method,
                          &profile_method_continue);
    if (ProfileInterpreter) {
      __ bind(profile_method_continue);
    }
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  // bang the stack shadow pages
  bang_stack_shadow_pages(false);

  // reset the _do_not_unlock_if_synchronized flag
  NOT_LP64(__ get_thread(thread));
  __ movbool(do_not_unlock_if_synchronized, false);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so the method is not locked if the counter overflows.
  if (synchronized) {
    // Allocate monitor and lock method
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    {
      Label L;
      __ movl(rax, access_flags);
      __ testl(rax, JVM_ACC_SYNCHRONIZED);
      __ jcc(Assembler::zero, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  {
    Label L;
    const Address monitor_block_top (rbp,
                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ movptr(rax, monitor_block_top);
    __ cmpptr(rax, rsp);
    __ jcc(Assembler::equal, L);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif

  // jvmti support
  __ notify_method_entry();

  __ dispatch_next(vtos);

  // invocation counter overflow
  if (inc_counter) {
    if (ProfileInterpreter) {
      // We have decided to profile this method in the interpreter
      __ bind(profile_method);
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
      __ set_method_data_pointer_for_bcp();
      __ get_method(rbx);
      __ jmp(profile_method_continue);
    }
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(continue_after_compile);
  }

  return entry_point;
}

//-----------------------------------------------------------------------------
// Exceptions

void TemplateInterpreterGenerator::generate_throw_exception() {
  // Entry point in previous activation (i.e., if the caller was
  // interpreted)
  Interpreter::_rethrow_exception_entry = __ pc();
  // Restore sp to interpreter_frame_last_sp even though we are going
  // to empty the expression stack for the exception processing.
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
  // rax: exception
  // rdx: return address/pc that threw exception
  __ restore_bcp();    // r13/rsi points to call/send
  __ restore_locals();
  LP64_ONLY(__ reinit_heapbase());  // restore r12 as heapbase.
  // Entry point for exceptions thrown within interpreter code
  Interpreter::_throw_exception_entry = __ pc();
  // expression stack is undefined here
  // rax: exception
  // r13/rsi: exception bcp
  __ verify_oop(rax);
  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  LP64_ONLY(__ mov(c_rarg1, rax));

  // expression stack must be empty before entering the VM in case of
  // an exception
  __ empty_expression_stack();
  // find exception handler address and preserve exception oop
  __ call_VM(rdx,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::exception_handler_for_exception),
             rarg);
  // rax: exception handler entry point
  // rdx: preserved exception oop
  // r13/rsi: bcp for exception handler
  __ push_ptr(rdx); // push exception which is now the only value on the stack
  __ jmp(rax);      // jump to exception handler (may be _remove_activation_entry!)

  // If the exception is not handled in the current frame the frame is
  // removed and the exception is rethrown (i.e. exception
  // continuation is _rethrow_exception).
  //
  // Note: At this point the bci is still the bci for the instruction
  // which caused the exception and the expression stack is
  // empty. Thus, for any VM calls at this point, GC will find a legal
  // oop map (with empty expression stack).

  // In current activation
  // tos: exception
  // esi: exception bcp

  //
  // JVMTI PopFrame support
  //

  Interpreter::_remove_activation_preserving_args_entry = __ pc();
  __ empty_expression_stack();
  // Set the popframe_processing bit in pending_popframe_condition
  // indicating that we are currently handling popframe, so that
  // call_VMs that may happen later do not trigger new popframe
  // handling cycles.
  const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread));
  __ movl(rdx, Address(thread, JavaThread::popframe_condition_offset()));
  __ orl(rdx, JavaThread::popframe_processing_bit);
  __ movl(Address(thread, JavaThread::popframe_condition_offset()), rdx);

  {
    // Check to see whether we are returning to a deoptimized frame.
    // (The PopFrame call ensures that the caller of the popped frame is
    // either interpreted or compiled and deoptimizes it if compiled.)
    // In this case, we can't call dispatch_next() after the frame is
    // popped, but instead must save the incoming arguments and restore
    // them after deoptimization has occurred.
    //
    // Note that we don't compare the return PC against the
    // deoptimization blob's unpack entry because of the presence of
    // adapter frames in C2.
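    // In practice the caller is treated as deoptimized iff its return
    // address no longer points into interpreter code: the
    // InterpreterRuntime::interpreter_contains call below returns false
    // (rax == 0) in that case.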
    Label caller_not_deoptimized;
    Register rarg = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
    __ movptr(rarg, Address(rbp, frame::return_addr_offset * wordSize));
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                               InterpreterRuntime::interpreter_contains), rarg);
    __ testl(rax, rax);
    __ jcc(Assembler::notZero, caller_not_deoptimized);

    // Compute size of arguments for saving when returning to
    // deoptimized caller
    __ get_method(rax);
    __ movptr(rax, Address(rax, Method::const_offset()));
    __ load_unsigned_short(rax, Address(rax, in_bytes(ConstMethod::
                                                size_of_parameters_offset())));
    __ shll(rax, Interpreter::logStackElementSize);
    __ restore_locals();
    __ subptr(rlocals, rax);
    __ addptr(rlocals, wordSize);
    // Save these arguments
    NOT_LP64(__ get_thread(thread));
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                                           Deoptimization::
                                           popframe_preserve_args),
                          thread, rax, rlocals);

    __ remove_activation(vtos, rdx,
                         /* throw_monitor_exception */ false,
                         /* install_monitor_exception */ false,
                         /* notify_jvmdi */ false);

    // Inform deoptimization that it is responsible for restoring
    // these arguments
    NOT_LP64(__ get_thread(thread));
    __ movl(Address(thread, JavaThread::popframe_condition_offset()),
            JavaThread::popframe_force_deopt_reexecution_bit);

    // Continue in deoptimization handler
    __ jmp(rdx);

    __ bind(caller_not_deoptimized);
  }

  __ remove_activation(vtos, rdx, /* rdx result (retaddr) is not used */
                       /* throw_monitor_exception */ false,
                       /* install_monitor_exception */ false,
                       /* notify_jvmdi */ false);

  // Finish with popframe handling
  // A previous I2C followed by a deoptimization might have moved the
  // outgoing arguments further up the stack. PopFrame expects the
  // mutations to those outgoing arguments to be preserved and other
  // constraints basically require this frame to look exactly as
  // though it had previously invoked an interpreted activation with
  // no space between the top of the expression stack (current
  // last_sp) and the top of stack. Rather than force deopt to
  // maintain this kind of invariant all the time we call a small
  // fixup routine to move the mutated arguments onto the top of our
  // expression stack if necessary.
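  // That fixup routine is InterpreterRuntime::popframe_move_outgoing_args,
  // called below as a leaf with the current sp and the saved last_sp.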
#ifndef _LP64
  __ mov(rax, rsp);
  __ movptr(rbx, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ get_thread(thread);
  // PC must point into interpreter here
  __ set_last_Java_frame(thread, noreg, rbp, __ pc());
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), thread, rax, rbx);
  __ get_thread(thread);
#else
  __ mov(c_rarg1, rsp);
  __ movptr(c_rarg2, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  // PC must point into interpreter here
  __ set_last_Java_frame(noreg, rbp, __ pc());
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), r15_thread, c_rarg1, c_rarg2);
#endif
  __ reset_last_Java_frame(thread, true);

  // Restore the last_sp and null it out
  __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);

  __ restore_bcp();
  __ restore_locals();
  // The method data pointer was incremented already during
  // call profiling. We have to restore the mdp for the current bcp.
  if (ProfileInterpreter) {
    __ set_method_data_pointer_for_bcp();
  }

  // Clear the popframe condition flag
  NOT_LP64(__ get_thread(thread));
  __ movl(Address(thread, JavaThread::popframe_condition_offset()),
          JavaThread::popframe_inactive);

#if INCLUDE_JVMTI
  {
    Label L_done;
    const Register local0 = rlocals;

    __ cmpb(Address(rbcp, 0), Bytecodes::_invokestatic);
    __ jcc(Assembler::notEqual, L_done);

    // The member name argument must be restored if _invokestatic is
    // re-executed after a PopFrame call. Detect such a case in the
    // InterpreterRuntime function and return the member name
    // argument, or NULL.
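    // On return, rax holds the member name oop to store back into local
    // slot 0, or NULL if the invokestatic at rbcp needs no such treatment.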

    __ get_method(rdx);
    __ movptr(rax, Address(local0, 0));
    __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), rax, rdx, rbcp);

    __ testptr(rax, rax);
    __ jcc(Assembler::zero, L_done);

    __ movptr(Address(local0, 0), rax);
    __ bind(L_done);
  }
#endif // INCLUDE_JVMTI

  __ dispatch_next(vtos);
  // end of PopFrame support

  Interpreter::_remove_activation_entry = __ pc();

  // preserve exception over this code sequence
  __ pop_ptr(rax);
  NOT_LP64(__ get_thread(thread));
  __ movptr(Address(thread, JavaThread::vm_result_offset()), rax);
  // remove the activation (without doing throws on illegalMonitorExceptions)
  __ remove_activation(vtos, rdx, false, true, false);
  // restore exception
  NOT_LP64(__ get_thread(thread));
  __ get_vm_result(rax, thread);

  // In between activations - previous activation type unknown yet
  // compute continuation point - the continuation point expects the
  // following registers set up:
  //
  // rax: exception
  // rdx: return address/pc that threw exception
  // rsp: expression stack of caller
  // rbp: ebp of caller
  __ push(rax);                                  // save exception
  __ push(rdx);                                  // save return address
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                          SharedRuntime::exception_handler_for_return_address),
                        thread, rdx);
  __ mov(rbx, rax);                              // save exception handler
  __ pop(rdx);                                   // restore return address
  __ pop(rax);                                   // restore exception
  // Note that an "issuing PC" is actually the next PC after the call
  __ jmp(rbx);                                   // jump to exception
                                                 // handler of caller
}


//
// JVMTI ForceEarlyReturn support
//
address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
  address entry = __ pc();

  __ restore_bcp();
  __ restore_locals();
  __ empty_expression_stack();
  __ load_earlyret_value(state);  // on 32-bit the value may be returned in rdx, so don't reuse it

  const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread));
  __ movptr(rcx, Address(thread, JavaThread::jvmti_thread_state_offset()));
  Address cond_addr(rcx, JvmtiThreadState::earlyret_state_offset());

  // Clear the earlyret state
  __ movl(cond_addr, JvmtiThreadState::earlyret_inactive);

  __ remove_activation(state, rsi,
                       false, /* throw_monitor_exception */
                       false, /* install_monitor_exception */
                       true); /* notify_jvmdi */
  __ jmp(rsi);

  return entry;
} // end of ForceEarlyReturn support


//-----------------------------------------------------------------------------
// Helper for vtos entry point generation

void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
                                                         address& bep,
                                                         address& cep,
                                                         address& sep,
                                                         address& aep,
                                                         address& iep,
                                                         address& lep,
                                                         address& fep,
                                                         address& dep,
                                                         address& vep) {
  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
  Label L;
  aep = __ pc();     // atos entry point
  __ push_ptr();
  __ jmp(L);
#ifndef _LP64
  fep = __ pc();     // ftos entry point
  __ push(ftos);
  __ jmp(L);
  dep = __ pc();     // dtos entry point
  __ push(dtos);
  __ jmp(L);
#else
  fep = __ pc();     // ftos entry point
  __ push_f(xmm0);
  __ jmp(L);
  dep = __ pc();     // dtos entry point
  __ push_d(xmm0);
  __ jmp(L);
#endif // _LP64
  lep = __ pc();     // ltos entry point
  __ push_l();
  __ jmp(L);
  bep = cep = sep = iep = __ pc();     // [bcsi]tos entry point
  __ push_i();
  vep = __ pc();     // vtos entry point
  __ bind(L);
  generate_and_dispatch(t);
}

//-----------------------------------------------------------------------------

// Non-product code
#ifndef PRODUCT

address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
  address entry = __ pc();

#ifndef _LP64
  // prepare expression stack
  __ pop(rcx);          // pop return address so expression stack is 'pure'
  __ push(state);       // save tosca

  // pass tosca registers as arguments & call tracer
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode), rcx, rax, rdx);
  __ mov(rcx, rax);     // make sure return address is not destroyed by pop(state)
  __ pop(state);        // restore tosca

  // return
  __ jmp(rcx);
#else
  __ push(state);
  __ push(c_rarg0);
  __ push(c_rarg1);
  __ push(c_rarg2);
  __ push(c_rarg3);
  __ mov(c_rarg2, rax);  // Pass itos
#ifdef _WIN64
  __ movflt(xmm3, xmm0); // Pass ftos
#endif
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode),
             c_rarg1, c_rarg2, c_rarg3);
  __ pop(c_rarg3);
  __ pop(c_rarg2);
  __ pop(c_rarg1);
  __ pop(c_rarg0);
  __ pop(state);
  __ ret(0);            // return from result handler
#endif // _LP64

  return entry;
}

void TemplateInterpreterGenerator::count_bytecode() {
  __ incrementl(ExternalAddress((address) &BytecodeCounter::_counter_value));
}

void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
  __ incrementl(ExternalAddress((address) &BytecodeHistogram::_counters[t->bytecode()]));
}

void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
  __ mov32(rbx, ExternalAddress((address) &BytecodePairHistogram::_index));
  __ shrl(rbx, BytecodePairHistogram::log2_number_of_codes);
  __ orl(rbx,
         ((int) t->bytecode()) <<
         BytecodePairHistogram::log2_number_of_codes);
  __ mov32(ExternalAddress((address) &BytecodePairHistogram::_index), rbx);
  __ lea(rscratch1, ExternalAddress((address) BytecodePairHistogram::_counters));
  __ incrementl(Address(rscratch1, rbx, Address::times_4));
}


void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
  // Call a little run-time stub to avoid blow-up for each bytecode.
  // The run-time stub saves the right registers, depending on
  // the tosca in-state for the given template.

  assert(Interpreter::trace_code(t->tos_in()) != NULL,
         "entry must have been generated");
#ifndef _LP64
  __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
#else
  __ mov(r12, rsp);    // remember sp (can only use r12 if not using call_VM)
  __ andptr(rsp, -16); // align stack as required by ABI
  __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
  __ mov(rsp, r12);    // restore sp
  __ reinit_heapbase();
#endif // _LP64
}


void TemplateInterpreterGenerator::stop_interpreter_at() {
  Label L;
  __ cmp32(ExternalAddress((address) &BytecodeCounter::_counter_value),
           StopInterpreterAt);
  __ jcc(Assembler::notEqual, L);
  __ int3();
  __ bind(L);
}
#endif // !PRODUCT
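
// Note: stop_interpreter_at() plants an int3 breakpoint that fires once the
// global bytecode counter equals StopInterpreterAt; like the rest of this
// tracing support it is compiled only into non-product builds.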