/*
 * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateInterpreterGenerator.hpp"
#include "interpreter/templateTable.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

#define __ _masm->

// Size of interpreter code. Increase if too small. Interpreter will
// fail with a guarantee ("not enough space for interpreter generation")
// if too small.
// Run with +PrintInterpreter to get the VM to print out the size.
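// (For example: java -XX:+PrintInterpreter -version. Depending on the JDK
// build, the flag may require a debug build or -XX:+UnlockDiagnosticVMOptions.)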
// Max size with JVMTI
#ifdef AMD64
int TemplateInterpreter::InterpreterCodeSize = JVMCI_ONLY(268) NOT_JVMCI(256) * 1024;
#else
int TemplateInterpreter::InterpreterCodeSize = 224 * 1024;
#endif // AMD64

// Global Register Names
static const Register rbcp    = LP64_ONLY(r13) NOT_LP64(rsi);
static const Register rlocals = LP64_ONLY(r14) NOT_LP64(rdi);

const int method_offset = frame::interpreter_frame_method_offset * wordSize;
const int bcp_offset    = frame::interpreter_frame_bcp_offset    * wordSize;
const int locals_offset = frame::interpreter_frame_locals_offset * wordSize;


//-----------------------------------------------------------------------------

address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();

#ifdef ASSERT
  {
    Label L;
    __ lea(rax, Address(rbp,
                        frame::interpreter_frame_monitor_block_top_offset *
                        wordSize));
    __ cmpptr(rax, rsp); // rax = maximal rsp for current rbp (stack
                         // grows negative)
    __ jcc(Assembler::aboveEqual, L); // check if frame is complete
    __ stop ("interpreter frame not set up");
    __ bind(L);
  }
#endif // ASSERT
  // Restore bcp under the assumption that the current frame is still
  // interpreted
  __ restore_bcp();

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // throw exception
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::throw_StackOverflowError));
  return entry;
}

address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler() {
  address entry = __ pc();
  // The expression stack must be empty before entering the VM if an
  // exception happened.
  __ empty_expression_stack();

  // Setup parameters.
  // ??? convention: expect aberrant index in register ebx/rbx.
  // Pass array to create more detailed exceptions.
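  // (rarg below is the first argument register for VM calls: c_rarg1 on
  // 64-bit, rax on 32-bit. The offending index is expected in rbx, per the
  // convention noted above, and is passed through unchanged.)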
  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::
                              throw_ArrayIndexOutOfBoundsException),
             rarg, rbx);
  return entry;
}

address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();

  // object is at TOS
  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  __ pop(rarg);

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();

  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::
                              throw_ClassCastException),
             rarg);
  return entry;
}

address TemplateInterpreterGenerator::generate_exception_handler_common(
        const char* name, const char* message, bool pass_oop) {
  assert(!pass_oop || message == NULL, "either oop or message but not both");
  address entry = __ pc();

  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  Register rarg2 = NOT_LP64(rbx) LP64_ONLY(c_rarg2);

  if (pass_oop) {
    // object is at TOS
    __ pop(rarg2);
  }
  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // setup parameters
  __ lea(rarg, ExternalAddress((address)name));
  if (pass_oop) {
    __ call_VM(rax, CAST_FROM_FN_PTR(address,
                                     InterpreterRuntime::
                                     create_klass_exception),
               rarg, rarg2);
  } else {
    __ lea(rarg2, ExternalAddress((address)message));
    __ call_VM(rax,
               CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception),
               rarg, rarg2);
  }
  // throw exception
  __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
  return entry;
}

address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
  address entry = __ pc();

#ifndef _LP64
#ifdef COMPILER2
  // The FPU stack is clean if UseSSE >= 2 but must be cleaned in other cases
  if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) {
    for (int i = 1; i < 8; i++) {
      __ ffree(i);
    }
  } else if (UseSSE < 2) {
    __ empty_FPU_stack();
  }
#endif // COMPILER2
  if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) {
    __ MacroAssembler::verify_FPU(1, "generate_return_entry_for compiled");
  } else {
    __ MacroAssembler::verify_FPU(0, "generate_return_entry_for compiled");
  }

  if (state == ftos) {
    __ MacroAssembler::verify_FPU(UseSSE >= 1 ? 0 : 1, "generate_return_entry_for in interpreter");
  } else if (state == dtos) {
    __ MacroAssembler::verify_FPU(UseSSE >= 2 ?
                                  0 : 1, "generate_return_entry_for in interpreter");
  }
#endif // _LP64

  // Restore stack bottom in case i2c adjusted stack
  __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  // and NULL it as marker that esp is now tos until next java call
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);

  __ restore_bcp();
  __ restore_locals();

  if (state == atos) {
    Register mdp = rbx;
    Register tmp = rcx;
    __ profile_return_type(mdp, rax, tmp);
  }

  const Register cache = rbx;
  const Register index = rcx;
  __ get_cache_and_index_at_bcp(cache, index, 1, index_size);

  const Register flags = cache;
  __ movl(flags, Address(cache, index, Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
  __ andl(flags, ConstantPoolCacheEntry::parameter_size_mask);
  __ lea(rsp, Address(rsp, flags, Interpreter::stackElementScale()));

  const Register java_thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
  if (JvmtiExport::can_pop_frame()) {
    NOT_LP64(__ get_thread(java_thread));
    __ check_and_handle_popframe(java_thread);
  }
  if (JvmtiExport::can_force_early_return()) {
    NOT_LP64(__ get_thread(java_thread));
    __ check_and_handle_earlyret(java_thread);
  }

  __ dispatch_next(state, step);

  return entry;
}


address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step, address continuation) {
  address entry = __ pc();

#ifndef _LP64
  if (state == ftos) {
    __ MacroAssembler::verify_FPU(UseSSE >= 1 ? 0 : 1, "generate_deopt_entry_for in interpreter");
  } else if (state == dtos) {
    __ MacroAssembler::verify_FPU(UseSSE >= 2 ? 0 : 1, "generate_deopt_entry_for in interpreter");
  }
#endif // _LP64

  // NULL last_sp until next java call
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
  __ restore_bcp();
  __ restore_locals();
  const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread));
#if INCLUDE_JVMCI
  // Check if we need to take lock at entry of synchronized method. This can
  // only occur on method entry so emit it only for vtos with step 0.
  if ((EnableJVMCI || UseAOT) && state == vtos && step == 0) {
    Label L;
    __ cmpb(Address(thread, JavaThread::pending_monitorenter_offset()), 0);
    __ jcc(Assembler::zero, L);
    // Clear flag.
    __ movb(Address(thread, JavaThread::pending_monitorenter_offset()), 0);
    // Satisfy calling convention for lock_method().
    __ get_method(rbx);
    // Take lock.
    lock_method();
    __ bind(L);
  } else {
#ifdef ASSERT
    if (EnableJVMCI) {
      Label L;
      __ cmpb(Address(r15_thread, JavaThread::pending_monitorenter_offset()), 0);
      __ jccb(Assembler::zero, L);
      __ stop("unexpected pending monitor in deopt entry");
      __ bind(L);
    }
#endif
  }
#endif
  // handle exceptions
  {
    Label L;
    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
    __ jcc(Assembler::zero, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }
  if (continuation == NULL) {
    __ dispatch_next(state, step);
  } else {
    __ jump_to_entry(continuation);
  }
  return entry;
}

address TemplateInterpreterGenerator::generate_result_handler_for(
        BasicType type) {
  address entry = __ pc();
  switch (type) {
  case T_BOOLEAN: __ c2bool(rax);            break;
#ifndef _LP64
  case T_CHAR   : __ andptr(rax, 0xFFFF);    break;
#else
  case T_CHAR   : __ movzwl(rax, rax);       break;
#endif // _LP64
  case T_BYTE   : __ sign_extend_byte(rax);  break;
  case T_SHORT  : __ sign_extend_short(rax); break;
  case T_INT    : /* nothing to do */        break;
  case T_LONG   : /* nothing to do */        break;
  case T_VOID   : /* nothing to do */        break;
#ifndef _LP64
  case T_DOUBLE :
  case T_FLOAT  :
    { const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp();
      __ pop(t);                            // remove return address first
      // Must return a result for interpreter or compiler. In SSE
      // mode, results are returned in xmm0 and the FPU stack must
      // be empty.
      if (type == T_FLOAT && UseSSE >= 1) {
        // Load ST0
        __ fld_d(Address(rsp, 0));
        // Store as float and empty fpu stack
        __ fstp_s(Address(rsp, 0));
        // and reload
        __ movflt(xmm0, Address(rsp, 0));
      } else if (type == T_DOUBLE && UseSSE >= 2 ) {
        __ movdbl(xmm0, Address(rsp, 0));
      } else {
        // restore ST0
        __ fld_d(Address(rsp, 0));
      }
      // and pop the temp
      __ addptr(rsp, 2 * wordSize);
      __ push(t);                           // restore return address
    }
    break;
#else
  case T_FLOAT  : /* nothing to do */        break;
  case T_DOUBLE : /* nothing to do */        break;
#endif // _LP64

  case T_OBJECT :
    // retrieve result from frame
    __ movptr(rax, Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize));
    // and verify it
    __ verify_oop(rax);
    break;
  default       : ShouldNotReachHere();
  }
  __ ret(0);                                   // return from result handler
  return entry;
}

address TemplateInterpreterGenerator::generate_safept_entry_for(
        TosState state,
        address runtime_entry) {
  address entry = __ pc();
  __ push(state);
  __ call_VM(noreg, runtime_entry);
  __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
  return entry;
}



// Helpers for commoning out cases in the various types of method entries.
//


// increment invocation count & check for overflow
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test
//
// rbx: method
// rcx: invocation counter
//
void TemplateInterpreterGenerator::generate_counter_incr(
        Label* overflow,
        Label* profile_method,
        Label* profile_method_continue) {
  Label done;
  // Note: In tiered we increment either counters in Method* or in MDO depending on whether we're profiling or not.
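  // Roughly, the tiered path below does the following (a sketch, not
  // literal code; the mask makes the masked sum hit zero periodically, so
  // the overflow path is taken at a configurable notification frequency):
  //
  //   int* ctr = has_mdo ? &mdo->invocation_counter
  //                      : &method_counters->invocation_counter;
  //   *ctr += InvocationCounter::count_increment;
  //   if ((*ctr & invoke_mask) == 0) goto *overflow;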
  if (TieredCompilation) {
    int increment = InvocationCounter::count_increment;
    Label no_mdo;
    if (ProfileInterpreter) {
      // Are we profiling?
      __ movptr(rax, Address(rbx, Method::method_data_offset()));
      __ testptr(rax, rax);
      __ jccb(Assembler::zero, no_mdo);
      // Increment counter in the MDO
      const Address mdo_invocation_counter(rax, in_bytes(MethodData::invocation_counter_offset()) +
                                                in_bytes(InvocationCounter::counter_offset()));
      const Address mask(rax, in_bytes(MethodData::invoke_mask_offset()));
      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rcx, false, Assembler::zero, overflow);
      __ jmp(done);
    }
    __ bind(no_mdo);
    // Increment counter in MethodCounters
    const Address invocation_counter(rax,
                                     MethodCounters::invocation_counter_offset() +
                                     InvocationCounter::counter_offset());
    __ get_method_counters(rbx, rax, done);
    const Address mask(rax, in_bytes(MethodCounters::invoke_mask_offset()));
    __ increment_mask_and_jump(invocation_counter, increment, mask, rcx,
                               false, Assembler::zero, overflow);
    __ bind(done);
  } else { // not TieredCompilation
    const Address backedge_counter(rax,
                                   MethodCounters::backedge_counter_offset() +
                                   InvocationCounter::counter_offset());
    const Address invocation_counter(rax,
                                     MethodCounters::invocation_counter_offset() +
                                     InvocationCounter::counter_offset());

    __ get_method_counters(rbx, rax, done);

    if (ProfileInterpreter) {
      __ incrementl(Address(rax,
                            MethodCounters::interpreter_invocation_counter_offset()));
    }
    // Update standard invocation counters
    __ movl(rcx, invocation_counter);
    __ incrementl(rcx, InvocationCounter::count_increment);
    __ movl(invocation_counter, rcx); // save invocation count

    __ movl(rax, backedge_counter);   // load backedge counter
    __ andl(rax, InvocationCounter::count_mask_value); // mask out the status bits

    __ addl(rcx, rax);                // add both counters

    // profile_method is non-null only for interpreted method so
    // profile_method != NULL == !native_call

    if (ProfileInterpreter && profile_method != NULL) {
      // Test to see if we should create a method data oop
      __ movptr(rax, Address(rbx, Method::method_counters_offset()));
      __ cmp32(rcx, Address(rax, in_bytes(MethodCounters::interpreter_profile_limit_offset())));
      __ jcc(Assembler::less, *profile_method_continue);

      // if no method data exists, go to profile_method
      __ test_method_data_pointer(rax, *profile_method);
    }

    __ movptr(rax, Address(rbx, Method::method_counters_offset()));
    __ cmp32(rcx, Address(rax, in_bytes(MethodCounters::interpreter_invocation_limit_offset())));
    __ jcc(Assembler::aboveEqual, *overflow);
    __ bind(done);
  }
}

void TemplateInterpreterGenerator::generate_counter_overflow(Label& do_continue) {

  // Asm interpreter on entry
  // r14/rdi - locals
  // r13/rsi - bcp
  // rbx     - method
  // rdx     - cpool --- DOES NOT APPEAR TO BE TRUE
  // rbp     - interpreter frame

  // On return (i.e. jump to entry_point) [ back to invocation of interpreter ]
  // Everything as it was on entry
  // rdx is not restored. Doesn't appear to really be set.

  // InterpreterRuntime::frequency_counter_overflow takes two
  // arguments, the first (thread) is passed by call_VM, the second
  // indicates if the counter overflow occurs at a backwards branch
  // (NULL bcp). We pass zero for it.
  // The call returns the address
  // of the verified entry point for the method or NULL if the
  // compilation did not complete (either went background or bailed
  // out).
  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  __ movl(rarg, 0);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::frequency_counter_overflow),
             rarg);

  __ movptr(rbx, Address(rbp, method_offset));   // restore Method*
  // Preserve invariant that r13/r14 contain bcp/locals of sender frame
  // and jump to the interpreted entry.
  __ jmp(do_continue, relocInfo::none);
}

// See if we've got enough room on the stack for locals plus overhead below
// JavaThread::stack_overflow_limit(). If not, throw a StackOverflowError
// without going through the signal handler, i.e., reserved and yellow zones
// will not be made usable. The shadow zone must suffice to handle the
// overflow.
// The expression stack grows down incrementally, so the normal guard
// page mechanism will work for that.
//
// NOTE: Since the additional locals are also always pushed (this was not
// obvious in generate_fixed_frame), the guard should work for them too.
//
// Args:
//      rdx: number of additional locals this frame needs (what we must check)
//      rbx: Method*
//
// Kills:
//      rax
void TemplateInterpreterGenerator::generate_stack_overflow_check(void) {

  // monitor entry size: see picture of stack in frame_x86.hpp
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

  // total overhead size: entry_size + (saved rbp through expr stack
  // bottom). be sure to change this if you add/subtract anything
  // to/from the overhead area
  const int overhead_size =
    -(frame::interpreter_frame_initial_sp_offset * wordSize) + entry_size;

  const int page_size = os::vm_page_size();

  Label after_frame_check;

  // see if the frame is greater than one page in size. If so,
  // then we need to verify there is enough stack space remaining
  // for the additional locals.
  __ cmpl(rdx, (page_size - overhead_size) / Interpreter::stackElementSize);
  __ jcc(Assembler::belowEqual, after_frame_check);

  // compute rsp as if this were going to be the last frame on
  // the stack before the red zone

  Label after_frame_check_pop;
  const Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread);
#ifndef _LP64
  __ push(thread);
  __ get_thread(thread);
#endif

  const Address stack_limit(thread, JavaThread::stack_overflow_limit_offset());

  // locals + overhead, in bytes
  __ mov(rax, rdx);
  __ shlptr(rax, Interpreter::logStackElementSize); // Convert parameter count to bytes.
  __ addptr(rax, overhead_size);

#ifdef ASSERT
  Label limit_okay;
  // Verify that thread stack overflow limit is non-zero.
  __ cmpptr(stack_limit, (int32_t)NULL_WORD);
  __ jcc(Assembler::notEqual, limit_okay);
  __ stop("stack overflow limit is zero");
  __ bind(limit_okay);
#endif

  // Add locals/frame size to stack limit.
  __ addptr(rax, stack_limit);

  // Check against the current stack bottom.
  __ cmpptr(rsp, rax);

  __ jcc(Assembler::above, after_frame_check_pop);
  NOT_LP64(__ pop(rsi));  // get saved bcp

  // Restore sender's sp as SP. This is necessary if the sender's
  // frame is an extended compiled frame (see gen_c2i_adapter())
  // and safer anyway in case of JSR292 adaptations.
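  // (Note: the interpreter frame has not been built yet at this point, so
  // rbcp still holds the sender sp that was passed in at method entry.)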

  __ pop(rax); // return address must be moved if SP is changed
  __ mov(rsp, rbcp);
  __ push(rax);

  // Note: the restored frame is not necessarily interpreted.
  // Use the shared runtime version of the StackOverflowError.
  assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
  __ jump(ExternalAddress(StubRoutines::throw_StackOverflowError_entry()));
  // all done with frame size check
  __ bind(after_frame_check_pop);
  NOT_LP64(__ pop(rsi));

  // all done with frame size check
  __ bind(after_frame_check);
}

// Allocate monitor and lock method (asm interpreter)
//
// Args:
//      rbx: Method*
//      r14/rdi: locals
//
// Kills:
//      rax
//      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ...(param regs)
//      rscratch1, rscratch2 (scratch regs)
void TemplateInterpreterGenerator::lock_method() {
  // synchronize method
  const Address access_flags(rbx, Method::access_flags_offset());
  const Address monitor_block_top(
        rbp,
        frame::interpreter_frame_monitor_block_top_offset * wordSize);
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

#ifdef ASSERT
  {
    Label L;
    __ movl(rax, access_flags);
    __ testl(rax, JVM_ACC_SYNCHRONIZED);
    __ jcc(Assembler::notZero, L);
    __ stop("method doesn't need synchronization");
    __ bind(L);
  }
#endif // ASSERT

  // get synchronization object
  {
    Label done;
    __ movl(rax, access_flags);
    __ testl(rax, JVM_ACC_STATIC);
    // get receiver (assume this is frequent case)
    __ movptr(rax, Address(rlocals, Interpreter::local_offset_in_bytes(0)));
    __ jcc(Assembler::zero, done);
    __ load_mirror(rax, rbx);

#ifdef ASSERT
    {
      Label L;
      __ testptr(rax, rax);
      __ jcc(Assembler::notZero, L);
      __ stop("synchronization object is NULL");
      __ bind(L);
    }
#endif // ASSERT

    __ bind(done);
    __ resolve_for_write(OOP_NOT_NULL, rax);
  }

  // add space for monitor & lock
  __ subptr(rsp, entry_size); // add space for a monitor entry
  __ movptr(monitor_block_top, rsp);  // set new monitor block top
  // store object
  __ movptr(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax);
  const Register lockreg = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
  __ movptr(lockreg, rsp); // object address
  __ lock_object(lockreg);
}

// Generate a fixed interpreter frame. This is identical setup for
// interpreted methods and for native methods hence the shared code.
//
// Args:
//      rax: return address
//      rbx: Method*
//      r14/rdi: pointer to locals
//      r13/rsi: sender sp
//      rdx: cp cache
void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  // initialize fixed part of activation frame
  __ push(rax);            // save return address
  __ enter();              // save old & set new rbp
  __ push(rbcp);           // set sender sp
  __ push((int)NULL_WORD); // leave last_sp as null
  __ movptr(rbcp, Address(rbx, Method::const_offset()));    // get ConstMethod*
  __ lea(rbcp, Address(rbcp, ConstMethod::codes_offset())); // get codebase
  __ push(rbx);            // save Method*
  // Get mirror and store it in the frame as GC root for this Method*
  __ load_mirror(rdx, rbx);
  __ push(rdx);
  if (ProfileInterpreter) {
    Label method_data_continue;
    __ movptr(rdx, Address(rbx, in_bytes(Method::method_data_offset())));
    __ testptr(rdx, rdx);
    __ jcc(Assembler::zero, method_data_continue);
    __ addptr(rdx, in_bytes(MethodData::data_offset()));
    __ bind(method_data_continue);
    __ push(rdx);          // set the mdp (method data pointer)
  } else {
    __ push(0);
  }

  __ movptr(rdx, Address(rbx, Method::const_offset()));
  __ movptr(rdx, Address(rdx, ConstMethod::constants_offset()));
  __ movptr(rdx, Address(rdx, ConstantPool::cache_offset_in_bytes()));
  __ push(rdx);     // set constant pool cache
  __ push(rlocals); // set locals pointer
  if (native_call) {
    __ push(0);     // no bcp
  } else {
    __ push(rbcp);  // set bcp
  }
  __ push(0); // reserve word for pointer to expression stack bottom
  __ movptr(Address(rsp, 0), rsp); // set expression stack bottom
}

// End of helpers

// Method entry for java.lang.ref.Reference.get.
address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
  // Code: _aload_0, _getfield, _areturn
  // parameter size = 1
  //
  // The code that gets generated by this routine is split into 2 parts:
  //    1. The "intrinsified" code performing an ON_WEAK_OOP_REF load,
  //    2. The slow path - which is an expansion of the regular method entry.
  //
  // Notes:-
  // * An intrinsic is always executed, where an ON_WEAK_OOP_REF load is performed.
  // * We may jump to the slow path iff the receiver is null. If the
  //   Reference object is null then we no longer perform an ON_WEAK_OOP_REF load;
  //   thus we can use the regular method entry code to generate the NPE.
  //
  // rbx: Method*

  // r13: senderSP must preserve for slow path, set SP to it on fast path

  address entry = __ pc();

  const int referent_offset = java_lang_ref_Reference::referent_offset;
  guarantee(referent_offset > 0, "referent offset not initialized");

  Label slow_path;
  // rbx: method

  // Check if local 0 != NULL
  // If the receiver is null then it is OK to jump to the slow path.
  __ movptr(rax, Address(rsp, wordSize));

  __ testptr(rax, rax);
  __ jcc(Assembler::zero, slow_path);

  // rax: local 0
  // rbx: method (but can be used as scratch now)
  // rdx: scratch
  // rdi: scratch

  // Preserve the sender sp in case the load barrier
  // calls the runtime
  NOT_LP64(__ push(rsi));

  // Load the value of the referent field.
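  // (ON_WEAK_OOP_REF marks this as a java.lang.ref.Reference.referent load
  // so the GC barrier set can apply whatever weak-reference handling the
  // collector needs, e.g. G1's SATB pre-barrier keeping the referent alive.)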
  const Address field_address(rax, referent_offset);
  __ load_heap_oop(rax, field_address, /*tmp1*/ rbx, /*tmp_thread*/ rdx, ON_WEAK_OOP_REF);

  // _areturn
  const Register sender_sp = NOT_LP64(rsi) LP64_ONLY(r13);
  NOT_LP64(__ pop(rsi));  // get sender sp
  __ pop(rdi);            // get return address
  __ mov(rsp, sender_sp); // set sp to sender sp
  __ jmp(rdi);
  __ ret(0);

  // generate a vanilla interpreter entry as the slow path
  __ bind(slow_path);
  __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
  return entry;
}

void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
  // Quick & dirty stack overflow checking: bang the stack & handle trap.
  // Note that we do the banging after the frame is setup, since the exception
  // handling code expects to find a valid interpreter frame on the stack.
  // Doing the banging earlier fails if the caller frame is not an interpreter
  // frame.
  // (Also, the exception throwing code expects to unlock any synchronized
  // method receiver, so do the banging after locking the receiver.)

  // Bang each page in the shadow zone. We can't assume it's been done for
  // an interpreter frame with greater than a page of locals, so each page
  // needs to be checked. Only true for non-native.
  if (UseStackBanging) {
    const int page_size = os::vm_page_size();
    const int n_shadow_pages = ((int)JavaThread::stack_shadow_zone_size()) / page_size;
    const int start_page = native_call ? n_shadow_pages : 1;
    for (int pages = start_page; pages <= n_shadow_pages; pages++) {
      __ bang_stack_with_offset(pages*page_size);
    }
  }
}

// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the
// native method than the typical interpreter frame setup.
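// Compared with generate_normal_entry() below, no expression stack or bcp
// is maintained here; instead the frame reserves two extra words (a
// result-handler slot and an oop temp slot for a static method's mirror or
// a JNI oop result), and the outgoing C arguments are built up on top of
// the stack by the signature handler.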
address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;

  // rbx: Method*
  // rbcp: sender sp

  address entry_point = __ pc();

  const Address constMethod       (rbx, Method::const_offset());
  const Address access_flags      (rbx, Method::access_flags_offset());
  const Address size_of_parameters(rcx, ConstMethod::
                                        size_of_parameters_offset());


  // get parameter size (always needed)
  __ movptr(rcx, constMethod);
  __ load_unsigned_short(rcx, size_of_parameters);

  // native calls don't need the stack size check since they have no
  // expression stack and the arguments are already on the stack and
  // we only add a handful of words to the stack

  // rbx: Method*
  // rcx: size of parameters
  // rbcp: sender sp
  __ pop(rax); // get return address

  // for natives the size of locals is zero

  // compute beginning of parameters
  __ lea(rlocals, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));

  // add 2 zero-initialized slots for native calls
  // initialize result_handler slot
  __ push((int) NULL_WORD);
  // slot for oop temp
  // (static native method holder mirror/jni oop result)
  __ push((int) NULL_WORD);

  // initialize fixed part of activation frame
  generate_fixed_frame(true);

  // make sure method is native & not abstract
#ifdef ASSERT
  __ movl(rax, access_flags);
  {
    Label L;
    __ testl(rax, JVM_ACC_NATIVE);
    __ jcc(Assembler::notZero, L);
    __ stop("tried to execute non-native method as native");
    __ bind(L);
  }
  {
    Label L;
    __ testl(rax, JVM_ACC_ABSTRACT);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // Since at this point in the method invocation the exception handler
  // would try to exit the monitor of synchronized methods which hasn't
  // been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation will
  // check this flag.

  const Register thread1 = NOT_LP64(rax) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread1));
  const Address do_not_unlock_if_synchronized(thread1,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ movbool(do_not_unlock_if_synchronized, true);

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  bang_stack_shadow_pages(true);

  // reset the _do_not_unlock_if_synchronized flag
  NOT_LP64(__ get_thread(thread1));
  __ movbool(do_not_unlock_if_synchronized, false);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so the method is not locked if the counter overflows.
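  // (lock_method() carves the single BasicObjectLock slot out of the stack
  // below the fixed frame and locks either the receiver or, for static
  // methods, the holder class's mirror; see its definition earlier in this
  // file.)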
  if (synchronized) {
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    {
      Label L;
      __ movl(rax, access_flags);
      __ testl(rax, JVM_ACC_SYNCHRONIZED);
      __ jcc(Assembler::zero, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  {
    Label L;
    const Address monitor_block_top(rbp,
                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ movptr(rax, monitor_block_top);
    __ cmpptr(rax, rsp);
    __ jcc(Assembler::equal, L);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif

  // jvmti support
  __ notify_method_entry();

  // work registers
  const Register method = rbx;
  const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);
  const Register t      = NOT_LP64(rcx) LP64_ONLY(r11);

  // allocate space for parameters
  __ get_method(method);
  __ movptr(t, Address(method, Method::const_offset()));
  __ load_unsigned_short(t, Address(t, ConstMethod::size_of_parameters_offset()));

#ifndef _LP64
  __ shlptr(t, Interpreter::logStackElementSize); // Convert parameter count to bytes.
  __ addptr(t, 2*wordSize); // allocate two more slots for JNIEnv and possible mirror
  __ subptr(rsp, t);
  __ andptr(rsp, -(StackAlignmentInBytes)); // gcc needs 16 byte aligned stacks to do XMM intrinsics
#else
  __ shll(t, Interpreter::logStackElementSize);

  __ subptr(rsp, t);
  __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
  __ andptr(rsp, -16); // must be 16 byte boundary (see amd64 ABI)
#endif // _LP64

  // get signature handler
  {
    Label L;
    __ movptr(t, Address(method, Method::signature_handler_offset()));
    __ testptr(t, t);
    __ jcc(Assembler::notZero, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               method);
    __ get_method(method);
    __ movptr(t, Address(method, Method::signature_handler_offset()));
    __ bind(L);
  }

  // call signature handler
  assert(InterpreterRuntime::SignatureHandlerGenerator::from() == rlocals,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::to() == rsp,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == NOT_LP64(t) LP64_ONLY(rscratch1),
         "adjust this code");

  // The generated handlers do not touch RBX (the method oop).
  // However, large signatures cannot be cached and are generated
  // each time here. The slow-path generator can do a GC on return,
  // so we must reload it after the call.
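  // (The handler called here copies the Java arguments from the locals area
  // into C calling-convention locations, i.e. argument registers and the
  // outgoing stack area reserved above, and leaves the address of the
  // matching result handler in rax.)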
  __ call(t);
  __ get_method(method); // slow path can do a GC, reload RBX


  // result handler is in rax
  // set result handler
  __ movptr(Address(rbp,
                    (frame::interpreter_frame_result_handler_offset) * wordSize),
            rax);

  // pass mirror handle if static call
  {
    Label L;
    __ movl(t, Address(method, Method::access_flags_offset()));
    __ testl(t, JVM_ACC_STATIC);
    __ jcc(Assembler::zero, L);
    // get mirror
    __ load_mirror(t, method, rax);
    // copy mirror into activation frame
    __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize),
              t);
    // pass handle to mirror
#ifndef _LP64
    __ lea(t, Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
    __ movptr(Address(rsp, wordSize), t);
#else
    __ lea(c_rarg1,
           Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
#endif // _LP64
    __ bind(L);
  }

  // get native function entry point
  {
    Label L;
    __ movptr(rax, Address(method, Method::native_function_offset()));
    ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
    __ cmpptr(rax, unsatisfied.addr());
    __ jcc(Assembler::notEqual, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               method);
    __ get_method(method);
    __ movptr(rax, Address(method, Method::native_function_offset()));
    __ bind(L);
  }

  // pass JNIEnv
#ifndef _LP64
  __ get_thread(thread);
  __ lea(t, Address(thread, JavaThread::jni_environment_offset()));
  __ movptr(Address(rsp, 0), t);

  // set_last_Java_frame_before_call
  // It is enough that the pc()
  // points into the right code segment. It does not have to be the correct return pc.
  __ set_last_Java_frame(thread, noreg, rbp, __ pc());
#else
  __ lea(c_rarg0, Address(r15_thread, JavaThread::jni_environment_offset()));

  // It is enough that the pc() points into the right code
  // segment. It does not have to be the correct return pc.
  __ set_last_Java_frame(rsp, rbp, (address) __ pc());
#endif // _LP64

  // change thread state
#ifdef ASSERT
  {
    Label L;
    __ movl(t, Address(thread, JavaThread::thread_state_offset()));
    __ cmpl(t, _thread_in_Java);
    __ jcc(Assembler::equal, L);
    __ stop("Wrong thread state in native stub");
    __ bind(L);
  }
#endif

  // Change state to native

  __ movl(Address(thread, JavaThread::thread_state_offset()),
          _thread_in_native);

  // Call the native method.
  __ call(rax);
  // 32: result potentially in rdx:rax or ST0
  // 64: result potentially in rax or xmm0

  // Verify or restore cpu control state after JNI call
  __ restore_cpu_control_state_after_jni();

  // NOTE: The order of these pushes is known to frame::interpreter_frame_result
  // in order to extract the result of a method call. If the order of these
  // pushes change or anything else is added to the stack then the code in
  // interpreter_frame_result must also change.
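  // (The result type is not known at this point, so every possible return
  // location is preserved: on 64-bit both xmm0 (dtos) and rax (ltos) are
  // pushed; the 32-bit path below additionally checks the result handler to
  // decide whether ST0 must be saved.)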

#ifndef _LP64
  // save potential result in ST(0) & rdx:rax
  // (if result handler is the T_FLOAT or T_DOUBLE handler, result must be in ST0 -
  // the check is necessary to avoid potential Intel FPU overflow problems by saving/restoring 'empty' FPU registers)
  // It is safe to do this push because state is _thread_in_native and return address will be found
  // via _last_native_pc and not via _last_java_sp

  // NOTE: the order of these push(es) is known to frame::interpreter_frame_result.
  // If the order changes or anything else is added to the stack the code in
  // interpreter_frame_result will have to be changed.

  { Label L;
    Label push_double;
    ExternalAddress float_handler(AbstractInterpreter::result_handler(T_FLOAT));
    ExternalAddress double_handler(AbstractInterpreter::result_handler(T_DOUBLE));
    __ cmpptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset + 1)*wordSize),
              float_handler.addr());
    __ jcc(Assembler::equal, push_double);
    __ cmpptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset + 1)*wordSize),
              double_handler.addr());
    __ jcc(Assembler::notEqual, L);
    __ bind(push_double);
    __ push_d(); // FP values are returned using the FPU, so push FPU contents (even if UseSSE > 0).
    __ bind(L);
  }
#else
  __ push(dtos);
#endif // _LP64

  __ push(ltos);

  // change thread state
  NOT_LP64(__ get_thread(thread));
  __ movl(Address(thread, JavaThread::thread_state_offset()),
          _thread_in_native_trans);

  if (os::is_MP()) {
    if (UseMembar) {
      // Force this write out before the read below
      __ membar(Assembler::Membar_mask_bits(
           Assembler::LoadLoad | Assembler::LoadStore |
           Assembler::StoreLoad | Assembler::StoreStore));
    } else {
      // Write serialization page so VM thread can do a pseudo remote membar.
      // We use the current thread pointer to calculate a thread specific
      // offset to write to within the page. This minimizes bus traffic
      // due to cache line collision.
      __ serialize_memory(thread, rcx);
    }
  }

#ifndef _LP64
  if (AlwaysRestoreFPU) {
    // Make sure the control word is correct.
    __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
  }
#endif // _LP64

  // check for safepoint operation in progress and/or pending suspend requests
  {
    Label Continue;
    Label slow_path;

#ifndef _LP64
    __ safepoint_poll(slow_path, thread, noreg);
#else
    __ safepoint_poll(slow_path, r15_thread, rscratch1);
#endif

    __ cmpl(Address(thread, JavaThread::suspend_flags_offset()), 0);
    __ jcc(Assembler::equal, Continue);
    __ bind(slow_path);

    // Don't use call_VM as it will see a possible pending exception
    // and forward it and never return here preventing us from
    // clearing _last_native_pc down below. Also can't use
    // call_VM_leaf either as it will check to see if r13 & r14 are
    // preserved and correspond to the bcp/locals pointers. So we do a
    // runtime call by hand.
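    // (In effect, a sketch rather than literal code:
    //    JavaThread::check_special_condition_for_native_trans(thread);
    //  issued as a raw C call so any pending exception stays put until the
    //  frame is back in a state where it can actually be thrown.)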
    //
#ifndef _LP64
    __ push(thread);
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
                                            JavaThread::check_special_condition_for_native_trans)));
    __ increment(rsp, wordSize);
    __ get_thread(thread);
#else
    __ mov(c_rarg0, r15_thread);
    __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
    __ andptr(rsp, -16); // align stack as required by ABI
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
    __ mov(rsp, r12); // restore sp
    __ reinit_heapbase();
#endif // _LP64
    __ bind(Continue);
  }

  // change thread state
  __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_Java);

  // reset_last_Java_frame
  __ reset_last_Java_frame(thread, true);

  if (CheckJNICalls) {
    // clear_pending_jni_exception_check
    __ movptr(Address(thread, JavaThread::pending_jni_exception_check_fn_offset()), NULL_WORD);
  }

  // reset handle block
  __ movptr(t, Address(thread, JavaThread::active_handles_offset()));
  __ movl(Address(t, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);

  // If result is an oop unbox and store it in frame where gc will see it
  // and result handler will pick it up

  {
    Label no_oop, not_weak, store_result;
    __ lea(t, ExternalAddress(AbstractInterpreter::result_handler(T_OBJECT)));
    __ cmpptr(t, Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize));
    __ jcc(Assembler::notEqual, no_oop);
    // retrieve result
    __ pop(ltos);
    // Unbox oop result, e.g. JNIHandles::resolve value.
    __ resolve_jobject(rax /* value */,
                       thread /* thread */,
                       t /* tmp */);
    __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize), rax);
    // keep stack depth as expected by pushing oop which will eventually be discarded
    __ push(ltos);
    __ bind(no_oop);
  }


  {
    Label no_reguard;
    __ cmpl(Address(thread, JavaThread::stack_guard_state_offset()),
            JavaThread::stack_guard_yellow_reserved_disabled);
    __ jcc(Assembler::notEqual, no_reguard);

    __ pusha(); // XXX only save smashed registers
#ifndef _LP64
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
    __ popa();
#else
    __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
    __ andptr(rsp, -16); // align stack as required by ABI
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
    __ mov(rsp, r12); // restore sp
    __ popa(); // XXX only restore smashed registers
    __ reinit_heapbase();
#endif // _LP64

    __ bind(no_reguard);
  }


  // The method register is junk from after the thread_in_native transition
  // until here. Also can't call_VM until the bcp has been
  // restored. Need bcp for throwing exception below so get it now.
  __ get_method(method);

  // restore to have legal interpreter frame, i.e., bci == 0 <=> code_base()
  __ movptr(rbcp, Address(method, Method::const_offset()));   // get ConstMethod*
  __ lea(rbcp, Address(rbcp, ConstMethod::codes_offset()));   // get codebase

  // handle exceptions (exception handling will handle unlocking!)
  {
    Label L;
    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
    __ jcc(Assembler::zero, L);
    // Note: At some point we may want to unify this with the code
    // used in call_VM_base(); i.e., we should use the
    // StubRoutines::forward_exception code. For now this doesn't work
    // here because the rsp is not correctly set at this point.
    __ MacroAssembler::call_VM(noreg,
                               CAST_FROM_FN_PTR(address,
                               InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  // do unlocking if necessary
  {
    Label L;
    __ movl(t, Address(method, Method::access_flags_offset()));
    __ testl(t, JVM_ACC_SYNCHRONIZED);
    __ jcc(Assembler::zero, L);
    // the code below should be shared with interpreter macro
    // assembler implementation
    {
      Label unlock;
      // BasicObjectLock will be first in list, since this is a
      // synchronized method. However, need to check that the object
      // has not been unlocked by an explicit monitorexit bytecode.
      const Address monitor(rbp,
                            (intptr_t)(frame::interpreter_frame_initial_sp_offset *
                                       wordSize - (int)sizeof(BasicObjectLock)));

      const Register regmon = NOT_LP64(rdx) LP64_ONLY(c_rarg1);

      // monitor expected in c_rarg1 for slow unlock path
      __ lea(regmon, monitor); // address of first monitor

      __ movptr(t, Address(regmon, BasicObjectLock::obj_offset_in_bytes()));
      __ testptr(t, t);
      __ jcc(Assembler::notZero, unlock);

      // Entry already unlocked, need to throw exception
      __ MacroAssembler::call_VM(noreg,
                                 CAST_FROM_FN_PTR(address,
                   InterpreterRuntime::throw_illegal_monitor_state_exception));
      __ should_not_reach_here();

      __ bind(unlock);
      __ unlock_object(regmon);
    }
    __ bind(L);
  }

  // jvmti support
  // Note: This must happen _after_ handling/throwing any exceptions since
  // the exception handler code notifies the runtime of method exits
  // too. If this happens before, method entry/exit notifications are
  // not properly paired (was bug - gri 11/22/99).
  __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);

  // restore potential result in edx:eax, call result handler to
  // restore potential result in ST0 & handle result

  __ pop(ltos);
  LP64_ONLY( __ pop(dtos));

  __ movptr(t, Address(rbp,
                       (frame::interpreter_frame_result_handler_offset) * wordSize));
  __ call(t);

  // remove activation
  __ movptr(t, Address(rbp,
                       frame::interpreter_frame_sender_sp_offset *
                       wordSize)); // get sender sp
  __ leave();  // remove frame anchor
  __ pop(rdi); // get return address
  __ mov(rsp, t); // set sp to sender sp
  __ jmp(rdi);

  if (inc_counter) {
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(continue_after_compile);
  }

  return entry_point;
}

// Abstract method entry
// Attempt to execute abstract method.
// Throw exception.
address TemplateInterpreterGenerator::generate_abstract_entry(void) {

  address entry_point = __ pc();

  // abstract method entry

  // pop return address, reset last_sp to NULL
  __ empty_expression_stack();
  __ restore_bcp();    // rsi must be correct for exception handler   (was destroyed)
  __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)

  // throw exception
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodErrorWithMethod), rbx);
  // the call_VM checks for exception, so we should never return here.
  __ should_not_reach_here();

  return entry_point;
}

//
// Generic interpreted method entry to (asm) interpreter
//
address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;

  // ebx: Method*
  // rbcp: sender sp
  address entry_point = __ pc();

  const Address constMethod(rbx, Method::const_offset());
  const Address access_flags(rbx, Method::access_flags_offset());
  const Address size_of_parameters(rdx,
                                   ConstMethod::size_of_parameters_offset());
  const Address size_of_locals(rdx, ConstMethod::size_of_locals_offset());


  // get parameter size (always needed)
  __ movptr(rdx, constMethod);
  __ load_unsigned_short(rcx, size_of_parameters);

  // rbx: Method*
  // rcx: size of parameters
  // rbcp: sender_sp (could differ from sp+wordSize if we were called via c2i)

  __ load_unsigned_short(rdx, size_of_locals); // get size of locals in words
  __ subl(rdx, rcx); // rdx = no. of additional locals

  // YYY
  //   __ incrementl(rdx);
  //   __ andl(rdx, -2);

  // see if we've got enough room on the stack for locals plus overhead.
  generate_stack_overflow_check();

  // get return address
  __ pop(rax);

  // compute beginning of parameters
  __ lea(rlocals, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));

  // rdx - # of additional locals
  // allocate space for locals
  // explicitly initialize locals
  {
    Label exit, loop;
    __ testl(rdx, rdx);
    __ jcc(Assembler::lessEqual, exit); // do nothing if rdx <= 0
    __ bind(loop);
    __ push((int) NULL_WORD); // initialize local variables
    __ decrementl(rdx);       // until everything initialized
    __ jcc(Assembler::greater, loop);
    __ bind(exit);
  }

  // initialize fixed part of activation frame
  generate_fixed_frame(false);

  // make sure method is not native & not abstract
#ifdef ASSERT
  __ movl(rax, access_flags);
  {
    Label L;
    __ testl(rax, JVM_ACC_NATIVE);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute native method as non-native");
    __ bind(L);
  }
  {
    Label L;
    __ testl(rax, JVM_ACC_ABSTRACT);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // Since at this point in the method invocation the exception
  // handler would try to exit the monitor of synchronized methods
  // which hasn't been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation
  // will check this flag.
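  // (For example: if bang_stack_shadow_pages() or the counter-overflow path
  // below ends up throwing, remove_activation() must not try to unlock a
  // monitor that lock_method() never entered.)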

  const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread));
  const Address do_not_unlock_if_synchronized(thread,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ movbool(do_not_unlock_if_synchronized, true);

  __ profile_parameters_type(rax, rcx, rdx);
  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  Label profile_method;
  Label profile_method_continue;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow,
                          &profile_method,
                          &profile_method_continue);
    if (ProfileInterpreter) {
      __ bind(profile_method_continue);
    }
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  // check for synchronized interpreted methods
  bang_stack_shadow_pages(false);

  // reset the _do_not_unlock_if_synchronized flag
  NOT_LP64(__ get_thread(thread));
  __ movbool(do_not_unlock_if_synchronized, false);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so the method is not locked if the counter overflows.
  if (synchronized) {
    // Allocate monitor and lock method
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    {
      Label L;
      __ movl(rax, access_flags);
      __ testl(rax, JVM_ACC_SYNCHRONIZED);
      __ jcc(Assembler::zero, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  {
    Label L;
    const Address monitor_block_top (rbp,
                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ movptr(rax, monitor_block_top);
    __ cmpptr(rax, rsp);
    __ jcc(Assembler::equal, L);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif

  // jvmti support
  __ notify_method_entry();

  __ dispatch_next(vtos);

  // invocation counter overflow
  if (inc_counter) {
    if (ProfileInterpreter) {
      // We have decided to profile this method in the interpreter
      __ bind(profile_method);
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
      __ set_method_data_pointer_for_bcp();
      __ get_method(rbx);
      __ jmp(profile_method_continue);
    }
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(continue_after_compile);
  }

  return entry_point;
}

//-----------------------------------------------------------------------------
// Exceptions

void TemplateInterpreterGenerator::generate_throw_exception() {
  // Entry point in previous activation (i.e., if the caller was
  // interpreted)
  Interpreter::_rethrow_exception_entry = __ pc();
  // Restore sp to interpreter_frame_last_sp even though we are going
  // to empty the expression stack for the exception processing.
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
  // rax: exception
  // rdx: return address/pc that threw exception
  __ restore_bcp();    // r13/rsi points to call/send
  __ restore_locals();
  LP64_ONLY(__ reinit_heapbase());  // restore r12 as heapbase.
  // Entry point for exceptions thrown within interpreter code
  Interpreter::_throw_exception_entry = __ pc();
  // expression stack is undefined here
  // rax: exception
  // r13/rsi: exception bcp
  __ verify_oop(rax);
  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  LP64_ONLY(__ mov(c_rarg1, rax));

  // expression stack must be empty before entering the VM in case of
  // an exception
  __ empty_expression_stack();
  // find exception handler address and preserve exception oop
  __ call_VM(rdx,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::exception_handler_for_exception),
             rarg);
  // rax: exception handler entry point
  // rdx: preserved exception oop
  // r13/rsi: bcp for exception handler
  __ push_ptr(rdx); // push exception which is now the only value on the stack
  __ jmp(rax);      // jump to exception handler (may be _remove_activation_entry!)

  // If the exception is not handled in the current frame the frame is
  // removed and the exception is rethrown (i.e. exception
  // continuation is _rethrow_exception).
  //
  // Note: At this point the bci is still the bci for the instruction
  // which caused the exception and the expression stack is
  // empty. Thus, for any VM calls at this point, GC will find a legal
  // oop map (with empty expression stack).

  // In current activation
  // tos: exception
  // esi: exception bcp

  //
  // JVMTI PopFrame support
  //

  Interpreter::_remove_activation_preserving_args_entry = __ pc();
  __ empty_expression_stack();
  // Set the popframe_processing bit in pending_popframe_condition
  // indicating that we are currently handling popframe, so that
  // call_VMs that may happen later do not trigger new popframe
  // handling cycles.
  const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread));
  __ movl(rdx, Address(thread, JavaThread::popframe_condition_offset()));
  __ orl(rdx, JavaThread::popframe_processing_bit);
  __ movl(Address(thread, JavaThread::popframe_condition_offset()), rdx);

  {
    // Check to see whether we are returning to a deoptimized frame.
    // (The PopFrame call ensures that the caller of the popped frame is
    // either interpreted or compiled and deoptimizes it if compiled.)
    // In this case, we can't call dispatch_next() after the frame is
    // popped, but instead must save the incoming arguments and restore
    // them after deoptimization has occurred.
    //
    // Note that we don't compare the return PC against the
    // deoptimization blob's unpack entry because of the presence of
    // adapter frames in C2.
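    // (InterpreterRuntime::interpreter_contains(pc) returns nonzero when
    // the return address lies in interpreter code; since the PopFrame call
    // has already deoptimized any compiled caller, zero here means the
    // caller is a deoptimized frame.)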
    Label caller_not_deoptimized;
    Register rarg = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
    __ movptr(rarg, Address(rbp, frame::return_addr_offset * wordSize));
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                               InterpreterRuntime::interpreter_contains), rarg);
    __ testl(rax, rax);
    __ jcc(Assembler::notZero, caller_not_deoptimized);

    // Compute size of arguments for saving when returning to
    // deoptimized caller
    __ get_method(rax);
    __ movptr(rax, Address(rax, Method::const_offset()));
    __ load_unsigned_short(rax, Address(rax, in_bytes(ConstMethod::
                                                size_of_parameters_offset())));
    __ shll(rax, Interpreter::logStackElementSize);
    __ restore_locals();
    __ subptr(rlocals, rax);
    __ addptr(rlocals, wordSize);
    // Save these arguments
    NOT_LP64(__ get_thread(thread));
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                                           Deoptimization::
                                           popframe_preserve_args),
                          thread, rax, rlocals);

    __ remove_activation(vtos, rdx,
                         /* throw_monitor_exception */ false,
                         /* install_monitor_exception */ false,
                         /* notify_jvmdi */ false);

    // Inform deoptimization that it is responsible for restoring
    // these arguments
    NOT_LP64(__ get_thread(thread));
    __ movl(Address(thread, JavaThread::popframe_condition_offset()),
            JavaThread::popframe_force_deopt_reexecution_bit);

    // Continue in deoptimization handler
    __ jmp(rdx);

    __ bind(caller_not_deoptimized);
  }

  __ remove_activation(vtos, rdx, /* rdx result (retaddr) is not used */
                       /* throw_monitor_exception */ false,
                       /* install_monitor_exception */ false,
                       /* notify_jvmdi */ false);

  // Finish with popframe handling
  // A previous I2C followed by a deoptimization might have moved the
  // outgoing arguments further up the stack. PopFrame expects the
  // mutations to those outgoing arguments to be preserved and other
  // constraints basically require this frame to look exactly as
  // though it had previously invoked an interpreted activation with
  // no space between the top of the expression stack (current
  // last_sp) and the top of stack. Rather than force deopt to
  // maintain this kind of invariant all the time we call a small
  // fixup routine to move the mutated arguments onto the top of our
  // expression stack if necessary.
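  // (The call below passes the current stack top and the saved last_sp so
  // the runtime helper can copy the possibly mutated outgoing arguments to
  // where this frame's expression stack expects them, as described above.)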
#ifndef _LP64
  __ mov(rax, rsp);
  __ movptr(rbx, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ get_thread(thread);
  // PC must point into interpreter here
  __ set_last_Java_frame(thread, noreg, rbp, __ pc());
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), thread, rax, rbx);
  __ get_thread(thread);
#else
  __ mov(c_rarg1, rsp);
  __ movptr(c_rarg2, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  // PC must point into interpreter here
  __ set_last_Java_frame(noreg, rbp, __ pc());
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), r15_thread, c_rarg1, c_rarg2);
#endif
  __ reset_last_Java_frame(thread, true);

  // Restore the last_sp and null it out
  __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);

  __ restore_bcp();
  __ restore_locals();
  // The method data pointer was incremented already during
  // call profiling. We have to restore the mdp for the current bcp.
  if (ProfileInterpreter) {
    __ set_method_data_pointer_for_bcp();
  }

  // Clear the popframe condition flag
  NOT_LP64(__ get_thread(thread));
  __ movl(Address(thread, JavaThread::popframe_condition_offset()),
          JavaThread::popframe_inactive);

#if INCLUDE_JVMTI
  {
    Label L_done;
    const Register local0 = rlocals;

    __ cmpb(Address(rbcp, 0), Bytecodes::_invokestatic);
    __ jcc(Assembler::notEqual, L_done);

    // The member name argument must be restored if _invokestatic is
    // re-executed after a PopFrame call. Detect such a case in the
    // InterpreterRuntime function and return the member name
    // argument, or NULL.
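    //
    // (Roughly: the runtime inspects the invokestatic being re-executed
    // at the current bcp; if it is one of the MethodHandle intrinsics
    // that carries a MemberName argument, it returns the oop that must
    // be written back into the locals, otherwise NULL. A loose
    // description only -- see
    // InterpreterRuntime::member_name_arg_or_null for the details.)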

    __ get_method(rdx);
    __ movptr(rax, Address(local0, 0));
    __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), rax, rdx, rbcp);

    __ testptr(rax, rax);
    __ jcc(Assembler::zero, L_done);

    // Write the member name argument back into the first local slot.
    __ movptr(Address(local0, 0), rax);
    __ bind(L_done);
  }
#endif // INCLUDE_JVMTI

  __ dispatch_next(vtos);
  // end of PopFrame support

  Interpreter::_remove_activation_entry = __ pc();

  // preserve exception over this code sequence
  __ pop_ptr(rax);
  NOT_LP64(__ get_thread(thread));
  __ movptr(Address(thread, JavaThread::vm_result_offset()), rax);
  // remove the activation (without doing throws on illegalMonitorExceptions)
  __ remove_activation(vtos, rdx,
                       /* throw_monitor_exception */ false,
                       /* install_monitor_exception */ true,
                       /* notify_jvmdi */ false);
  // restore exception
  NOT_LP64(__ get_thread(thread));
  __ get_vm_result(rax, thread);

  // In between activations - previous activation type unknown yet
  // compute continuation point - the continuation point expects the
  // following registers set up:
  //
  // rax: exception
  // rdx: return address/pc that threw exception
  // rsp: expression stack of caller
  // rbp: ebp of caller
  __ push(rax); // save exception
  __ push(rdx); // save return address
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                          SharedRuntime::exception_handler_for_return_address),
                        thread, rdx);
  __ mov(rbx, rax); // save exception handler
  __ pop(rdx);      // restore return address
  __ pop(rax);      // restore exception
  // Note that an "issuing PC" is actually the next PC after the call
  __ jmp(rbx); // jump to exception handler of caller
}


//
// JVMTI ForceEarlyReturn support
//
address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
  address entry = __ pc();

  __ restore_bcp();
  __ restore_locals();
  __ empty_expression_stack();
  __ load_earlyret_value(state); // 32 bits returns value in rdx, so don't reuse

  const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread));
  __ movptr(rcx, Address(thread, JavaThread::jvmti_thread_state_offset()));
  Address cond_addr(rcx, JvmtiThreadState::earlyret_state_offset());

  // Clear the earlyret state
  __ movl(cond_addr, JvmtiThreadState::earlyret_inactive);

  __ remove_activation(state, rsi,
                       false, /* throw_monitor_exception */
                       false, /* install_monitor_exception */
                       true); /* notify_jvmdi */
  __ jmp(rsi);

  return entry;
} // end of ForceEarlyReturn support


//-----------------------------------------------------------------------------
// Helper for vtos entry point generation

void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
                                                         address& bep,
                                                         address& cep,
                                                         address& sep,
                                                         address& aep,
                                                         address& iep,
                                                         address& lep,
                                                         address& fep,
                                                         address& dep,
                                                         address& vep) {
  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
  Label L;
  aep = __ pc(); __ push_ptr();   __ jmp(L);
#ifndef _LP64
  fep = __ pc(); __ push(ftos);   __ jmp(L);
  dep = __ pc(); __ push(dtos);   __ jmp(L);
#else
  fep = __ pc(); __ push_f(xmm0); __ jmp(L);
  dep = __ pc(); __ push_d(xmm0); __ jmp(L);
#endif // _LP64
  lep = __ pc(); __ push_l();     __ jmp(L);
  bep = cep = sep =
  iep = __ pc(); __ push_i();
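  // byte, char, short and int share one entry because all four are
  // cached in rax; that entry falls through to L rather than jumping.
  // The vtos entry below pushes nothing since there is no cached
  // top-of-stack value to spill.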
  vep = __ pc();
  __ bind(L);
  generate_and_dispatch(t);
}

//-----------------------------------------------------------------------------

// Non-product code
#ifndef PRODUCT

address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
  address entry = __ pc();

#ifndef _LP64
  // prepare expression stack
  __ pop(rcx);    // pop return address so expression stack is 'pure'
  __ push(state); // save tosca

  // pass tosca registers as arguments & call tracer
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode), rcx, rax, rdx);
  __ mov(rcx, rax); // make sure return address is not destroyed by pop(state)
  __ pop(state);    // restore tosca

  // return
  __ jmp(rcx);
#else
  __ push(state);
  __ push(c_rarg0);
  __ push(c_rarg1);
  __ push(c_rarg2);
  __ push(c_rarg3);
  __ mov(c_rarg2, rax);  // Pass itos
#ifdef _WIN64
  __ movflt(xmm3, xmm0); // Pass ftos
#endif
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode),
             c_rarg1, c_rarg2, c_rarg3);
  __ pop(c_rarg3);
  __ pop(c_rarg2);
  __ pop(c_rarg1);
  __ pop(c_rarg0);
  __ pop(state);
  __ ret(0); // return from result handler
#endif // _LP64

  return entry;
}

void TemplateInterpreterGenerator::count_bytecode() {
  __ incrementl(ExternalAddress((address) &BytecodeCounter::_counter_value));
}

void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
  __ incrementl(ExternalAddress((address) &BytecodeHistogram::_counters[t->bytecode()]));
}

void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
  // Maintain a rolling pair index: shift out the older bytecode, or in
  // the current one, then bump the counter for the resulting pair.
  __ mov32(rbx, ExternalAddress((address) &BytecodePairHistogram::_index));
  __ shrl(rbx, BytecodePairHistogram::log2_number_of_codes);
  __ orl(rbx,
         ((int) t->bytecode()) <<
         BytecodePairHistogram::log2_number_of_codes);
  __ mov32(ExternalAddress((address) &BytecodePairHistogram::_index), rbx);
  __ lea(rscratch1, ExternalAddress((address) BytecodePairHistogram::_counters));
  __ incrementl(Address(rscratch1, rbx, Address::times_4));
}


void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
  // Call a little run-time stub to avoid blow-up for each bytecode.
  // The run-time stub saves the right registers, depending on
  // the tosca in-state for the given template.

  assert(Interpreter::trace_code(t->tos_in()) != NULL,
         "entry must have been generated");
#ifndef _LP64
  __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
#else
  __ mov(r12, rsp);    // remember sp (can only use r12 if not using call_VM)
  __ andptr(rsp, -16); // align stack as required by ABI
  __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
  __ mov(rsp, r12);    // restore sp
  __ reinit_heapbase();
#endif // _LP64
}


void TemplateInterpreterGenerator::stop_interpreter_at() {
  Label L;
  __ cmp32(ExternalAddress((address) &BytecodeCounter::_counter_value),
           StopInterpreterAt);
  __ jcc(Assembler::notEqual, L);
  __ int3(); // trap into the debugger once the bytecode count is reached
  __ bind(L);
}
#endif // !PRODUCT