/*
 * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateInterpreterGenerator.hpp"
#include "interpreter/templateTable.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

#define __ _masm->

// Size of interpreter code. Increase if too small. Interpreter will
// fail with a guarantee ("not enough space for interpreter generation")
// if too small.
// Run with +PrintInterpreter to get the VM to print out the size.
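// For example (a hedged sketch; PrintInterpreter is a diagnostic option in
// most builds, so the unlock flag may be required):
//
//   java -XX:+UnlockDiagnosticVMOptions -XX:+PrintInterpreter -version
//
// prints the generated interpreter, including its total code size, which can
// be compared against InterpreterCodeSize below.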
// Max size with JVMTI
#ifdef AMD64
int TemplateInterpreter::InterpreterCodeSize = JVMCI_ONLY(268) NOT_JVMCI(256) * 1024;
#else
int TemplateInterpreter::InterpreterCodeSize = 224 * 1024;
#endif // AMD64

// Global Register Names
static const Register rbcp    = LP64_ONLY(r13) NOT_LP64(rsi);
static const Register rlocals = LP64_ONLY(r14) NOT_LP64(rdi);

const int method_offset = frame::interpreter_frame_method_offset * wordSize;
const int bcp_offset    = frame::interpreter_frame_bcp_offset    * wordSize;
const int locals_offset = frame::interpreter_frame_locals_offset * wordSize;


//-----------------------------------------------------------------------------

address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();

#ifdef ASSERT
  {
    Label L;
    __ lea(rax, Address(rbp,
                        frame::interpreter_frame_monitor_block_top_offset *
                        wordSize));
    __ cmpptr(rax, rsp); // rax = maximal rsp for current rbp (stack
                         // grows negative)
    __ jcc(Assembler::aboveEqual, L); // check if frame is complete
    __ stop ("interpreter frame not set up");
    __ bind(L);
  }
#endif // ASSERT
  // Restore bcp under the assumption that the current frame is still
  // interpreted
  __ restore_bcp();

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // throw exception
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::throw_StackOverflowError));
  return entry;
}

address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler() {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // setup parameters
  // ??? convention: expect aberrant index in register ebx
  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  // Pass array to create more detailed exceptions.
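  // Note: call_VM implicitly supplies the current thread as the first
  // argument of the runtime routine, so rarg (the array oop) and rbx (the
  // aberrant index, per the convention noted above) arrive as the second
  // and third arguments.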
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::
                              throw_ArrayIndexOutOfBoundsException),
             rarg, rbx);
  return entry;
}

address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();

  // object is at TOS
  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  __ pop(rarg);

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();

  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::
                              throw_ClassCastException),
             rarg);
  return entry;
}

address TemplateInterpreterGenerator::generate_exception_handler_common(
        const char* name, const char* message, bool pass_oop) {
  assert(!pass_oop || message == NULL, "either oop or message but not both");
  address entry = __ pc();

  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  Register rarg2 = NOT_LP64(rbx) LP64_ONLY(c_rarg2);

  if (pass_oop) {
    // object is at TOS
    __ pop(rarg2);
  }
  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // setup parameters
  __ lea(rarg, ExternalAddress((address)name));
  if (pass_oop) {
    __ call_VM(rax, CAST_FROM_FN_PTR(address,
                                     InterpreterRuntime::
                                     create_klass_exception),
               rarg, rarg2);
  } else {
    __ lea(rarg2, ExternalAddress((address)message));
    __ call_VM(rax,
               CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception),
               rarg, rarg2);
  }
  // throw exception
  __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
  return entry;
}

address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
  address entry = __ pc();

#ifndef _LP64
#ifdef COMPILER2
  // The FPU stack is clean if UseSSE >= 2 but must be cleaned in other cases
  if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) {
    for (int i = 1; i < 8; i++) {
      __ ffree(i);
    }
  } else if (UseSSE < 2) {
    __ empty_FPU_stack();
  }
#endif // COMPILER2
  if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) {
    __ MacroAssembler::verify_FPU(1, "generate_return_entry_for compiled");
  } else {
    __ MacroAssembler::verify_FPU(0, "generate_return_entry_for compiled");
  }

  if (state == ftos) {
    __ MacroAssembler::verify_FPU(UseSSE >= 1 ? 0 : 1, "generate_return_entry_for in interpreter");
  } else if (state == dtos) {
    __ MacroAssembler::verify_FPU(UseSSE >= 2 ? 0 : 1, "generate_return_entry_for in interpreter");
  }
#endif // _LP64

  // Restore stack bottom in case i2c adjusted stack
  __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  // and NULL it as marker that esp is now tos until next java call
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);

  __ restore_bcp();
  __ restore_locals();

  if (state == atos) {
    Register mdp = rbx;
    Register tmp = rcx;
    __ profile_return_type(mdp, rax, tmp);
  }

  const Register cache = rbx;
  const Register index = rcx;
  __ get_cache_and_index_at_bcp(cache, index, 1, index_size);

  const Register flags = cache;
  __ movl(flags, Address(cache, index, Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
  __ andl(flags, ConstantPoolCacheEntry::parameter_size_mask);
  __ lea(rsp, Address(rsp, flags, Interpreter::stackElementScale()));

  const Register java_thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
  if (JvmtiExport::can_pop_frame()) {
    NOT_LP64(__ get_thread(java_thread));
    __ check_and_handle_popframe(java_thread);
  }
  if (JvmtiExport::can_force_early_return()) {
    NOT_LP64(__ get_thread(java_thread));
    __ check_and_handle_earlyret(java_thread);
  }

  __ dispatch_next(state, step);

  return entry;
}


address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step, address continuation) {
  address entry = __ pc();

#ifndef _LP64
  if (state == ftos) {
    __ MacroAssembler::verify_FPU(UseSSE >= 1 ? 0 : 1, "generate_deopt_entry_for in interpreter");
  } else if (state == dtos) {
    __ MacroAssembler::verify_FPU(UseSSE >= 2 ? 0 : 1, "generate_deopt_entry_for in interpreter");
  }
#endif // _LP64

  // NULL last_sp until next java call
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
  __ restore_bcp();
  __ restore_locals();
  const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread));
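  // A JVMCI compiler may deoptimize at a synchronized method's entry before
  // the monitor has actually been acquired; the deoptee records that fact in
  // the thread's pending_monitorenter flag. The block below then takes the
  // lock on behalf of the deoptimized frame before normal dispatch resumes
  // (a reading aid; the authoritative description is the comment below).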
#if INCLUDE_JVMCI
  // Check if we need to take lock at entry of synchronized method.  This can
  // only occur on method entry so emit it only for vtos with step 0.
  if ((EnableJVMCI || UseAOT) && state == vtos && step == 0) {
    Label L;
    __ cmpb(Address(thread, JavaThread::pending_monitorenter_offset()), 0);
    __ jcc(Assembler::zero, L);
    // Clear flag.
    __ movb(Address(thread, JavaThread::pending_monitorenter_offset()), 0);
    // Satisfy calling convention for lock_method().
    __ get_method(rbx);
    // Take lock.
    lock_method();
    __ bind(L);
  } else {
#ifdef ASSERT
    if (EnableJVMCI) {
      Label L;
      __ cmpb(Address(r15_thread, JavaThread::pending_monitorenter_offset()), 0);
      __ jccb(Assembler::zero, L);
      __ stop("unexpected pending monitor in deopt entry");
      __ bind(L);
    }
#endif
  }
#endif
  // handle exceptions
  {
    Label L;
    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
    __ jcc(Assembler::zero, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }
  if (continuation == NULL) {
    __ dispatch_next(state, step);
  } else {
    __ jump_to_entry(continuation);
  }
  return entry;
}

address TemplateInterpreterGenerator::generate_result_handler_for(
        BasicType type) {
  address entry = __ pc();
  switch (type) {
  case T_BOOLEAN: __ c2bool(rax);            break;
#ifndef _LP64
  case T_CHAR   : __ andptr(rax, 0xFFFF);    break;
#else
  case T_CHAR   : __ movzwl(rax, rax);       break;
#endif // _LP64
  case T_BYTE   : __ sign_extend_byte(rax);  break;
  case T_SHORT  : __ sign_extend_short(rax); break;
  case T_INT    : /* nothing to do */        break;
  case T_LONG   : /* nothing to do */        break;
  case T_VOID   : /* nothing to do */        break;
#ifndef _LP64
  case T_DOUBLE :
  case T_FLOAT  :
    { const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp();
      __ pop(t);                            // remove return address first
      // Must return a result for interpreter or compiler. In SSE
      // mode, results are returned in xmm0 and the FPU stack must
      // be empty.
      if (type == T_FLOAT && UseSSE >= 1) {
        // Load ST0
        __ fld_d(Address(rsp, 0));
        // Store as float and empty fpu stack
        __ fstp_s(Address(rsp, 0));
        // and reload
        __ movflt(xmm0, Address(rsp, 0));
      } else if (type == T_DOUBLE && UseSSE >= 2 ) {
        __ movdbl(xmm0, Address(rsp, 0));
      } else {
        // restore ST0
        __ fld_d(Address(rsp, 0));
      }
      // and pop the temp
      __ addptr(rsp, 2 * wordSize);
      __ push(t);                           // restore return address
    }
    break;
#else
  case T_FLOAT  : /* nothing to do */        break;
  case T_DOUBLE : /* nothing to do */        break;
#endif // _LP64

  case T_OBJECT :
    // retrieve result from frame
    __ movptr(rax, Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize));
    // and verify it
    __ verify_oop(rax);
    break;
  default       : ShouldNotReachHere();
  }
  __ ret(0);                                   // return from result handler
  return entry;
}

address TemplateInterpreterGenerator::generate_safept_entry_for(
        TosState state,
        address runtime_entry) {
  address entry = __ pc();
  __ push(state);
  __ call_VM(noreg, runtime_entry);
  __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
  return entry;
}



// Helpers for commoning out cases in the various types of method entries.
//


// increment invocation count & check for overflow
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test
//
// rbx: method
// rcx: invocation counter
//
void TemplateInterpreterGenerator::generate_counter_incr(
        Label* overflow,
        Label* profile_method,
        Label* profile_method_continue) {
  Label done;
  // Note: In tiered we increment either counters in Method* or in MDO
  // depending on whether we're profiling or not.
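  // Roughly, increment_mask_and_jump implements the following (a simplified
  // sketch, not the exact emitted code):
  //
  //   counter += count_increment;      // bump the counter
  //   if ((counter & mask) == 0) {     // threshold bits all carried out?
  //     goto *overflow;
  //   }
  //
  // where 'mask' is loaded from the MDO or MethodCounters and encodes the
  // compile/profile threshold, giving the 'sticky' test noted above.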
  if (TieredCompilation) {
    int increment = InvocationCounter::count_increment;
    Label no_mdo;
    if (ProfileInterpreter) {
      // Are we profiling?
      __ movptr(rax, Address(rbx, Method::method_data_offset()));
      __ testptr(rax, rax);
      __ jccb(Assembler::zero, no_mdo);
      // Increment counter in the MDO
      const Address mdo_invocation_counter(rax, in_bytes(MethodData::invocation_counter_offset()) +
                                                in_bytes(InvocationCounter::counter_offset()));
      const Address mask(rax, in_bytes(MethodData::invoke_mask_offset()));
      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rcx, false, Assembler::zero, overflow);
      __ jmp(done);
    }
    __ bind(no_mdo);
    // Increment counter in MethodCounters
    const Address invocation_counter(rax,
                                     MethodCounters::invocation_counter_offset() +
                                     InvocationCounter::counter_offset());
    __ get_method_counters(rbx, rax, done);
    const Address mask(rax, in_bytes(MethodCounters::invoke_mask_offset()));
    __ increment_mask_and_jump(invocation_counter, increment, mask, rcx,
                               false, Assembler::zero, overflow);
    __ bind(done);
  } else { // not TieredCompilation
    const Address backedge_counter(rax,
                                   MethodCounters::backedge_counter_offset() +
                                   InvocationCounter::counter_offset());
    const Address invocation_counter(rax,
                                     MethodCounters::invocation_counter_offset() +
                                     InvocationCounter::counter_offset());

    __ get_method_counters(rbx, rax, done);

    if (ProfileInterpreter) {
      __ incrementl(Address(rax,
                            MethodCounters::interpreter_invocation_counter_offset()));
    }
    // Update standard invocation counters
    __ movl(rcx, invocation_counter);
    __ incrementl(rcx, InvocationCounter::count_increment);
    __ movl(invocation_counter, rcx); // save invocation count

    __ movl(rax, backedge_counter);   // load backedge counter
    __ andl(rax, InvocationCounter::count_mask_value); // mask out the status bits

    __ addl(rcx, rax);                // add both counters

    // profile_method is non-null only for interpreted methods so
    // profile_method != NULL == !native_call

    if (ProfileInterpreter && profile_method != NULL) {
      // Test to see if we should create a method data oop
      __ movptr(rax, Address(rbx, Method::method_counters_offset()));
      __ cmp32(rcx, Address(rax, in_bytes(MethodCounters::interpreter_profile_limit_offset())));
      __ jcc(Assembler::less, *profile_method_continue);

      // if no method data exists, go to profile_method
      __ test_method_data_pointer(rax, *profile_method);
    }

    __ movptr(rax, Address(rbx, Method::method_counters_offset()));
    __ cmp32(rcx, Address(rax, in_bytes(MethodCounters::interpreter_invocation_limit_offset())));
    __ jcc(Assembler::aboveEqual, *overflow);
    __ bind(done);
  }
}

void TemplateInterpreterGenerator::generate_counter_overflow(Label& do_continue) {

  // Asm interpreter on entry
  // r14/rdi - locals
  // r13/rsi - bcp
  // rbx     - method
  // rdx     - cpool --- DOES NOT APPEAR TO BE TRUE
  // rbp     - interpreter frame

  // On return (i.e. jump to entry_point) [ back to invocation of interpreter ]
  // Everything as it was on entry
  // rdx is not restored. Doesn't appear to really be set.

  // InterpreterRuntime::frequency_counter_overflow takes two
  // arguments, the first (thread) is passed by call_VM, the second
  // indicates if the counter overflow occurs at a backwards branch
  // (NULL bcp). We pass zero for it.
  // The call returns the address
  // of the verified entry point for the method or NULL if the
  // compilation did not complete (either went background or bailed
  // out).
  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  __ movl(rarg, 0);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::frequency_counter_overflow),
             rarg);

  __ movptr(rbx, Address(rbp, method_offset));   // restore Method*
  // Preserve invariant that r13/r14 contain bcp/locals of sender frame
  // and jump to the interpreted entry.
  __ jmp(do_continue, relocInfo::none);
}

// See if we've got enough room on the stack for locals plus overhead below
// JavaThread::stack_overflow_limit(). If not, throw a StackOverflowError
// without going through the signal handler, i.e., reserved and yellow zones
// will not be made usable. The shadow zone must suffice to handle the
// overflow.
// The expression stack grows down incrementally, so the normal guard
// page mechanism will work for that.
//
// NOTE: Since the additional locals are also always pushed (this was not
// obvious in generate_fixed_frame), the guard should work for them too.
//
// Args:
//      rdx: number of additional locals this frame needs (what we must check)
//      rbx: Method*
//
// Kills:
//      rax
void TemplateInterpreterGenerator::generate_stack_overflow_check(void) {

  // monitor entry size: see picture of stack in frame_x86.hpp
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

  // total overhead size: entry_size + (saved rbp through expr stack
  // bottom). be sure to change this if you add/subtract anything
  // to/from the overhead area
  const int overhead_size =
    -(frame::interpreter_frame_initial_sp_offset * wordSize) + entry_size;

  const int page_size = os::vm_page_size();

  Label after_frame_check;

  // see if the frame is greater than one page in size. If so,
  // then we need to verify there is enough stack space remaining
  // for the additional locals.
  __ cmpl(rdx, (page_size - overhead_size) / Interpreter::stackElementSize);
  __ jcc(Assembler::belowEqual, after_frame_check);

  // compute rsp as if this were going to be the last frame on
  // the stack before the red zone

  Label after_frame_check_pop;
  const Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread);
#ifndef _LP64
  __ push(thread);
  __ get_thread(thread);
#endif

  const Address stack_limit(thread, JavaThread::stack_overflow_limit_offset());

  // locals + overhead, in bytes
  __ mov(rax, rdx);
  __ shlptr(rax, Interpreter::logStackElementSize); // Convert parameter count to bytes.
  __ addptr(rax, overhead_size);

#ifdef ASSERT
  Label limit_okay;
  // Verify that thread stack overflow limit is non-zero.
  __ cmpptr(stack_limit, (int32_t)NULL_WORD);
  __ jcc(Assembler::notEqual, limit_okay);
  __ stop("stack overflow limit is zero");
  __ bind(limit_okay);
#endif

  // Add locals/frame size to stack limit.
  __ addptr(rax, stack_limit);

  // Check against the current stack bottom.
  __ cmpptr(rsp, rax);

  __ jcc(Assembler::above, after_frame_check_pop);
  NOT_LP64(__ pop(rsi));  // get saved bcp

  // Restore sender's sp as SP. This is necessary if the sender's
  // frame is an extended compiled frame (see gen_c2i_adapter())
  // and safer anyway in case of JSR292 adaptations.
  __ pop(rax); // return address must be moved if SP is changed
  __ mov(rsp, rbcp);
  __ push(rax);

  // Note: the restored frame is not necessarily interpreted.
  // Use the shared runtime version of the StackOverflowError.
  assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
  __ jump(ExternalAddress(StubRoutines::throw_StackOverflowError_entry()));
  // all done with frame size check
  __ bind(after_frame_check_pop);
  NOT_LP64(__ pop(rsi));

  // all done with frame size check
  __ bind(after_frame_check);
}

// Allocate monitor and lock method (asm interpreter)
//
// Args:
//      rbx: Method*
//      r14/rdi: locals
//
// Kills:
//      rax
//      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ...(param regs)
//      rscratch1, rscratch2 (scratch regs)
void TemplateInterpreterGenerator::lock_method() {
  // synchronize method
  const Address access_flags(rbx, Method::access_flags_offset());
  const Address monitor_block_top(
        rbp,
        frame::interpreter_frame_monitor_block_top_offset * wordSize);
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

#ifdef ASSERT
  {
    Label L;
    __ movl(rax, access_flags);
    __ testl(rax, JVM_ACC_SYNCHRONIZED);
    __ jcc(Assembler::notZero, L);
    __ stop("method doesn't need synchronization");
    __ bind(L);
  }
#endif // ASSERT

  // get synchronization object
  {
    Label done;
    __ movl(rax, access_flags);
    __ testl(rax, JVM_ACC_STATIC);
    // get receiver (assume this is frequent case)
    __ movptr(rax, Address(rlocals, Interpreter::local_offset_in_bytes(0)));
    __ jcc(Assembler::zero, done);
    __ load_mirror(rax, rbx);

#ifdef ASSERT
    {
      Label L;
      __ testptr(rax, rax);
      __ jcc(Assembler::notZero, L);
      __ stop("synchronization object is NULL");
      __ bind(L);
    }
#endif // ASSERT

    __ bind(done);
  }

  // add space for monitor & lock
  __ subptr(rsp, entry_size); // add space for a monitor entry
  __ movptr(monitor_block_top, rsp);  // set new monitor block top
  // store object
  __ movptr(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax);
  const Register lockreg = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
  __ movptr(lockreg, rsp); // object address
  __ lock_object(lockreg);
}

// Generate a fixed interpreter frame. This is identical setup for
// interpreted methods and for native methods hence the shared code.
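//
// The resulting fixed frame, from high to low addresses (a reading aid
// derived from the pushes below; see frame_x86.hpp for the authoritative
// layout):
//
//   [ return address      ]
//   [ saved rbp           ] <-- rbp
//   [ sender sp           ]
//   [ last_sp (NULL)      ]
//   [ Method*             ]
//   [ mirror (GC root)    ]
//   [ mdp or 0            ]
//   [ ConstantPoolCache*  ]
//   [ locals pointer      ]
//   [ bcp (0 for native)  ]
//   [ expr. stack bottom  ] <-- rsp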
//
// Args:
//      rax: return address
//      rbx: Method*
//      r14/rdi: pointer to locals
//      r13/rsi: sender sp
//      rdx: cp cache
void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  // initialize fixed part of activation frame
  __ push(rax);            // save return address
  __ enter();              // save old & set new rbp
  __ push(rbcp);           // set sender sp
  __ push((int)NULL_WORD); // leave last_sp as null
  __ movptr(rbcp, Address(rbx, Method::const_offset()));    // get ConstMethod*
  __ lea(rbcp, Address(rbcp, ConstMethod::codes_offset())); // get codebase
  __ push(rbx);            // save Method*
  // Get mirror and store it in the frame as GC root for this Method*
  __ load_mirror(rdx, rbx);
  __ push(rdx);
  if (ProfileInterpreter) {
    Label method_data_continue;
    __ movptr(rdx, Address(rbx, in_bytes(Method::method_data_offset())));
    __ testptr(rdx, rdx);
    __ jcc(Assembler::zero, method_data_continue);
    __ addptr(rdx, in_bytes(MethodData::data_offset()));
    __ bind(method_data_continue);
    __ push(rdx);          // set the mdp (method data pointer)
  } else {
    __ push(0);
  }

  __ movptr(rdx, Address(rbx, Method::const_offset()));
  __ movptr(rdx, Address(rdx, ConstMethod::constants_offset()));
  __ movptr(rdx, Address(rdx, ConstantPool::cache_offset_in_bytes()));
  __ push(rdx);      // set constant pool cache
  __ push(rlocals);  // set locals pointer
  if (native_call) {
    __ push(0);      // no bcp
  } else {
    __ push(rbcp);   // set bcp
  }
  __ push(0);        // reserve word for pointer to expression stack bottom
  __ movptr(Address(rsp, 0), rsp); // set expression stack bottom
}

// End of helpers

// Method entry for java.lang.ref.Reference.get.
address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
  // Code: _aload_0, _getfield, _areturn
  // parameter size = 1
  //
  // The code that gets generated by this routine is split into 2 parts:
  //    1. The "intrinsified" code performing an ON_WEAK_OOP_REF load,
  //    2. The slow path - which is an expansion of the regular method entry.
  //
  // Notes:
  // * An intrinsic is always executed, where an ON_WEAK_OOP_REF load is performed.
  // * We may jump to the slow path iff the receiver is null. If the
  //   Reference object is null then we no longer perform an ON_WEAK_OOP_REF load.
  //   Thus we can use the regular method entry code to generate the NPE.
  //
  // rbx: Method*

  // r13: senderSP must preserve for slow path, set SP to it on fast path

  address entry = __ pc();

  const int referent_offset = java_lang_ref_Reference::referent_offset;
  guarantee(referent_offset > 0, "referent offset not initialized");

  Label slow_path;
  // rbx: method

  // Check if local 0 != NULL
  // If the receiver is null then it is OK to jump to the slow path.
  __ movptr(rax, Address(rsp, wordSize));

  __ testptr(rax, rax);
  __ jcc(Assembler::zero, slow_path);

  // rax: local 0
  // rbx: method (but can be used as scratch now)
  // rdx: scratch
  // rdi: scratch

  // Preserve the sender sp in case the load barrier
  // calls the runtime
  NOT_LP64(__ push(rsi));

  // Load the value of the referent field.
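  // ON_WEAK_OOP_REF tells the GC barrier (via BarrierSetAssembler) that this
  // is a read of a weak referent, so collectors that need it (e.g. G1's SATB
  // pre-barrier) can keep the loaded referent alive. This is the point of
  // intrinsifying Reference.get here instead of using the plain getfield path.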
  const Address field_address(rax, referent_offset);
  __ load_heap_oop(rax, field_address, /*tmp1*/ rbx, /*tmp_thread*/ rdx, ON_WEAK_OOP_REF);

  // _areturn
  const Register sender_sp = NOT_LP64(rsi) LP64_ONLY(r13);
  NOT_LP64(__ pop(rsi));      // get sender sp
  __ pop(rdi);                // get return address
  __ mov(rsp, sender_sp);     // set sp to sender sp
  __ jmp(rdi);
  __ ret(0);

  // generate a vanilla interpreter entry as the slow path
  __ bind(slow_path);
  __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
  return entry;
}

void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
  // Quick & dirty stack overflow checking: bang the stack & handle trap.
  // Note that we do the banging after the frame is setup, since the exception
  // handling code expects to find a valid interpreter frame on the stack.
  // Doing the banging earlier fails if the caller frame is not an interpreter
  // frame.
  // (Also, the exception throwing code expects to unlock any synchronized
  // method receiver, so do the banging after locking the receiver.)

  // Bang each page in the shadow zone. We can't assume it's been done for
  // an interpreter frame with greater than a page of locals, so each page
  // needs to be checked.  Only true for non-native.
  if (UseStackBanging) {
    const int page_size = os::vm_page_size();
    const int n_shadow_pages = ((int)JavaThread::stack_shadow_zone_size()) / page_size;
    const int start_page = native_call ? n_shadow_pages : 1;
    for (int pages = start_page; pages <= n_shadow_pages; pages++) {
      __ bang_stack_with_offset(pages*page_size);
    }
  }
}

// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the
// native method than the typical interpreter frame setup.
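// In outline (a reading aid for the long method below), the stub:
//   1. locates the parameters and builds the fixed frame,
//   2. obtains a signature handler and shuffles arguments into C ABI form,
//   3. transitions the thread _thread_in_Java -> _thread_in_native and calls
//      the native function,
//   4. transitions back via _thread_in_native_trans with a safepoint check,
//   5. unboxes an oop result if any, unlocks a synchronized receiver, and
//      returns through the result handler.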
address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;

  // rbx: Method*
  // rbcp: sender sp

  address entry_point = __ pc();

  const Address constMethod       (rbx, Method::const_offset());
  const Address access_flags      (rbx, Method::access_flags_offset());
  const Address size_of_parameters(rcx, ConstMethod::
                                        size_of_parameters_offset());


  // get parameter size (always needed)
  __ movptr(rcx, constMethod);
  __ load_unsigned_short(rcx, size_of_parameters);

  // native calls don't need the stack size check since they have no
  // expression stack and the arguments are already on the stack and
  // we only add a handful of words to the stack

  // rbx: Method*
  // rcx: size of parameters
  // rbcp: sender sp
  __ pop(rax);                                       // get return address

  // for natives the size of locals is zero

  // compute beginning of parameters
  __ lea(rlocals, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));

  // add 2 zero-initialized slots for native calls
  // initialize result_handler slot
  __ push((int) NULL_WORD);
  // slot for oop temp
  // (static native method holder mirror/jni oop result)
  __ push((int) NULL_WORD);

  // initialize fixed part of activation frame
  generate_fixed_frame(true);

  // make sure method is native & not abstract
#ifdef ASSERT
  __ movl(rax, access_flags);
  {
    Label L;
    __ testl(rax, JVM_ACC_NATIVE);
    __ jcc(Assembler::notZero, L);
    __ stop("tried to execute non-native method as native");
    __ bind(L);
  }
  {
    Label L;
    __ testl(rax, JVM_ACC_ABSTRACT);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // Since at this point in the method invocation the exception handler
  // would try to exit the monitor of synchronized methods which hasn't
  // been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation will
  // check this flag.

  const Register thread1 = NOT_LP64(rax) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread1));
  const Address do_not_unlock_if_synchronized(thread1,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ movbool(do_not_unlock_if_synchronized, true);

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  bang_stack_shadow_pages(true);

  // reset the _do_not_unlock_if_synchronized flag
  NOT_LP64(__ get_thread(thread1));
  __ movbool(do_not_unlock_if_synchronized, false);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so method is not locked if the counter overflows.
  if (synchronized) {
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    {
      Label L;
      __ movl(rax, access_flags);
      __ testl(rax, JVM_ACC_SYNCHRONIZED);
      __ jcc(Assembler::zero, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  {
    Label L;
    const Address monitor_block_top(rbp,
                frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ movptr(rax, monitor_block_top);
    __ cmpptr(rax, rsp);
    __ jcc(Assembler::equal, L);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif

  // jvmti support
  __ notify_method_entry();

  // work registers
  const Register method = rbx;
  const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);
  const Register t      = NOT_LP64(rcx) LP64_ONLY(r11);

  // allocate space for parameters
  __ get_method(method);
  __ movptr(t, Address(method, Method::const_offset()));
  __ load_unsigned_short(t, Address(t, ConstMethod::size_of_parameters_offset()));

#ifndef _LP64
  __ shlptr(t, Interpreter::logStackElementSize); // Convert parameter count to bytes.
  __ addptr(t, 2*wordSize);   // allocate two more slots for JNIEnv and possible mirror
  __ subptr(rsp, t);
  __ andptr(rsp, -(StackAlignmentInBytes)); // gcc needs 16 byte aligned stacks to do XMM intrinsics
#else
  __ shll(t, Interpreter::logStackElementSize);

  __ subptr(rsp, t);
  __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
  __ andptr(rsp, -16); // must be 16 byte boundary (see amd64 ABI)
#endif // _LP64

  // get signature handler
  {
    Label L;
    __ movptr(t, Address(method, Method::signature_handler_offset()));
    __ testptr(t, t);
    __ jcc(Assembler::notZero, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               method);
    __ get_method(method);
    __ movptr(t, Address(method, Method::signature_handler_offset()));
    __ bind(L);
  }

  // call signature handler
  assert(InterpreterRuntime::SignatureHandlerGenerator::from() == rlocals,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::to() == rsp,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == NOT_LP64(t) LP64_ONLY(rscratch1),
         "adjust this code");

  // The generated handlers do not touch RBX (the method oop).
  // However, large signatures cannot be cached and are generated
  // each time here.  The slow-path generator can do a GC on return,
  // so we must reload it after the call.
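  // Conceptually the signature handler does something like the following
  // (pseudocode only; real handlers are generated machine code, with a C++
  // fallback reached through InterpreterRuntime):
  //
  //   for each parameter in the method signature:
  //     copy it from the locals area (rlocals) to its C ABI location
  //     (integer/pointer registers, XMM registers, or the native stack)
  //   return the address of the result handler for the return type in rax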
  __ call(t);
  __ get_method(method);        // slow path can do a GC, reload RBX


  // result handler is in rax
  // set result handler
  __ movptr(Address(rbp,
                    (frame::interpreter_frame_result_handler_offset) * wordSize),
            rax);

  // pass mirror handle if static call
  {
    Label L;
    __ movl(t, Address(method, Method::access_flags_offset()));
    __ testl(t, JVM_ACC_STATIC);
    __ jcc(Assembler::zero, L);
    // get mirror
    __ load_mirror(t, method);
    // copy mirror into activation frame
    __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize),
              t);
    // pass handle to mirror
#ifndef _LP64
    __ lea(t, Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
    __ movptr(Address(rsp, wordSize), t);
#else
    __ lea(c_rarg1,
           Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
#endif // _LP64
    __ bind(L);
  }

  // get native function entry point
  {
    Label L;
    __ movptr(rax, Address(method, Method::native_function_offset()));
    ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
    __ cmpptr(rax, unsatisfied.addr());
    __ jcc(Assembler::notEqual, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               method);
    __ get_method(method);
    __ movptr(rax, Address(method, Method::native_function_offset()));
    __ bind(L);
  }

  // pass JNIEnv
#ifndef _LP64
  __ get_thread(thread);
  __ lea(t, Address(thread, JavaThread::jni_environment_offset()));
  __ movptr(Address(rsp, 0), t);

  // set_last_Java_frame_before_call
  // It is enough that the pc()
  // points into the right code segment. It does not have to be the correct return pc.
  __ set_last_Java_frame(thread, noreg, rbp, __ pc());
#else
  __ lea(c_rarg0, Address(r15_thread, JavaThread::jni_environment_offset()));

  // It is enough that the pc() points into the right code
  // segment. It does not have to be the correct return pc.
  __ set_last_Java_frame(rsp, rbp, (address) __ pc());
#endif // _LP64

  // change thread state
#ifdef ASSERT
  {
    Label L;
    __ movl(t, Address(thread, JavaThread::thread_state_offset()));
    __ cmpl(t, _thread_in_Java);
    __ jcc(Assembler::equal, L);
    __ stop("Wrong thread state in native stub");
    __ bind(L);
  }
#endif

  // Change state to native

  __ movl(Address(thread, JavaThread::thread_state_offset()),
          _thread_in_native);

  // Call the native method.
  __ call(rax);
  // 32: result potentially in rdx:rax or ST0
  // 64: result potentially in rax or xmm0

  // Verify or restore cpu control state after JNI call
  __ restore_cpu_control_state_after_jni();

  // NOTE: The order of these pushes is known to frame::interpreter_frame_result
  // in order to extract the result of a method call. If the order of these
  // pushes change or anything else is added to the stack then the code in
  // interpreter_frame_result must also change.
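  // From here until the transition back to _thread_in_Java the thread walks
  // the usual JNI state machine:
  //
  //   _thread_in_Java -> _thread_in_native          (before the call above)
  //   _thread_in_native -> _thread_in_native_trans  (after the call)
  //   _thread_in_native_trans -> _thread_in_Java    (after the safepoint check)
  //
  // While in _thread_in_native the GC may run concurrently, which is why the
  // raw jobject result is only resolved to an oop once the thread is safely
  // back in _thread_in_Java.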
#ifndef _LP64
  // save potential result in ST(0) & rdx:rax
  // (if result handler is the T_FLOAT or T_DOUBLE handler, result must be in ST0 -
  // the check is necessary to avoid potential Intel FPU overflow problems by saving/restoring 'empty' FPU registers)
  // It is safe to do this push because state is _thread_in_native and return address will be found
  // via _last_native_pc and not via _last_java_sp

  // NOTE: the order of these pushes is known to frame::interpreter_frame_result.
  // If the order changes or anything else is added to the stack the code in
  // interpreter_frame_result will have to be changed.

  { Label L;
    Label push_double;
    ExternalAddress float_handler(AbstractInterpreter::result_handler(T_FLOAT));
    ExternalAddress double_handler(AbstractInterpreter::result_handler(T_DOUBLE));
    __ cmpptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset + 1)*wordSize),
              float_handler.addr());
    __ jcc(Assembler::equal, push_double);
    __ cmpptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset + 1)*wordSize),
              double_handler.addr());
    __ jcc(Assembler::notEqual, L);
    __ bind(push_double);
    __ push_d(); // FP values are returned using the FPU, so push FPU contents (even if UseSSE > 0).
    __ bind(L);
  }
#else
  __ push(dtos);
#endif // _LP64

  __ push(ltos);

  // change thread state
  NOT_LP64(__ get_thread(thread));
  __ movl(Address(thread, JavaThread::thread_state_offset()),
          _thread_in_native_trans);

  if (os::is_MP()) {
    if (UseMembar) {
      // Force this write out before the read below
      __ membar(Assembler::Membar_mask_bits(
           Assembler::LoadLoad | Assembler::LoadStore |
           Assembler::StoreLoad | Assembler::StoreStore));
    } else {
      // Write serialization page so VM thread can do a pseudo remote membar.
      // We use the current thread pointer to calculate a thread specific
      // offset to write to within the page. This minimizes bus traffic
      // due to cache line collision.
      __ serialize_memory(thread, rcx);
    }
  }

#ifndef _LP64
  if (AlwaysRestoreFPU) {
    //  Make sure the control word is correct.
    __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
  }
#endif // _LP64

  // check for safepoint operation in progress and/or pending suspend requests
  {
    Label Continue;
    Label slow_path;

#ifndef _LP64
    __ safepoint_poll(slow_path, thread, noreg);
#else
    __ safepoint_poll(slow_path, r15_thread, rscratch1);
#endif

    __ cmpl(Address(thread, JavaThread::suspend_flags_offset()), 0);
    __ jcc(Assembler::equal, Continue);
    __ bind(slow_path);

    // Don't use call_VM as it will see a possible pending exception
    // and forward it and never return here preventing us from
    // clearing _last_native_pc down below. Also can't use
    // call_VM_leaf either as it will check to see if r13 & r14 are
    // preserved and correspond to the bcp/locals pointers. So we do a
    // runtime call by hand.
    //
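    // check_special_condition_for_native_trans does the work a normal
    // transition would do: it blocks if a safepoint is in progress and
    // processes any pending suspend request, after which it is safe to
    // complete the transition back to _thread_in_Java.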
#ifndef _LP64
    __ push(thread);
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
                                            JavaThread::check_special_condition_for_native_trans)));
    __ increment(rsp, wordSize);
    __ get_thread(thread);
#else
    __ mov(c_rarg0, r15_thread);
    __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
    __ andptr(rsp, -16); // align stack as required by ABI
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
    __ mov(rsp, r12); // restore sp
    __ reinit_heapbase();
#endif // _LP64
    __ bind(Continue);
  }

  // change thread state
  __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_Java);

  // reset_last_Java_frame
  __ reset_last_Java_frame(thread, true);

  if (CheckJNICalls) {
    // clear_pending_jni_exception_check
    __ movptr(Address(thread, JavaThread::pending_jni_exception_check_fn_offset()), NULL_WORD);
  }

  // reset handle block
  __ movptr(t, Address(thread, JavaThread::active_handles_offset()));
  __ movl(Address(t, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);

  // If result is an oop, unbox and store it in frame where gc will see it
  // and result handler will pick it up

  {
    Label no_oop, not_weak, store_result;
    __ lea(t, ExternalAddress(AbstractInterpreter::result_handler(T_OBJECT)));
    __ cmpptr(t, Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize));
    __ jcc(Assembler::notEqual, no_oop);
    // retrieve result
    __ pop(ltos);
    // Unbox oop result, e.g. JNIHandles::resolve value.
    __ resolve_jobject(rax /* value */,
                       thread /* thread */,
                       t /* tmp */);
    __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize), rax);
    // keep stack depth as expected by pushing oop which will eventually be discarded
    __ push(ltos);
    __ bind(no_oop);
  }


  {
    Label no_reguard;
    __ cmpl(Address(thread, JavaThread::stack_guard_state_offset()),
            JavaThread::stack_guard_yellow_reserved_disabled);
    __ jcc(Assembler::notEqual, no_reguard);

    __ pusha(); // XXX only save smashed registers
#ifndef _LP64
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
    __ popa();
#else
    __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
    __ andptr(rsp, -16); // align stack as required by ABI
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
    __ mov(rsp, r12); // restore sp
    __ popa(); // XXX only restore smashed registers
    __ reinit_heapbase();
#endif // _LP64

    __ bind(no_reguard);
  }


  // The method register is junk from after the thread_in_native transition
  // until here.  Also can't call_VM until the bcp has been
  // restored.  Need bcp for throwing exception below so get it now.
  __ get_method(method);

  // restore to have legal interpreter frame, i.e., bci == 0 <=> code_base()
  __ movptr(rbcp, Address(method, Method::const_offset()));   // get ConstMethod*
  __ lea(rbcp, Address(rbcp, ConstMethod::codes_offset()));   // get codebase

  // handle exceptions (exception handling will handle unlocking!)
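  // A pending exception at this point came either from the native code
  // itself (raised via the JNI Throw/ThrowNew functions) or from one of the
  // VM transitions above; it is forwarded through the interpreter's regular
  // exception machinery below.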
  {
    Label L;
    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
    __ jcc(Assembler::zero, L);
    // Note: At some point we may want to unify this with the code
    // used in call_VM_base(); i.e., we should use the
    // StubRoutines::forward_exception code. For now this doesn't work
    // here because the rsp is not correctly set at this point.
    __ MacroAssembler::call_VM(noreg,
                               CAST_FROM_FN_PTR(address,
                               InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  // do unlocking if necessary
  {
    Label L;
    __ movl(t, Address(method, Method::access_flags_offset()));
    __ testl(t, JVM_ACC_SYNCHRONIZED);
    __ jcc(Assembler::zero, L);
    // the code below should be shared with interpreter macro
    // assembler implementation
    {
      Label unlock;
      // BasicObjectLock will be first in list, since this is a
      // synchronized method. However, need to check that the object
      // has not been unlocked by an explicit monitorexit bytecode.
      const Address monitor(rbp,
                            (intptr_t)(frame::interpreter_frame_initial_sp_offset *
                                       wordSize - (int)sizeof(BasicObjectLock)));

      const Register regmon = NOT_LP64(rdx) LP64_ONLY(c_rarg1);

      // monitor is expected in c_rarg1 for slow unlock path
      __ lea(regmon, monitor); // address of first monitor

      __ movptr(t, Address(regmon, BasicObjectLock::obj_offset_in_bytes()));
      __ testptr(t, t);
      __ jcc(Assembler::notZero, unlock);

      // Entry already unlocked, need to throw exception
      __ MacroAssembler::call_VM(noreg,
                                 CAST_FROM_FN_PTR(address,
                   InterpreterRuntime::throw_illegal_monitor_state_exception));
      __ should_not_reach_here();

      __ bind(unlock);
      __ unlock_object(regmon);
    }
    __ bind(L);
  }

  // jvmti support
  // Note: This must happen _after_ handling/throwing any exceptions since
  //       the exception handler code notifies the runtime of method exits
  //       too. If this happens before, method entry/exit notifications are
  //       not properly paired (was bug - gri 11/22/99).
  __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);

  // restore potential result in edx:eax, call result handler to
  // restore potential result in ST0 & handle result

  __ pop(ltos);
  LP64_ONLY( __ pop(dtos));

  __ movptr(t, Address(rbp,
                       (frame::interpreter_frame_result_handler_offset) * wordSize));
  __ call(t);

  // remove activation
  __ movptr(t, Address(rbp,
                       frame::interpreter_frame_sender_sp_offset *
                       wordSize)); // get sender sp
  __ leave();                                // remove frame anchor
  __ pop(rdi);                               // get return address
  __ mov(rsp, t);                            // set sp to sender sp
  __ jmp(rdi);

  if (inc_counter) {
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(continue_after_compile);
  }

  return entry_point;
}
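// Calls to an abstract method can reach the interpreter legitimately: a
// receiver's vtable slot may still hold the abstract method (e.g. when a
// subclass failed to override it), so invokevirtual can dispatch here and an
// AbstractMethodError must be raised at run time rather than at link time.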
// Abstract method entry
// Attempt to execute abstract method. Throw exception.
address TemplateInterpreterGenerator::generate_abstract_entry(void) {

  address entry_point = __ pc();

  // abstract method entry

  //  pop return address, reset last_sp to NULL
  __ empty_expression_stack();
  __ restore_bcp();      // rsi must be correct for exception handler   (was destroyed)
  __ restore_locals();   // make sure locals pointer is correct as well (was destroyed)

  // throw exception
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodErrorWithMethod), rbx);
  // the call_VM checks for exception, so we should never return here.
  __ should_not_reach_here();

  return entry_point;
}

//
// Generic interpreted method entry to (asm) interpreter
//
address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;

  // ebx: Method*
  // rbcp: sender sp
  address entry_point = __ pc();

  const Address constMethod(rbx, Method::const_offset());
  const Address access_flags(rbx, Method::access_flags_offset());
  const Address size_of_parameters(rdx,
                                   ConstMethod::size_of_parameters_offset());
  const Address size_of_locals(rdx, ConstMethod::size_of_locals_offset());


  // get parameter size (always needed)
  __ movptr(rdx, constMethod);
  __ load_unsigned_short(rcx, size_of_parameters);

  // rbx: Method*
  // rcx: size of parameters
  // rbcp: sender_sp (could differ from sp+wordSize if we were called via c2i )

  __ load_unsigned_short(rdx, size_of_locals); // get size of locals in words
  __ subl(rdx, rcx); // rdx = no. of additional locals

  // YYY
  //   __ incrementl(rdx);
  //   __ andl(rdx, -2);

  // see if we've got enough room on the stack for locals plus overhead.
  generate_stack_overflow_check();

  // get return address
  __ pop(rax);

  // compute beginning of parameters
  __ lea(rlocals, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));

  // rdx - # of additional locals
  // allocate space for locals
  // explicitly initialize locals
  {
    Label exit, loop;
    __ testl(rdx, rdx);
    __ jcc(Assembler::lessEqual, exit); // do nothing if rdx <= 0
    __ bind(loop);
    __ push((int) NULL_WORD); // initialize local variables
    __ decrementl(rdx); // until everything initialized
    __ jcc(Assembler::greater, loop);
    __ bind(exit);
  }

  // initialize fixed part of activation frame
  generate_fixed_frame(false);

  // make sure method is not native & not abstract
#ifdef ASSERT
  __ movl(rax, access_flags);
  {
    Label L;
    __ testl(rax, JVM_ACC_NATIVE);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute native method as non-native");
    __ bind(L);
  }
  {
    Label L;
    __ testl(rax, JVM_ACC_ABSTRACT);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // Since at this point in the method invocation the exception
  // handler would try to exit the monitor of synchronized methods
  // which hasn't been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation
  // will check this flag.
  const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread));
  const Address do_not_unlock_if_synchronized(thread,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ movbool(do_not_unlock_if_synchronized, true);

  __ profile_parameters_type(rax, rcx, rdx);
  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  Label profile_method;
  Label profile_method_continue;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow,
                          &profile_method,
                          &profile_method_continue);
    if (ProfileInterpreter) {
      __ bind(profile_method_continue);
    }
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  // bang the stack shadow pages before locking
  bang_stack_shadow_pages(false);

  // reset the _do_not_unlock_if_synchronized flag
  NOT_LP64(__ get_thread(thread));
  __ movbool(do_not_unlock_if_synchronized, false);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so method is not locked if the counter overflows.
  if (synchronized) {
    // Allocate monitor and lock method
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    {
      Label L;
      __ movl(rax, access_flags);
      __ testl(rax, JVM_ACC_SYNCHRONIZED);
      __ jcc(Assembler::zero, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  {
    Label L;
    const Address monitor_block_top (rbp,
                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ movptr(rax, monitor_block_top);
    __ cmpptr(rax, rsp);
    __ jcc(Assembler::equal, L);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif

  // jvmti support
  __ notify_method_entry();

  __ dispatch_next(vtos);

  // invocation counter overflow
  if (inc_counter) {
    if (ProfileInterpreter) {
      // We have decided to profile this method in the interpreter
      __ bind(profile_method);
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
      __ set_method_data_pointer_for_bcp();
      __ get_method(rbx);
      __ jmp(profile_method_continue);
    }
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(continue_after_compile);
  }

  return entry_point;
}

//-----------------------------------------------------------------------------
// Exceptions

void TemplateInterpreterGenerator::generate_throw_exception() {
  // Entry point in previous activation (i.e., if the caller was
  // interpreted)
  Interpreter::_rethrow_exception_entry = __ pc();
  // Restore sp to interpreter_frame_last_sp even though we are going
  // to empty the expression stack for the exception processing.
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
  // rax: exception
  // rdx: return address/pc that threw exception
  __ restore_bcp();    // r13/rsi points to call/send
  __ restore_locals();
  LP64_ONLY(__ reinit_heapbase());  // restore r12 as heapbase.
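  // Two distinct entries are generated here: _rethrow_exception_entry above
  // is reached when a callee frame has been removed and the exception must
  // continue in this (the caller's) activation, while _throw_exception_entry
  // below is the target for exceptions raised by bytecodes in the current
  // activation. Both funnel into the same handler lookup.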
  // Entry point for exceptions thrown within interpreter code
  Interpreter::_throw_exception_entry = __ pc();
  // expression stack is undefined here
  // rax: exception
  // r13/rsi: exception bcp
  __ verify_oop(rax);
  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  LP64_ONLY(__ mov(c_rarg1, rax));

  // expression stack must be empty before entering the VM in case of
  // an exception
  __ empty_expression_stack();
  // find exception handler address and preserve exception oop
  __ call_VM(rdx,
             CAST_FROM_FN_PTR(address,
                          InterpreterRuntime::exception_handler_for_exception),
             rarg);
  // rax: exception handler entry point
  // rdx: preserved exception oop
  // r13/rsi: bcp for exception handler
  __ push_ptr(rdx); // push exception which is now the only value on the stack
  __ jmp(rax); // jump to exception handler (may be _remove_activation_entry!)

  // If the exception is not handled in the current frame the frame is
  // removed and the exception is rethrown (i.e. exception
  // continuation is _rethrow_exception).
  //
  // Note: At this point the bci is still the bci for the instruction
  // which caused the exception and the expression stack is
  // empty. Thus, for any VM calls at this point, GC will find a legal
  // oop map (with empty expression stack).

  // In current activation
  // tos: exception
  // esi: exception bcp

  //
  // JVMTI PopFrame support
  //

  Interpreter::_remove_activation_preserving_args_entry = __ pc();
  __ empty_expression_stack();
  // Set the popframe_processing bit in pending_popframe_condition
  // indicating that we are currently handling popframe, so that
  // call_VMs that may happen later do not trigger new popframe
  // handling cycles.
  const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread));
  __ movl(rdx, Address(thread, JavaThread::popframe_condition_offset()));
  __ orl(rdx, JavaThread::popframe_processing_bit);
  __ movl(Address(thread, JavaThread::popframe_condition_offset()), rdx);

  {
    // Check to see whether we are returning to a deoptimized frame.
    // (The PopFrame call ensures that the caller of the popped frame is
    // either interpreted or compiled and deoptimizes it if compiled.)
    // In this case, we can't call dispatch_next() after the frame is
    // popped, but instead must save the incoming arguments and restore
    // them after deoptimization has occurred.
    //
    // Note that we don't compare the return PC against the
    // deoptimization blob's unpack entry because of the presence of
    // adapter frames in C2.
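    // Roughly, InterpreterRuntime::interpreter_contains(pc) reports whether
    // the return address lies within the interpreter's code; a return PC
    // outside it means the caller was compiled and has just been deoptimized
    // by the PopFrame call, so its arguments must be preserved below.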
    Label caller_not_deoptimized;
    Register rarg = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
    __ movptr(rarg, Address(rbp, frame::return_addr_offset * wordSize));
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                               InterpreterRuntime::interpreter_contains), rarg);
    __ testl(rax, rax);
    __ jcc(Assembler::notZero, caller_not_deoptimized);

    // Compute size of arguments for saving when returning to
    // deoptimized caller
    __ get_method(rax);
    __ movptr(rax, Address(rax, Method::const_offset()));
    __ load_unsigned_short(rax, Address(rax, in_bytes(ConstMethod::
                                                size_of_parameters_offset())));
    __ shll(rax, Interpreter::logStackElementSize);
    __ restore_locals();
    __ subptr(rlocals, rax);
    __ addptr(rlocals, wordSize);
    // Save these arguments
    NOT_LP64(__ get_thread(thread));
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                                           Deoptimization::
                                           popframe_preserve_args),
                          thread, rax, rlocals);

    __ remove_activation(vtos, rdx,
                         /* throw_monitor_exception */ false,
                         /* install_monitor_exception */ false,
                         /* notify_jvmdi */ false);

    // Inform deoptimization that it is responsible for restoring
    // these arguments
    NOT_LP64(__ get_thread(thread));
    __ movl(Address(thread, JavaThread::popframe_condition_offset()),
            JavaThread::popframe_force_deopt_reexecution_bit);

    // Continue in deoptimization handler
    __ jmp(rdx);

    __ bind(caller_not_deoptimized);
  }

  __ remove_activation(vtos, rdx, /* rdx result (retaddr) is not used */
                       /* throw_monitor_exception */ false,
                       /* install_monitor_exception */ false,
                       /* notify_jvmdi */ false);

  // Finish with popframe handling
  // A previous I2C followed by a deoptimization might have moved the
  // outgoing arguments further up the stack. PopFrame expects the
  // mutations to those outgoing arguments to be preserved and other
  // constraints basically require this frame to look exactly as
  // though it had previously invoked an interpreted activation with
  // no space between the top of the expression stack (current
  // last_sp) and the top of stack. Rather than force deopt to
  // maintain this kind of invariant all the time we call a small
  // fixup routine to move the mutated arguments onto the top of our
  // expression stack if necessary.
#ifndef _LP64
  __ mov(rax, rsp);
  __ movptr(rbx, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ get_thread(thread);
  // PC must point into interpreter here
  __ set_last_Java_frame(thread, noreg, rbp, __ pc());
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), thread, rax, rbx);
  __ get_thread(thread);
#else
  __ mov(c_rarg1, rsp);
  __ movptr(c_rarg2, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  // PC must point into interpreter here
  __ set_last_Java_frame(noreg, rbp, __ pc());
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), r15_thread, c_rarg1, c_rarg2);
#endif
  __ reset_last_Java_frame(thread, true);

  // Restore the last_sp and null it out
  __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);

  __ restore_bcp();
  __ restore_locals();
  // The method data pointer was incremented already during
  // call profiling. We have to restore the mdp for the current bcp.
  if (ProfileInterpreter) {
    __ set_method_data_pointer_for_bcp();
  }

  // Clear the popframe condition flag
  NOT_LP64(__ get_thread(thread));
  __ movl(Address(thread, JavaThread::popframe_condition_offset()),
          JavaThread::popframe_inactive);

#if INCLUDE_JVMTI
  {
    Label L_done;
    const Register local0 = rlocals;

    __ cmpb(Address(rbcp, 0), Bytecodes::_invokestatic);
    __ jcc(Assembler::notEqual, L_done);

    // The member name argument must be restored if _invokestatic is
    // re-executed after a PopFrame call. Detect such a case in the
    // InterpreterRuntime function and return the member name
    // argument, or NULL.
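    // local0 (== rlocals) addresses local slot 0 of the frame that will
    // re-execute the _invokestatic. The VM call below inspects the call
    // site and hands back either the MemberName argument or NULL; a
    // non-NULL result is written back into that slot before dispatch_next().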
    __ get_method(rdx);
    __ movptr(rax, Address(local0, 0));
    __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), rax, rdx, rbcp);

    __ testptr(rax, rax);
    __ jcc(Assembler::zero, L_done);

    __ movptr(Address(local0, 0), rax);
    __ bind(L_done);
  }
#endif // INCLUDE_JVMTI

  __ dispatch_next(vtos);
  // end of PopFrame support

  Interpreter::_remove_activation_entry = __ pc();

  // preserve exception over this code sequence
  __ pop_ptr(rax);
  NOT_LP64(__ get_thread(thread));
  __ movptr(Address(thread, JavaThread::vm_result_offset()), rax);
  // remove the activation (without doing throws on illegalMonitorExceptions)
  __ remove_activation(vtos, rdx, false, true, false);
  // restore exception
  NOT_LP64(__ get_thread(thread));
  __ get_vm_result(rax, thread);

  // In between activations - previous activation type unknown yet
  // compute continuation point - the continuation point expects the
  // following registers set up:
  //
  // rax: exception
  // rdx: return address/pc that threw exception
  // rsp: expression stack of caller
  // rbp: ebp of caller
  __ push(rax);                                  // save exception
  __ push(rdx);                                  // save return address
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                          SharedRuntime::exception_handler_for_return_address),
                        thread, rdx);
  __ mov(rbx, rax);                              // save exception handler
  __ pop(rdx);                                   // restore return address
  __ pop(rax);                                   // restore exception
  // Note that an "issuing PC" is actually the next PC after the call
  __ jmp(rbx);                                   // jump to exception
                                                 // handler of caller
}


//
// JVMTI ForceEarlyReturn support
//
address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
  address entry = __ pc();

  __ restore_bcp();
  __ restore_locals();
  __ empty_expression_stack();
  __ load_earlyret_value(state);  // 32 bits returns value in rdx, so don't reuse

  const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread));
  __ movptr(rcx, Address(thread, JavaThread::jvmti_thread_state_offset()));
  Address cond_addr(rcx, JvmtiThreadState::earlyret_state_offset());

  // Clear the earlyret state
  __ movl(cond_addr, JvmtiThreadState::earlyret_inactive);

  __ remove_activation(state, rsi,
                       false, /* throw_monitor_exception */
                       false, /* install_monitor_exception */
                       true); /* notify_jvmdi */
  __ jmp(rsi);

  return entry;
} // end of ForceEarlyReturn support


//-----------------------------------------------------------------------------
// Helper for vtos entry point generation

void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
                                                         address& bep,
                                                         address& cep,
                                                         address& sep,
                                                         address& aep,
                                                         address& iep,
                                                         address& lep,
                                                         address& fep,
                                                         address& dep,
                                                         address& vep) {
  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
  Label L;
  aep = __ pc();  __ push_ptr();   __ jmp(L);
#ifndef _LP64
  fep = __ pc();  __ push(ftos);   __ jmp(L);
  dep = __ pc();  __ push(dtos);   __ jmp(L);
#else
  fep = __ pc();  __ push_f(xmm0); __ jmp(L);
  dep = __ pc();  __ push_d(xmm0); __ jmp(L);
#endif // _LP64
  lep = __ pc();  __ push_l();     __ jmp(L);
  bep = cep = sep =
  iep = __ pc();  __ push_i();
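  // bep/cep/sep alias the int entry: byte, char, and short values are
  // already widened to int on the expression stack, so a single push_i()
  // covers all four states. vep (below) pushes nothing, since in void
  // state there is no value to transfer.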
  vep = __ pc();
  __ bind(L);
  generate_and_dispatch(t);
}

//-----------------------------------------------------------------------------

// Non-product code
#ifndef PRODUCT

address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
  address entry = __ pc();

#ifndef _LP64
  // prepare expression stack
  __ pop(rcx);      // pop return address so expression stack is 'pure'
  __ push(state);   // save tosca

  // pass tosca registers as arguments & call tracer
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode), rcx, rax, rdx);
  __ mov(rcx, rax); // make sure return address is not destroyed by pop(state)
  __ pop(state);    // restore tosca

  // return
  __ jmp(rcx);
#else
  __ push(state);
  __ push(c_rarg0);
  __ push(c_rarg1);
  __ push(c_rarg2);
  __ push(c_rarg3);
  __ mov(c_rarg2, rax);  // Pass itos
#ifdef _WIN64
  __ movflt(xmm3, xmm0); // Pass ftos
#endif
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode),
             c_rarg1, c_rarg2, c_rarg3);
  __ pop(c_rarg3);
  __ pop(c_rarg2);
  __ pop(c_rarg1);
  __ pop(c_rarg0);
  __ pop(state);
  __ ret(0);             // return from result handler
#endif // _LP64

  return entry;
}

void TemplateInterpreterGenerator::count_bytecode() {
  __ incrementl(ExternalAddress((address) &BytecodeCounter::_counter_value));
}

void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
  __ incrementl(ExternalAddress((address) &BytecodeHistogram::_counters[t->bytecode()]));
}

void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
  // _index keeps the most recent bytecode in its high bits; shift it down
  // and or in the current bytecode to form the pair index
  // (previous | current << log2_number_of_codes).
  __ mov32(rbx, ExternalAddress((address) &BytecodePairHistogram::_index));
  __ shrl(rbx, BytecodePairHistogram::log2_number_of_codes);
  __ orl(rbx,
         ((int) t->bytecode()) <<
         BytecodePairHistogram::log2_number_of_codes);
  __ mov32(ExternalAddress((address) &BytecodePairHistogram::_index), rbx);
  __ lea(rscratch1, ExternalAddress((address) BytecodePairHistogram::_counters));
  __ incrementl(Address(rscratch1, rbx, Address::times_4));
}


void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
  // Call a little run-time stub to avoid blow-up for each bytecode.
  // The run-time stub saves the right registers, depending on
  // the tosca in-state for the given template.

  assert(Interpreter::trace_code(t->tos_in()) != NULL,
         "entry must have been generated");
#ifndef _LP64
  __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
#else
  __ mov(r12, rsp);    // remember sp (can only use r12 if not using call_VM)
  __ andptr(rsp, -16); // align stack as required by ABI
  __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
  __ mov(rsp, r12);    // restore sp
  __ reinit_heapbase();
#endif // _LP64
}


void TemplateInterpreterGenerator::stop_interpreter_at() {
  Label L;
  __ cmp32(ExternalAddress((address) &BytecodeCounter::_counter_value),
           StopInterpreterAt);
  __ jcc(Assembler::notEqual, L);
  __ int3();
  __ bind(L);
}
#endif // !PRODUCT
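// The non-product helpers above are wired up by generate_and_dispatch() and
// driven by develop flags (see runtime/globals.hpp), e.g. in a debug build:
//
//   java -XX:+CountBytecodes -XX:+TraceBytecodes -XX:StopInterpreterAt=100000 ...
//
// CountBytecodes feeds count_bytecode(), TraceBytecodes goes through
// generate_trace_code()/trace_bytecode(), and a nonzero StopInterpreterAt
// arms the int3 breakpoint emitted by stop_interpreter_at().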