/*
 * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateInterpreterGenerator.hpp"
#include "interpreter/templateTable.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

#define __ _masm->

// Size of interpreter code. Increase if too small. Interpreter will
// fail with a guarantee ("not enough space for interpreter generation")
// if too small.
// Run with +PrintInterpreter to get the VM to print out the size.
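// (A hedged usage note: PrintInterpreter is a diagnostic flag, so in a
//  product build it typically has to be unlocked first, e.g.:
//    java -XX:+UnlockDiagnosticVMOptions -XX:+PrintInterpreter -version )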
// Max size with JVMTI
#ifdef AMD64
int TemplateInterpreter::InterpreterCodeSize = JVMCI_ONLY(268) NOT_JVMCI(256) * 1024;
#else
int TemplateInterpreter::InterpreterCodeSize = 224 * 1024;
#endif // AMD64

// Global Register Names
static const Register rbcp    = LP64_ONLY(r13) NOT_LP64(rsi);
static const Register rlocals = LP64_ONLY(r14) NOT_LP64(rdi);

const int method_offset = frame::interpreter_frame_method_offset * wordSize;
const int bcp_offset    = frame::interpreter_frame_bcp_offset    * wordSize;
const int locals_offset = frame::interpreter_frame_locals_offset * wordSize;


//-----------------------------------------------------------------------------

address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();

#ifdef ASSERT
  {
    Label L;
    __ lea(rax, Address(rbp,
                        frame::interpreter_frame_monitor_block_top_offset *
                        wordSize));
    __ cmpptr(rax, rsp); // rax = maximal rsp for current rbp (stack
                         // grows negative)
    __ jcc(Assembler::aboveEqual, L); // check if frame is complete
    __ stop ("interpreter frame not set up");
    __ bind(L);
  }
#endif // ASSERT
  // Restore bcp under the assumption that the current frame is still
  // interpreted
  __ restore_bcp();

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // throw exception
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::throw_StackOverflowError));
  return entry;
}

address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(
        const char* name) {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // setup parameters
  // ??? convention: expect aberrant index in register ebx
  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  __ lea(rarg, ExternalAddress((address)name));
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::
                              throw_ArrayIndexOutOfBoundsException),
             rarg, rbx);
  return entry;
}

address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();

  // object is at TOS
  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  __ pop(rarg);

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();

  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::
                              throw_ClassCastException),
             rarg);
  return entry;
}

address TemplateInterpreterGenerator::generate_exception_handler_common(
        const char* name, const char* message, bool pass_oop) {
  assert(!pass_oop || message == NULL, "either oop or message but not both");
  address entry = __ pc();

  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  Register rarg2 = NOT_LP64(rbx) LP64_ONLY(c_rarg2);

  if (pass_oop) {
    // object is at TOS
    __ pop(rarg2);
  }
  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // setup parameters
  __ lea(rarg, ExternalAddress((address)name));
  if (pass_oop) {
    __ call_VM(rax, CAST_FROM_FN_PTR(address,
                                     InterpreterRuntime::
                                     create_klass_exception),
               rarg, rarg2);
  } else {
    __ lea(rarg2, ExternalAddress((address)message));
    __ call_VM(rax,
               CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception),
               rarg, rarg2);
  }
  // throw exception
  __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
  return entry;
}

address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
  address entry = __ pc();

#ifndef _LP64
#ifdef COMPILER2
  // The FPU stack is clean if UseSSE >= 2 but must be cleaned in other cases
  if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) {
    for (int i = 1; i < 8; i++) {
      __ ffree(i);
    }
  } else if (UseSSE < 2) {
    __ empty_FPU_stack();
  }
#endif // COMPILER2
  if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) {
    __ MacroAssembler::verify_FPU(1, "generate_return_entry_for compiled");
  } else {
    __ MacroAssembler::verify_FPU(0, "generate_return_entry_for compiled");
  }

  if (state == ftos) {
    __ MacroAssembler::verify_FPU(UseSSE >= 1 ? 0 : 1, "generate_return_entry_for in interpreter");
  } else if (state == dtos) {
    __ MacroAssembler::verify_FPU(UseSSE >= 2 ? 0 : 1, "generate_return_entry_for in interpreter");
  }
#endif // _LP64

  // Restore stack bottom in case i2c adjusted stack
  __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  // and NULL it as marker that esp is now tos until next java call
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);

  __ restore_bcp();
  __ restore_locals();

  if (state == atos) {
    Register mdp = rbx;
    Register tmp = rcx;
    __ profile_return_type(mdp, rax, tmp);
  }

  const Register cache = rbx;
  const Register index = rcx;
  __ get_cache_and_index_at_bcp(cache, index, 1, index_size);

  const Register flags = cache;
  __ movl(flags, Address(cache, index, Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
  __ andl(flags, ConstantPoolCacheEntry::parameter_size_mask);
  __ lea(rsp, Address(rsp, flags, Interpreter::stackElementScale()));
  __ dispatch_next(state, step);

  return entry;
}


address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step) {
  address entry = __ pc();

#ifndef _LP64
  if (state == ftos) {
    __ MacroAssembler::verify_FPU(UseSSE >= 1 ? 0 : 1, "generate_deopt_entry_for in interpreter");
  } else if (state == dtos) {
    __ MacroAssembler::verify_FPU(UseSSE >= 2 ? 0 : 1, "generate_deopt_entry_for in interpreter");
  }
#endif // _LP64

  // NULL last_sp until next java call
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
  __ restore_bcp();
  __ restore_locals();
  const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread));
#if INCLUDE_JVMCI
  // Check if we need to take lock at entry of synchronized method.  This can
  // only occur on method entry so emit it only for vtos with step 0.
  if (UseJVMCICompiler && state == vtos && step == 0) {
    Label L;
    __ cmpb(Address(thread, JavaThread::pending_monitorenter_offset()), 0);
    __ jcc(Assembler::zero, L);
    // Clear flag.
    __ movb(Address(thread, JavaThread::pending_monitorenter_offset()), 0);
    // Satisfy calling convention for lock_method().
    __ get_method(rbx);
    // Take lock.
    lock_method();
    __ bind(L);
  } else {
#ifdef ASSERT
    if (UseJVMCICompiler) {
      Label L;
      __ cmpb(Address(r15_thread, JavaThread::pending_monitorenter_offset()), 0);
      __ jccb(Assembler::zero, L);
      __ stop("unexpected pending monitor in deopt entry");
      __ bind(L);
    }
#endif
  }
#endif
  // handle exceptions
  {
    Label L;
    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
    __ jcc(Assembler::zero, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }
  __ dispatch_next(state, step);
  return entry;
}

address TemplateInterpreterGenerator::generate_result_handler_for(
        BasicType type) {
  address entry = __ pc();
  switch (type) {
  case T_BOOLEAN: __ c2bool(rax);            break;
#ifndef _LP64
  case T_CHAR   : __ andptr(rax, 0xFFFF);    break;
#else
  case T_CHAR   : __ movzwl(rax, rax);       break;
#endif // _LP64
  case T_BYTE   : __ sign_extend_byte(rax);  break;
  case T_SHORT  : __ sign_extend_short(rax); break;
  case T_INT    : /* nothing to do */        break;
  case T_LONG   : /* nothing to do */        break;
  case T_VOID   : /* nothing to do */        break;
#ifndef _LP64
  case T_DOUBLE :
  case T_FLOAT  :
    { const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp();
      __ pop(t);                            // remove return address first
      // Must return a result for interpreter or compiler. In SSE
      // mode, results are returned in xmm0 and the FPU stack must
      // be empty.
      if (type == T_FLOAT && UseSSE >= 1) {
        // Load ST0
        __ fld_d(Address(rsp, 0));
        // Store as float and empty fpu stack
        __ fstp_s(Address(rsp, 0));
        // and reload
        __ movflt(xmm0, Address(rsp, 0));
      } else if (type == T_DOUBLE && UseSSE >= 2 ) {
        __ movdbl(xmm0, Address(rsp, 0));
      } else {
        // restore ST0
        __ fld_d(Address(rsp, 0));
      }
      // and pop the temp
      __ addptr(rsp, 2 * wordSize);
      __ push(t);                           // restore return address
    }
    break;
#else
  case T_FLOAT  : /* nothing to do */        break;
  case T_DOUBLE : /* nothing to do */        break;
#endif // _LP64

  case T_OBJECT :
    // retrieve result from frame
    __ movptr(rax, Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize));
    // and verify it
    __ verify_oop(rax);
    break;
  default       : ShouldNotReachHere();
  }
  __ ret(0);                                   // return from result handler
  return entry;
}

address TemplateInterpreterGenerator::generate_safept_entry_for(
        TosState state,
        address runtime_entry) {
  address entry = __ pc();
  __ push(state);
  __ call_VM(noreg, runtime_entry);
  __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
  return entry;
}



// Helpers for commoning out cases in the various types of method entries.
//


// increment invocation count & check for overflow
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test
//
// rbx: method
// rcx: invocation counter
//
void TemplateInterpreterGenerator::generate_counter_incr(
        Label* overflow,
        Label* profile_method,
        Label* profile_method_continue) {
  Label done;
  // Note: In tiered we increment either counters in Method* or in MDO depending on whether we're profiling or not.
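  // A hedged aside on the helper used below: as implemented in the x86
  // interp_masm, increment_mask_and_jump() loads the counter, adds
  // InvocationCounter::count_increment, stores the sum back, then ANDs the
  // sum with the supplied mask and branches to 'overflow' on the given
  // condition (Assembler::zero here). Roughly (a sketch, not generated code):
  //   counter += increment;
  //   if ((counter & mask) == 0) goto overflow;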
  if (TieredCompilation) {
    int increment = InvocationCounter::count_increment;
    Label no_mdo;
    if (ProfileInterpreter) {
      // Are we profiling?
      __ movptr(rax, Address(rbx, Method::method_data_offset()));
      __ testptr(rax, rax);
      __ jccb(Assembler::zero, no_mdo);
      // Increment counter in the MDO
      const Address mdo_invocation_counter(rax, in_bytes(MethodData::invocation_counter_offset()) +
                                                in_bytes(InvocationCounter::counter_offset()));
      const Address mask(rax, in_bytes(MethodData::invoke_mask_offset()));
      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rcx, false, Assembler::zero, overflow);
      __ jmp(done);
    }
    __ bind(no_mdo);
    // Increment counter in MethodCounters
    const Address invocation_counter(rax,
                  MethodCounters::invocation_counter_offset() +
                  InvocationCounter::counter_offset());
    __ get_method_counters(rbx, rax, done);
    const Address mask(rax, in_bytes(MethodCounters::invoke_mask_offset()));
    __ increment_mask_and_jump(invocation_counter, increment, mask, rcx,
                               false, Assembler::zero, overflow);
    __ bind(done);
  } else { // not TieredCompilation
    const Address backedge_counter(rax,
                  MethodCounters::backedge_counter_offset() +
                  InvocationCounter::counter_offset());
    const Address invocation_counter(rax,
                  MethodCounters::invocation_counter_offset() +
                  InvocationCounter::counter_offset());

    __ get_method_counters(rbx, rax, done);

    if (ProfileInterpreter) {
      __ incrementl(Address(rax,
              MethodCounters::interpreter_invocation_counter_offset()));
    }
    // Update standard invocation counters
    __ movl(rcx, invocation_counter);
    __ incrementl(rcx, InvocationCounter::count_increment);
    __ movl(invocation_counter, rcx); // save invocation count

    __ movl(rax, backedge_counter);   // load backedge counter
    __ andl(rax, InvocationCounter::count_mask_value); // mask out the status bits

    __ addl(rcx, rax);                // add both counters

    // profile_method is non-null only for interpreted methods, so
    // profile_method != NULL == !native_call

    if (ProfileInterpreter && profile_method != NULL) {
      // Test to see if we should create a method data oop
      __ movptr(rax, Address(rbx, Method::method_counters_offset()));
      __ cmp32(rcx, Address(rax, in_bytes(MethodCounters::interpreter_profile_limit_offset())));
      __ jcc(Assembler::less, *profile_method_continue);

      // if no method data exists, go to profile_method
      __ test_method_data_pointer(rax, *profile_method);
    }

    __ movptr(rax, Address(rbx, Method::method_counters_offset()));
    __ cmp32(rcx, Address(rax, in_bytes(MethodCounters::interpreter_invocation_limit_offset())));
    __ jcc(Assembler::aboveEqual, *overflow);
    __ bind(done);
  }
}

void TemplateInterpreterGenerator::generate_counter_overflow(Label& do_continue) {

  // Asm interpreter on entry
  // r14/rdi - locals
  // r13/rsi - bcp
  // rbx - method
  // rdx - cpool --- DOES NOT APPEAR TO BE TRUE
  // rbp - interpreter frame

  // On return (i.e. jump to entry_point) [ back to invocation of interpreter ]
  // Everything as it was on entry
  // rdx is not restored. Doesn't appear to really be set.

  // InterpreterRuntime::frequency_counter_overflow takes two
  // arguments, the first (thread) is passed by call_VM, the second
  // indicates if the counter overflow occurs at a backwards branch
  // (NULL bcp). We pass zero for it. The call returns the address
  // of the verified entry point for the method or NULL if the
  // compilation did not complete (either went background or bailed
  // out).
  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  __ movl(rarg, 0);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::frequency_counter_overflow),
             rarg);

  __ movptr(rbx, Address(rbp, method_offset));   // restore Method*
  // Preserve invariant that r13/r14 contain bcp/locals of sender frame
  // and jump to the interpreted entry.
  __ jmp(do_continue, relocInfo::none);
}

// See if we've got enough room on the stack for locals plus overhead below
// JavaThread::stack_overflow_limit(). If not, throw a StackOverflowError
// without going through the signal handler, i.e., reserved and yellow zones
// will not be made usable. The shadow zone must suffice to handle the
// overflow.
// The expression stack grows down incrementally, so the normal guard
// page mechanism will work for that.
//
// NOTE: The additional locals are always pushed (this wasn't obvious in
// generate_fixed_frame), so the guard should work for them too.
//
// Args:
//      rdx: number of additional locals this frame needs (what we must check)
//      rbx: Method*
//
// Kills:
//      rax
void TemplateInterpreterGenerator::generate_stack_overflow_check(void) {

  // monitor entry size: see picture of stack in frame_x86.hpp
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

  // total overhead size: entry_size + (saved rbp through expr stack
  // bottom).  be sure to change this if you add/subtract anything
  // to/from the overhead area
  const int overhead_size =
    -(frame::interpreter_frame_initial_sp_offset * wordSize) + entry_size;

  const int page_size = os::vm_page_size();

  Label after_frame_check;

  // see if the frame is greater than one page in size. If so,
  // then we need to verify there is enough stack space remaining
  // for the additional locals.
  __ cmpl(rdx, (page_size - overhead_size) / Interpreter::stackElementSize);
  __ jcc(Assembler::belowEqual, after_frame_check);

  // compute rsp as if this were going to be the last frame on
  // the stack before the red zone

  Label after_frame_check_pop;
  const Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread);
#ifndef _LP64
  __ push(thread);
  __ get_thread(thread);
#endif

  const Address stack_limit(thread, JavaThread::stack_overflow_limit_offset());

  // locals + overhead, in bytes
  __ mov(rax, rdx);
  __ shlptr(rax, Interpreter::logStackElementSize); // Convert parameter count to bytes.
  __ addptr(rax, overhead_size);

#ifdef ASSERT
  Label limit_okay;
  // Verify that thread stack overflow limit is non-zero.
  __ cmpptr(stack_limit, (int32_t)NULL_WORD);
  __ jcc(Assembler::notEqual, limit_okay);
  __ stop("stack overflow limit is zero");
  __ bind(limit_okay);
#endif

  // Add locals/frame size to stack limit.
  __ addptr(rax, stack_limit);

  // Check against the current stack bottom.
  __ cmpptr(rsp, rax);

  __ jcc(Assembler::above, after_frame_check_pop);
  NOT_LP64(__ pop(rsi));  // get saved bcp

  // Restore sender's sp as SP. This is necessary if the sender's
  // frame is an extended compiled frame (see gen_c2i_adapter())
  // and safer anyway in case of JSR292 adaptations.
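  // (A descriptive note: rbcp still holds the sender sp captured at method
  //  entry -- generate_fixed_frame has not run yet -- so the sequence below
  //  pops the return address, switches SP to the sender sp, and pushes the
  //  return address back before jumping to the shared throw stub.)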

  __ pop(rax); // return address must be moved if SP is changed
  __ mov(rsp, rbcp);
  __ push(rax);

  // Note: the restored frame is not necessarily interpreted.
  // Use the shared runtime version of the StackOverflowError.
  assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
  __ jump(ExternalAddress(StubRoutines::throw_StackOverflowError_entry()));
  // all done with frame size check
  __ bind(after_frame_check_pop);
  NOT_LP64(__ pop(rsi));

  // all done with frame size check
  __ bind(after_frame_check);
}

// Allocate monitor and lock method (asm interpreter)
//
// Args:
//      rbx: Method*
//      r14/rdi: locals
//
// Kills:
//      rax
//      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ...(param regs)
//      rscratch1, rscratch2 (scratch regs)
void TemplateInterpreterGenerator::lock_method() {
  // synchronize method
  const Address access_flags(rbx, Method::access_flags_offset());
  const Address monitor_block_top(
        rbp,
        frame::interpreter_frame_monitor_block_top_offset * wordSize);
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

#ifdef ASSERT
  {
    Label L;
    __ movl(rax, access_flags);
    __ testl(rax, JVM_ACC_SYNCHRONIZED);
    __ jcc(Assembler::notZero, L);
    __ stop("method doesn't need synchronization");
    __ bind(L);
  }
#endif // ASSERT

  // get synchronization object
  {
    Label done;
    __ movl(rax, access_flags);
    __ testl(rax, JVM_ACC_STATIC);
    // get receiver (assume this is frequent case)
    __ movptr(rax, Address(rlocals, Interpreter::local_offset_in_bytes(0)));
    __ jcc(Assembler::zero, done);
    __ load_mirror(rax, rbx);

#ifdef ASSERT
    {
      Label L;
      __ testptr(rax, rax);
      __ jcc(Assembler::notZero, L);
      __ stop("synchronization object is NULL");
      __ bind(L);
    }
#endif // ASSERT

    __ bind(done);
  }

  // add space for monitor & lock
  __ subptr(rsp, entry_size);        // add space for a monitor entry
  __ movptr(monitor_block_top, rsp); // set new monitor block top
  // store object
  __ movptr(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax);
  const Register lockreg = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
  __ movptr(lockreg, rsp); // object address
  __ lock_object(lockreg);
}

// Generate a fixed interpreter frame. This is identical setup for
// interpreted methods and for native methods hence the shared code.
//
// Args:
//      rax: return address
//      rbx: Method*
//      r14/rdi: pointer to locals
//      r13/rsi: sender sp
//      rdx: cp cache
void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  // initialize fixed part of activation frame
  __ push(rax);            // save return address
  __ enter();              // save old & set new rbp
  __ push(rbcp);           // set sender sp
  __ push((int)NULL_WORD); // leave last_sp as null
  __ movptr(rbcp, Address(rbx, Method::const_offset()));    // get ConstMethod*
  __ lea(rbcp, Address(rbcp, ConstMethod::codes_offset())); // get codebase
  __ push(rbx);            // save Method*
  // Get mirror and store it in the frame as GC root for this Method*
  __ load_mirror(rdx, rbx);
  __ push(rdx);
  if (ProfileInterpreter) {
    Label method_data_continue;
    __ movptr(rdx, Address(rbx, in_bytes(Method::method_data_offset())));
    __ testptr(rdx, rdx);
    __ jcc(Assembler::zero, method_data_continue);
    __ addptr(rdx, in_bytes(MethodData::data_offset()));
    __ bind(method_data_continue);
    __ push(rdx);          // set the mdp (method data pointer)
  } else {
    __ push(0);
  }

  __ movptr(rdx, Address(rbx, Method::const_offset()));
  __ movptr(rdx, Address(rdx, ConstMethod::constants_offset()));
  __ movptr(rdx, Address(rdx, ConstantPool::cache_offset_in_bytes()));
  __ push(rdx);     // set constant pool cache
  __ push(rlocals); // set locals pointer
  if (native_call) {
    __ push(0);     // no bcp
  } else {
    __ push(rbcp);  // set bcp
  }
  __ push(0); // reserve word for pointer to expression stack bottom
  __ movptr(Address(rsp, 0), rsp); // set expression stack bottom
}

// End of helpers

// Method entry for java.lang.ref.Reference.get.
address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
#if INCLUDE_ALL_GCS
  // Code: _aload_0, _getfield, _areturn
  // parameter size = 1
  //
  // The code that gets generated by this routine is split into 2 parts:
  //    1. The "intrinsified" code for G1 (or any SATB based GC),
  //    2. The slow path - which is an expansion of the regular method entry.
  //
  // Notes:-
  // * In the G1 code we do not check whether we need to block for
  //   a safepoint. If G1 is enabled then we must execute the specialized
  //   code for Reference.get (except when the Reference object is null)
  //   so that we can log the value in the referent field with an SATB
  //   update buffer.
  //   If the code for the getfield template is modified so that the
  //   G1 pre-barrier code is executed when the current method is
  //   Reference.get() then going through the normal method entry
  //   will be fine.
  // * The G1 code can, however, check the receiver object (the instance
  //   of java.lang.Reference) and jump to the slow path if null. If the
  //   Reference object is null then we obviously cannot fetch the referent
  //   and so we don't need to call the G1 pre-barrier. Thus we can use the
  //   regular method entry code to generate the NPE.
  //
  // rbx: Method*

  // r13: senderSP must preserve for slow path, set SP to it on fast path

  address entry = __ pc();

  const int referent_offset = java_lang_ref_Reference::referent_offset;
  guarantee(referent_offset > 0, "referent offset not initialized");

  if (UseG1GC) {
    Label slow_path;
    // rbx: method

    // Check if local 0 != NULL
    // If the receiver is null then it is OK to jump to the slow path.
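    // (A descriptive note: at method entry the return address is still on
    //  top of the stack, so local 0 -- the receiver -- sits one word above
    //  rsp, which is what the load below picks up.)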
    __ movptr(rax, Address(rsp, wordSize));

    __ testptr(rax, rax);
    __ jcc(Assembler::zero, slow_path);

    // rax: local 0
    // rbx: method (but can be used as scratch now)
    // rdx: scratch
    // rdi: scratch

    // Preserve the sender sp in case the pre-barrier
    // calls the runtime
    NOT_LP64(__ push(rsi));

    // Generate the G1 pre-barrier code to log the value of
    // the referent field in an SATB buffer.

    // Load the value of the referent field.
    const Address field_address(rax, referent_offset);
    __ load_heap_oop(rax, field_address);

    const Register sender_sp = NOT_LP64(rsi) LP64_ONLY(r13);
    const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
    NOT_LP64(__ get_thread(thread));

    // Generate the G1 pre-barrier code to log the value of
    // the referent field in an SATB buffer.
    __ g1_write_barrier_pre(noreg /* obj */,
                            rax /* pre_val */,
                            thread /* thread */,
                            rbx /* tmp */,
                            true /* tosca_live */,
                            true /* expand_call */);

    // _areturn
    NOT_LP64(__ pop(rsi));      // get sender sp
    __ pop(rdi);                // get return address
    __ mov(rsp, sender_sp);     // set sp to sender sp
    __ jmp(rdi);
    __ ret(0);

    // generate a vanilla interpreter entry as the slow path
    __ bind(slow_path);
    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
    return entry;
  }
#endif // INCLUDE_ALL_GCS

  // If G1 is not enabled then attempt to go through the accessor entry point
  // Reference.get is an accessor
  return NULL;
}

void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
  // Quick & dirty stack overflow checking: bang the stack & handle trap.
  // Note that we do the banging after the frame is setup, since the exception
  // handling code expects to find a valid interpreter frame on the stack.
  // Doing the banging earlier fails if the caller frame is not an interpreter
  // frame.
  // (Also, the exception throwing code expects to unlock any synchronized
  // method receiver, so do the banging after locking the receiver.)

  // Bang each page in the shadow zone. We can't assume it's been done for
  // an interpreter frame with greater than a page of locals, so each page
  // needs to be checked.  Only true for non-native.
  if (UseStackBanging) {
    const int page_size = os::vm_page_size();
    const int n_shadow_pages = ((int)JavaThread::stack_shadow_zone_size()) / page_size;
    const int start_page = native_call ? n_shadow_pages : 1;
    for (int pages = start_page; pages <= n_shadow_pages; pages++) {
      __ bang_stack_with_offset(pages*page_size);
    }
  }
}

// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the
// native method than the typical interpreter frame setup.
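// (In outline, and in addition to the fixed frame built below: two extra
//  words are pushed first -- a slot that will receive the result handler
//  address and an oop temp slot used for a static method's mirror handle
//  and for unboxed oop results. There is no expression stack, and the bcp
//  slot of the fixed frame is left as zero for the native case.)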
address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;

  // rbx: Method*
  // rbcp: sender sp

  address entry_point = __ pc();

  const Address constMethod       (rbx, Method::const_offset());
  const Address access_flags      (rbx, Method::access_flags_offset());
  const Address size_of_parameters(rcx, ConstMethod::
                                        size_of_parameters_offset());


  // get parameter size (always needed)
  __ movptr(rcx, constMethod);
  __ load_unsigned_short(rcx, size_of_parameters);

  // native calls don't need the stack size check since they have no
  // expression stack and the arguments are already on the stack and
  // we only add a handful of words to the stack

  // rbx: Method*
  // rcx: size of parameters
  // rbcp: sender sp
  __ pop(rax); // get return address

  // for natives the size of locals is zero

  // compute beginning of parameters
  __ lea(rlocals, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));

  // add 2 zero-initialized slots for native calls
  // initialize result_handler slot
  __ push((int) NULL_WORD);
  // slot for oop temp
  // (static native method holder mirror/jni oop result)
  __ push((int) NULL_WORD);

  // initialize fixed part of activation frame
  generate_fixed_frame(true);

  // make sure method is native & not abstract
#ifdef ASSERT
  __ movl(rax, access_flags);
  {
    Label L;
    __ testl(rax, JVM_ACC_NATIVE);
    __ jcc(Assembler::notZero, L);
    __ stop("tried to execute non-native method as native");
    __ bind(L);
  }
  {
    Label L;
    __ testl(rax, JVM_ACC_ABSTRACT);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // Since at this point in the method invocation the exception handler
  // would try to exit the monitor of synchronized methods which hasn't
  // been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation will
  // check this flag.

  const Register thread1 = NOT_LP64(rax) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread1));
  const Address do_not_unlock_if_synchronized(thread1,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ movbool(do_not_unlock_if_synchronized, true);

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  bang_stack_shadow_pages(true);

  // reset the _do_not_unlock_if_synchronized flag
  NOT_LP64(__ get_thread(thread1));
  __ movbool(do_not_unlock_if_synchronized, false);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so method is not locked if overflows.
  if (synchronized) {
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    {
      Label L;
      __ movl(rax, access_flags);
      __ testl(rax, JVM_ACC_SYNCHRONIZED);
      __ jcc(Assembler::zero, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  {
    Label L;
    const Address monitor_block_top(rbp,
                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ movptr(rax, monitor_block_top);
    __ cmpptr(rax, rsp);
    __ jcc(Assembler::equal, L);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif

  // jvmti support
  __ notify_method_entry();

  // work registers
  const Register method = rbx;
  const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);
  const Register t      = NOT_LP64(rcx) LP64_ONLY(r11);

  // allocate space for parameters
  __ get_method(method);
  __ movptr(t, Address(method, Method::const_offset()));
  __ load_unsigned_short(t, Address(t, ConstMethod::size_of_parameters_offset()));

#ifndef _LP64
  __ shlptr(t, Interpreter::logStackElementSize); // Convert parameter count to bytes.
  __ addptr(t, 2*wordSize); // allocate two more slots for JNIEnv and possible mirror
  __ subptr(rsp, t);
  __ andptr(rsp, -(StackAlignmentInBytes)); // gcc needs 16 byte aligned stacks to do XMM intrinsics
#else
  __ shll(t, Interpreter::logStackElementSize);

  __ subptr(rsp, t);
  __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
  __ andptr(rsp, -16); // must be 16 byte boundary (see amd64 ABI)
#endif // _LP64

  // get signature handler
  {
    Label L;
    __ movptr(t, Address(method, Method::signature_handler_offset()));
    __ testptr(t, t);
    __ jcc(Assembler::notZero, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               method);
    __ get_method(method);
    __ movptr(t, Address(method, Method::signature_handler_offset()));
    __ bind(L);
  }

  // call signature handler
  assert(InterpreterRuntime::SignatureHandlerGenerator::from() == rlocals,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::to() == rsp,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == NOT_LP64(t) LP64_ONLY(rscratch1),
         "adjust this code");

  // The generated handlers do not touch RBX (the method oop).
  // However, large signatures cannot be cached and are generated
  // each time here.  The slow-path generator can do a GC on return,
  // so we must reload it after the call.
  __ call(t);
  __ get_method(method); // slow path can do a GC, reload RBX


  // result handler is in rax
  // set result handler
  __ movptr(Address(rbp,
                    (frame::interpreter_frame_result_handler_offset) * wordSize),
            rax);

  // pass mirror handle if static call
  {
    Label L;
    __ movl(t, Address(method, Method::access_flags_offset()));
    __ testl(t, JVM_ACC_STATIC);
    __ jcc(Assembler::zero, L);
    // get mirror
    __ load_mirror(t, method);
    // copy mirror into activation frame
    __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize),
              t);
    // pass handle to mirror
#ifndef _LP64
    __ lea(t, Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
    __ movptr(Address(rsp, wordSize), t);
#else
    __ lea(c_rarg1,
           Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
#endif // _LP64
    __ bind(L);
  }

  // get native function entry point
  {
    Label L;
    __ movptr(rax, Address(method, Method::native_function_offset()));
    ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
    __ cmpptr(rax, unsatisfied.addr());
    __ jcc(Assembler::notEqual, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               method);
    __ get_method(method);
    __ movptr(rax, Address(method, Method::native_function_offset()));
    __ bind(L);
  }

  // pass JNIEnv
#ifndef _LP64
  __ get_thread(thread);
  __ lea(t, Address(thread, JavaThread::jni_environment_offset()));
  __ movptr(Address(rsp, 0), t);

  // set_last_Java_frame_before_call
  // It is enough that the pc()
  // points into the right code segment. It does not have to be the correct return pc.
  __ set_last_Java_frame(thread, noreg, rbp, __ pc());
#else
  __ lea(c_rarg0, Address(r15_thread, JavaThread::jni_environment_offset()));

  // It is enough that the pc() points into the right code
  // segment. It does not have to be the correct return pc.
  __ set_last_Java_frame(rsp, rbp, (address) __ pc());
#endif // _LP64

  // change thread state
#ifdef ASSERT
  {
    Label L;
    __ movl(t, Address(thread, JavaThread::thread_state_offset()));
    __ cmpl(t, _thread_in_Java);
    __ jcc(Assembler::equal, L);
    __ stop("Wrong thread state in native stub");
    __ bind(L);
  }
#endif

  // Change state to native

  __ movl(Address(thread, JavaThread::thread_state_offset()),
          _thread_in_native);

  // Call the native method.
  __ call(rax);
  // 32: result potentially in rdx:rax or ST0
  // 64: result potentially in rax or xmm0

  // Verify or restore cpu control state after JNI call
  __ restore_cpu_control_state_after_jni();

  // NOTE: The order of these pushes is known to frame::interpreter_frame_result
  // in order to extract the result of a method call. If the order of these
  // pushes change or anything else is added to the stack then the code in
  // interpreter_frame_result must also change.

#ifndef _LP64
  // save potential result in ST(0) & rdx:rax
  // (if result handler is the T_FLOAT or T_DOUBLE handler, result must be in ST0 -
  // the check is necessary to avoid potential Intel FPU overflow problems by saving/restoring 'empty' FPU registers)
  // It is safe to do this push because state is _thread_in_native and return address will be found
  // via _last_native_pc and not via _last_java_sp

  // NOTE: the order of these push(es) is known to frame::interpreter_frame_result.
  // If the order changes or anything else is added to the stack the code in
  // interpreter_frame_result will have to be changed.

  { Label L;
    Label push_double;
    ExternalAddress float_handler(AbstractInterpreter::result_handler(T_FLOAT));
    ExternalAddress double_handler(AbstractInterpreter::result_handler(T_DOUBLE));
    __ cmpptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset + 1)*wordSize),
              float_handler.addr());
    __ jcc(Assembler::equal, push_double);
    __ cmpptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset + 1)*wordSize),
              double_handler.addr());
    __ jcc(Assembler::notEqual, L);
    __ bind(push_double);
    __ push_d(); // FP values are returned using the FPU, so push FPU contents (even if UseSSE > 0).
    __ bind(L);
  }
#else
  __ push(dtos);
#endif // _LP64

  __ push(ltos);

  // change thread state
  NOT_LP64(__ get_thread(thread));
  __ movl(Address(thread, JavaThread::thread_state_offset()),
          _thread_in_native_trans);

  if (os::is_MP()) {
    if (UseMembar) {
      // Force this write out before the read below
      __ membar(Assembler::Membar_mask_bits(
           Assembler::LoadLoad | Assembler::LoadStore |
           Assembler::StoreLoad | Assembler::StoreStore));
    } else {
      // Write serialization page so VM thread can do a pseudo remote membar.
      // We use the current thread pointer to calculate a thread specific
      // offset to write to within the page. This minimizes bus traffic
      // due to cache line collision.
      __ serialize_memory(thread, rcx);
    }
  }

#ifndef _LP64
  if (AlwaysRestoreFPU) {
    // Make sure the control word is correct.
    __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
  }
#endif // _LP64

  // check for safepoint operation in progress and/or pending suspend requests
  {
    Label Continue;
    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
             SafepointSynchronize::_not_synchronized);

    Label L;
    __ jcc(Assembler::notEqual, L);
    __ cmpl(Address(thread, JavaThread::suspend_flags_offset()), 0);
    __ jcc(Assembler::equal, Continue);
    __ bind(L);

    // Don't use call_VM as it will see a possible pending exception
    // and forward it and never return here preventing us from
    // clearing _last_native_pc down below.  Also can't use
    // call_VM_leaf either as it will check to see if r13 & r14 are
    // preserved and correspond to the bcp/locals pointers. So we do a
    // runtime call by hand.
    //
#ifndef _LP64
    __ push(thread);
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
                                            JavaThread::check_special_condition_for_native_trans)));
    __ increment(rsp, wordSize);
    __ get_thread(thread);
#else
    __ mov(c_rarg0, r15_thread);
    __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
    __ andptr(rsp, -16); // align stack as required by ABI
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
    __ mov(rsp, r12); // restore sp
    __ reinit_heapbase();
#endif // _LP64
    __ bind(Continue);
  }

  // change thread state
  __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_Java);

  // reset_last_Java_frame
  __ reset_last_Java_frame(thread, true);

  if (CheckJNICalls) {
    // clear_pending_jni_exception_check
    __ movptr(Address(thread, JavaThread::pending_jni_exception_check_fn_offset()), NULL_WORD);
  }

  // reset handle block
  __ movptr(t, Address(thread, JavaThread::active_handles_offset()));
  __ movl(Address(t, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);

  // If result is an oop unbox and store it in frame where gc will see it
  // and result handler will pick it up

  {
    Label no_oop, store_result;
    __ lea(t, ExternalAddress(AbstractInterpreter::result_handler(T_OBJECT)));
    __ cmpptr(t, Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize));
    __ jcc(Assembler::notEqual, no_oop);
    // retrieve result
    __ pop(ltos);
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, store_result);
    __ movptr(rax, Address(rax, 0));
    __ bind(store_result);
    __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize), rax);
    // keep stack depth as expected by pushing oop which will eventually be discarded
    __ push(ltos);
    __ bind(no_oop);
  }


  {
    Label no_reguard;
    __ cmpl(Address(thread, JavaThread::stack_guard_state_offset()),
            JavaThread::stack_guard_yellow_reserved_disabled);
    __ jcc(Assembler::notEqual, no_reguard);

    __ pusha(); // XXX only save smashed registers
#ifndef _LP64
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
    __ popa();
#else
    __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
    __ andptr(rsp, -16); // align stack as required by ABI
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
    __ mov(rsp, r12); // restore sp
    __ popa(); // XXX only restore smashed registers
    __ reinit_heapbase();
#endif // _LP64

    __ bind(no_reguard);
  }


  // The method register is junk from after the thread_in_native transition
  // until here.  Also can't call_VM until the bcp has been
  // restored.  Need bcp for throwing exception below so get it now.
  __ get_method(method);

  // restore to have legal interpreter frame, i.e., bci == 0 <=> code_base()
  __ movptr(rbcp, Address(method, Method::const_offset()));   // get ConstMethod*
  __ lea(rbcp, Address(rbcp, ConstMethod::codes_offset()));   // get codebase

  // handle exceptions (exception handling will handle unlocking!)
  {
    Label L;
    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
    __ jcc(Assembler::zero, L);
    // Note: At some point we may want to unify this with the code
    // used in call_VM_base(); i.e., we should use the
    // StubRoutines::forward_exception code. For now this doesn't work
    // here because the rsp is not correctly set at this point.
    __ MacroAssembler::call_VM(noreg,
                               CAST_FROM_FN_PTR(address,
                               InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  // do unlocking if necessary
  {
    Label L;
    __ movl(t, Address(method, Method::access_flags_offset()));
    __ testl(t, JVM_ACC_SYNCHRONIZED);
    __ jcc(Assembler::zero, L);
    // the code below should be shared with interpreter macro
    // assembler implementation
    {
      Label unlock;
      // BasicObjectLock will be first in list, since this is a
      // synchronized method. However, need to check that the object
      // has not been unlocked by an explicit monitorexit bytecode.
      const Address monitor(rbp,
                            (intptr_t)(frame::interpreter_frame_initial_sp_offset *
                                       wordSize - (int)sizeof(BasicObjectLock)));

      const Register regmon = NOT_LP64(rdx) LP64_ONLY(c_rarg1);

      // monitor expected in c_rarg1 for slow unlock path
      __ lea(regmon, monitor); // address of first monitor

      __ movptr(t, Address(regmon, BasicObjectLock::obj_offset_in_bytes()));
      __ testptr(t, t);
      __ jcc(Assembler::notZero, unlock);

      // Entry already unlocked, need to throw exception
      __ MacroAssembler::call_VM(noreg,
                                 CAST_FROM_FN_PTR(address,
                   InterpreterRuntime::throw_illegal_monitor_state_exception));
      __ should_not_reach_here();

      __ bind(unlock);
      __ unlock_object(regmon);
    }
    __ bind(L);
  }

  // jvmti support
  // Note: This must happen _after_ handling/throwing any exceptions since
  //       the exception handler code notifies the runtime of method exits
  //       too. If this happens before, method entry/exit notifications are
  //       not properly paired (was bug - gri 11/22/99).
  __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);

  // restore potential result in edx:eax, call result handler to
  // restore potential result in ST0 & handle result

  __ pop(ltos);
  LP64_ONLY( __ pop(dtos));

  __ movptr(t, Address(rbp,
                       (frame::interpreter_frame_result_handler_offset) * wordSize));
  __ call(t);

  // remove activation
  __ movptr(t, Address(rbp,
                       frame::interpreter_frame_sender_sp_offset *
                       wordSize)); // get sender sp
  __ leave();  // remove frame anchor
  __ pop(rdi); // get return address
  __ mov(rsp, t); // set sp to sender sp
  __ jmp(rdi);

  if (inc_counter) {
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(continue_after_compile);
  }

  return entry_point;
}

// Abstract method entry
// Attempt to execute abstract method. Throw exception
address TemplateInterpreterGenerator::generate_abstract_entry(void) {

  address entry_point = __ pc();

  // abstract method entry

  // pop return address, reset last_sp to NULL
  __ empty_expression_stack();
  __ restore_bcp();    // rsi must be correct for exception handler   (was destroyed)
  __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)

  // throw exception
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
  // the call_VM checks for exception, so we should never return here.
  __ should_not_reach_here();

  return entry_point;
}

//
// Generic interpreted method entry to (asm) interpreter
//
address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;

  // ebx: Method*
  // rbcp: sender sp
  address entry_point = __ pc();

  const Address constMethod(rbx, Method::const_offset());
  const Address access_flags(rbx, Method::access_flags_offset());
  const Address size_of_parameters(rdx,
                                   ConstMethod::size_of_parameters_offset());
  const Address size_of_locals(rdx, ConstMethod::size_of_locals_offset());


  // get parameter size (always needed)
  __ movptr(rdx, constMethod);
  __ load_unsigned_short(rcx, size_of_parameters);

  // rbx: Method*
  // rcx: size of parameters
  // rbcp: sender_sp (could differ from sp+wordSize if we were called via c2i )

  __ load_unsigned_short(rdx, size_of_locals); // get size of locals in words
  __ subl(rdx, rcx); // rdx = no. of additional locals

  // YYY
//   __ incrementl(rdx);
//   __ andl(rdx, -2);

  // see if we've got enough room on the stack for locals plus overhead.
  generate_stack_overflow_check();

  // get return address
  __ pop(rax);

  // compute beginning of parameters
  __ lea(rlocals, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));

  // rdx - # of additional locals
  // allocate space for locals
  // explicitly initialize locals
  {
    Label exit, loop;
    __ testl(rdx, rdx);
    __ jcc(Assembler::lessEqual, exit); // do nothing if rdx <= 0
    __ bind(loop);
    __ push((int) NULL_WORD); // initialize local variables
    __ decrementl(rdx);       // until everything initialized
    __ jcc(Assembler::greater, loop);
    __ bind(exit);
  }

  // initialize fixed part of activation frame
  generate_fixed_frame(false);

  // make sure method is not native & not abstract
#ifdef ASSERT
  __ movl(rax, access_flags);
  {
    Label L;
    __ testl(rax, JVM_ACC_NATIVE);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute native method as non-native");
    __ bind(L);
  }
  {
    Label L;
    __ testl(rax, JVM_ACC_ABSTRACT);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // Since at this point in the method invocation the exception
  // handler would try to exit the monitor of synchronized methods
  // which hasn't been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation
  // will check this flag.
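  // (A descriptive note, mirroring the native entry above: remove_activation()
  //  consults this per-thread flag and skips the unlock while it is set; the
  //  flag is cleared again below once stack banging is done and the monitor
  //  can actually be entered.)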

  const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread));
  const Address do_not_unlock_if_synchronized(thread,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ movbool(do_not_unlock_if_synchronized, true);

  __ profile_parameters_type(rax, rcx, rdx);
  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  Label profile_method;
  Label profile_method_continue;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow,
                          &profile_method,
                          &profile_method_continue);
    if (ProfileInterpreter) {
      __ bind(profile_method_continue);
    }
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  // check for synchronized interpreted methods
  bang_stack_shadow_pages(false);

  // reset the _do_not_unlock_if_synchronized flag
  NOT_LP64(__ get_thread(thread));
  __ movbool(do_not_unlock_if_synchronized, false);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so method is not locked if overflows.
  if (synchronized) {
    // Allocate monitor and lock method
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    {
      Label L;
      __ movl(rax, access_flags);
      __ testl(rax, JVM_ACC_SYNCHRONIZED);
      __ jcc(Assembler::zero, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  {
    Label L;
    const Address monitor_block_top (rbp,
                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ movptr(rax, monitor_block_top);
    __ cmpptr(rax, rsp);
    __ jcc(Assembler::equal, L);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif

  // jvmti support
  __ notify_method_entry();

  __ dispatch_next(vtos);

  // invocation counter overflow
  if (inc_counter) {
    if (ProfileInterpreter) {
      // We have decided to profile this method in the interpreter
      __ bind(profile_method);
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
      __ set_method_data_pointer_for_bcp();
      __ get_method(rbx);
      __ jmp(profile_method_continue);
    }
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(continue_after_compile);
  }

  return entry_point;
}

//-----------------------------------------------------------------------------
// Exceptions

void TemplateInterpreterGenerator::generate_throw_exception() {
  // Entry point in previous activation (i.e., if the caller was
  // interpreted)
  Interpreter::_rethrow_exception_entry = __ pc();
  // Restore sp to interpreter_frame_last_sp even though we are going
  // to empty the expression stack for the exception processing.
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
  // rax: exception
  // rdx: return address/pc that threw exception
  __ restore_bcp();    // r13/rsi points to call/send
  __ restore_locals();
  LP64_ONLY(__ reinit_heapbase());  // restore r12 as heapbase.
  // Entry point for exceptions thrown within interpreter code
  Interpreter::_throw_exception_entry = __ pc();
  // expression stack is undefined here
  // rax: exception
  // r13/rsi: exception bcp
  __ verify_oop(rax);
  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  LP64_ONLY(__ mov(c_rarg1, rax));

  // expression stack must be empty before entering the VM in case of
  // an exception
  __ empty_expression_stack();
  // find exception handler address and preserve exception oop
  __ call_VM(rdx,
             CAST_FROM_FN_PTR(address,
                          InterpreterRuntime::exception_handler_for_exception),
             rarg);
  // rax: exception handler entry point
  // rdx: preserved exception oop
  // r13/rsi: bcp for exception handler
  __ push_ptr(rdx); // push exception which is now the only value on the stack
  __ jmp(rax); // jump to exception handler (may be _remove_activation_entry!)

  // If the exception is not handled in the current frame the frame is
  // removed and the exception is rethrown (i.e. exception
  // continuation is _rethrow_exception).
  //
  // Note: At this point the bci is still the bci for the instruction
  // which caused the exception and the expression stack is
  // empty. Thus, for any VM calls at this point, GC will find a legal
  // oop map (with empty expression stack).

  // In current activation
  // tos: exception
  // esi: exception bcp

  //
  // JVMTI PopFrame support
  //

  Interpreter::_remove_activation_preserving_args_entry = __ pc();
  __ empty_expression_stack();
  // Set the popframe_processing bit in pending_popframe_condition
  // indicating that we are currently handling popframe, so that
  // call_VMs that may happen later do not trigger new popframe
  // handling cycles.
  const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread));
  __ movl(rdx, Address(thread, JavaThread::popframe_condition_offset()));
  __ orl(rdx, JavaThread::popframe_processing_bit);
  __ movl(Address(thread, JavaThread::popframe_condition_offset()), rdx);

  {
    // Check to see whether we are returning to a deoptimized frame.
    // (The PopFrame call ensures that the caller of the popped frame is
    // either interpreted or compiled and deoptimizes it if compiled.)
    // In this case, we can't call dispatch_next() after the frame is
    // popped, but instead must save the incoming arguments and restore
    // them after deoptimization has occurred.
    //
    // Note that we don't compare the return PC against the
    // deoptimization blob's unpack entry because of the presence of
    // adapter frames in C2.
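    // (Instead, the runtime is asked below whether the caller's return PC
    //  lies inside the interpreter's code range; if it does not, the caller
    //  is treated as deoptimized and its arguments are preserved by hand.)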
    Label caller_not_deoptimized;
    Register rarg = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
    __ movptr(rarg, Address(rbp, frame::return_addr_offset * wordSize));
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                               InterpreterRuntime::interpreter_contains), rarg);
    __ testl(rax, rax);
    __ jcc(Assembler::notZero, caller_not_deoptimized);

    // Compute size of arguments for saving when returning to
    // deoptimized caller
    __ get_method(rax);
    __ movptr(rax, Address(rax, Method::const_offset()));
    __ load_unsigned_short(rax, Address(rax, in_bytes(ConstMethod::
                                                size_of_parameters_offset())));
    __ shll(rax, Interpreter::logStackElementSize);
    __ restore_locals();
    __ subptr(rlocals, rax);
    __ addptr(rlocals, wordSize);
    // Save these arguments
    NOT_LP64(__ get_thread(thread));
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                                           Deoptimization::
                                           popframe_preserve_args),
                          thread, rax, rlocals);

    __ remove_activation(vtos, rdx,
                         /* throw_monitor_exception */ false,
                         /* install_monitor_exception */ false,
                         /* notify_jvmdi */ false);

    // Inform deoptimization that it is responsible for restoring
    // these arguments
    NOT_LP64(__ get_thread(thread));
    __ movl(Address(thread, JavaThread::popframe_condition_offset()),
            JavaThread::popframe_force_deopt_reexecution_bit);

    // Continue in deoptimization handler
    __ jmp(rdx);

    __ bind(caller_not_deoptimized);
  }

  __ remove_activation(vtos, rdx, /* rdx result (retaddr) is not used */
                       /* throw_monitor_exception */ false,
                       /* install_monitor_exception */ false,
                       /* notify_jvmdi */ false);

  // Finish with popframe handling
  // A previous I2C followed by a deoptimization might have moved the
  // outgoing arguments further up the stack. PopFrame expects the
  // mutations to those outgoing arguments to be preserved and other
  // constraints basically require this frame to look exactly as
  // though it had previously invoked an interpreted activation with
  // no space between the top of the expression stack (current
  // last_sp) and the top of stack. Rather than force deopt to
  // maintain this kind of invariant all the time we call a small
  // fixup routine to move the mutated arguments onto the top of our
  // expression stack if necessary.
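  // The fixup call below passes the current rsp and the saved last_sp so
  // the runtime can copy any mutated outgoing arguments back onto the
  // expression stack; a last_Java_frame with a PC inside the interpreter
  // is installed first so this frame can still be walked during the call.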
#ifndef _LP64
  __ mov(rax, rsp);
  __ movptr(rbx, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ get_thread(thread);
  // PC must point into interpreter here
  __ set_last_Java_frame(thread, noreg, rbp, __ pc());
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), thread, rax, rbx);
  __ get_thread(thread);
#else
  __ mov(c_rarg1, rsp);
  __ movptr(c_rarg2, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  // PC must point into interpreter here
  __ set_last_Java_frame(noreg, rbp, __ pc());
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), r15_thread, c_rarg1, c_rarg2);
#endif
  __ reset_last_Java_frame(thread, true);

  // Restore the last_sp and null it out
  __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);

  __ restore_bcp();
  __ restore_locals();
  // The method data pointer was incremented already during
  // call profiling. We have to restore the mdp for the current bcp.
  if (ProfileInterpreter) {
    __ set_method_data_pointer_for_bcp();
  }

  // Clear the popframe condition flag
  NOT_LP64(__ get_thread(thread));
  __ movl(Address(thread, JavaThread::popframe_condition_offset()),
          JavaThread::popframe_inactive);

#if INCLUDE_JVMTI
  {
    Label L_done;
    const Register local0 = rlocals;

    __ cmpb(Address(rbcp, 0), Bytecodes::_invokestatic);
    __ jcc(Assembler::notEqual, L_done);

    // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
    // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.
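    // The runtime call below receives the value in local slot 0, the
    // Method* and the bcp; it returns the MemberName oop to restore, or
    // NULL if this invokestatic carries no MemberName appendix.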
    __ get_method(rdx);
    __ movptr(rax, Address(local0, 0));
    __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), rax, rdx, rbcp);

    __ testptr(rax, rax);
    __ jcc(Assembler::zero, L_done);

    // restore the member name into local slot 0
    __ movptr(Address(local0, 0), rax);
    __ bind(L_done);
  }
#endif // INCLUDE_JVMTI

  __ dispatch_next(vtos);
  // end of PopFrame support

  Interpreter::_remove_activation_entry = __ pc();

  // preserve exception over this code sequence
  __ pop_ptr(rax);
  NOT_LP64(__ get_thread(thread));
  __ movptr(Address(thread, JavaThread::vm_result_offset()), rax);
  // remove the activation (without doing throws on illegalMonitorExceptions)
  __ remove_activation(vtos, rdx, false, true, false);
  // restore exception
  NOT_LP64(__ get_thread(thread));
  __ get_vm_result(rax, thread);

  // In between activations - previous activation type unknown yet
  // compute continuation point - the continuation point expects the
  // following registers set up:
  //
  // rax: exception
  // rdx: return address/pc that threw exception
  // rsp: expression stack of caller
  // rbp: ebp of caller
  __ push(rax);                                  // save exception
  __ push(rdx);                                  // save return address
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                          SharedRuntime::exception_handler_for_return_address),
                        thread, rdx);
  __ mov(rbx, rax);                              // save exception handler
  __ pop(rdx);                                   // restore return address
  __ pop(rax);                                   // restore exception
  // Note that an "issuing PC" is actually the next PC after the call
  __ jmp(rbx);                                   // jump to exception
                                                 // handler of caller
}


//
// JVMTI ForceEarlyReturn support
//
address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
  address entry = __ pc();

  __ restore_bcp();
  __ restore_locals();
  __ empty_expression_stack();
  __ load_earlyret_value(state);  // 32 bits returns value in rdx, so don't reuse

  const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread));
  __ movptr(rcx, Address(thread, JavaThread::jvmti_thread_state_offset()));
  Address cond_addr(rcx, JvmtiThreadState::earlyret_state_offset());

  // Clear the earlyret state
  __ movl(cond_addr, JvmtiThreadState::earlyret_inactive);

  __ remove_activation(state, rsi,
                       false, /* throw_monitor_exception */
                       false, /* install_monitor_exception */
                       true); /* notify_jvmdi */
  __ jmp(rsi);

  return entry;
} // end of ForceEarlyReturn support


//-----------------------------------------------------------------------------
// Helper for vtos entry point generation

void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
                                                         address& bep,
                                                         address& cep,
                                                         address& sep,
                                                         address& aep,
                                                         address& iep,
                                                         address& lep,
                                                         address& fep,
                                                         address& dep,
                                                         address& vep) {
  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
  Label L;
  aep = __ pc();  __ push_ptr();   __ jmp(L);
#ifndef _LP64
  fep = __ pc(); __ push(ftos); __ jmp(L);
  dep = __ pc(); __ push(dtos); __ jmp(L);
#else
  fep = __ pc();  __ push_f(xmm0); __ jmp(L);
  dep = __ pc();  __ push_d(xmm0); __ jmp(L);
#endif // _LP64
  lep = __ pc();  __ push_l();     __ jmp(L);
  bep = cep = sep =
  iep = __ pc();  __ push_i();
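  // btos/ctos/stos share the itos entry point: all three are pushed as
  // 32-bit ints. vtos pushes nothing and simply falls through to the
  // bind below.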
  vep = __ pc();
  __ bind(L);
  generate_and_dispatch(t);
}

//-----------------------------------------------------------------------------

// Non-product code
#ifndef PRODUCT

address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
  address entry = __ pc();

#ifndef _LP64
  // prepare expression stack
  __ pop(rcx);          // pop return address so expression stack is 'pure'
  __ push(state);       // save tosca

  // pass tosca registers as arguments & call tracer
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode), rcx, rax, rdx);
  __ mov(rcx, rax);     // make sure return address is not destroyed by pop(state)
  __ pop(state);        // restore tosca

  // return
  __ jmp(rcx);
#else
  __ push(state);
  __ push(c_rarg0);
  __ push(c_rarg1);
  __ push(c_rarg2);
  __ push(c_rarg3);
  __ mov(c_rarg2, rax);  // Pass itos
#ifdef _WIN64
  __ movflt(xmm3, xmm0); // Pass ftos
#endif
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode),
             c_rarg1, c_rarg2, c_rarg3);
  __ pop(c_rarg3);
  __ pop(c_rarg2);
  __ pop(c_rarg1);
  __ pop(c_rarg0);
  __ pop(state);
  __ ret(0);                                   // return from result handler
#endif // _LP64

  return entry;
}

void TemplateInterpreterGenerator::count_bytecode() {
  __ incrementl(ExternalAddress((address) &BytecodeCounter::_counter_value));
}

void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
  __ incrementl(ExternalAddress((address) &BytecodeHistogram::_counters[t->bytecode()]));
}

void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
  // The pair index encodes the previous bytecode in the low bits and the
  // current one in the high bits: shift out the older bytecode and merge
  // in the current one before bumping the corresponding counter.
  __ mov32(rbx, ExternalAddress((address) &BytecodePairHistogram::_index));
  __ shrl(rbx, BytecodePairHistogram::log2_number_of_codes);
  __ orl(rbx,
         ((int) t->bytecode()) <<
         BytecodePairHistogram::log2_number_of_codes);
  __ mov32(ExternalAddress((address) &BytecodePairHistogram::_index), rbx);
  __ lea(rscratch1, ExternalAddress((address) BytecodePairHistogram::_counters));
  __ incrementl(Address(rscratch1, rbx, Address::times_4));
}


void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
  // Call a little run-time stub to avoid blow-up for each bytecode.
  // The run-time stub saves the right registers, depending on
  // the tosca in-state for the given template.

  assert(Interpreter::trace_code(t->tos_in()) != NULL,
         "entry must have been generated");
#ifndef _LP64
  __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
#else
  __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
  __ andptr(rsp, -16); // align stack as required by ABI
  __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
  __ mov(rsp, r12); // restore sp
  __ reinit_heapbase();
#endif // _LP64
}


void TemplateInterpreterGenerator::stop_interpreter_at() {
  Label L;
  __ cmp32(ExternalAddress((address) &BytecodeCounter::_counter_value),
           StopInterpreterAt);
  __ jcc(Assembler::notEqual, L);
  __ int3();
  __ bind(L);
}
#endif // !PRODUCT