/*
 * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateInterpreterGenerator.hpp"
#include "interpreter/templateTable.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

#define __ _masm->

// Size of interpreter code.  Increase if too small.  The interpreter will
// fail with a guarantee ("not enough space for interpreter generation")
// if it is too small.
// Run with +PrintInterpreter to get the VM to print out the size.
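// For example (illustrative invocation; in product builds the flag is
// diagnostic and may first need -XX:+UnlockDiagnosticVMOptions):
//
//   java -XX:+UnlockDiagnosticVMOptions -XX:+PrintInterpreter -version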
// Max size with JVMTI
#ifdef AMD64
int TemplateInterpreter::InterpreterCodeSize = JVMCI_ONLY(268) NOT_JVMCI(256) * 1024;
#else
int TemplateInterpreter::InterpreterCodeSize = 224 * 1024;
#endif // AMD64

// Global Register Names
static const Register rbcp    = LP64_ONLY(r13) NOT_LP64(rsi);
static const Register rlocals = LP64_ONLY(r14) NOT_LP64(rdi);

const int method_offset = frame::interpreter_frame_method_offset * wordSize;
const int bcp_offset    = frame::interpreter_frame_bcp_offset    * wordSize;
const int locals_offset = frame::interpreter_frame_locals_offset * wordSize;


//-----------------------------------------------------------------------------

address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();

#ifdef ASSERT
  {
    Label L;
    __ lea(rax, Address(rbp,
                        frame::interpreter_frame_monitor_block_top_offset *
                        wordSize));
    __ cmpptr(rax, rsp); // rax = maximal rsp for current rbp (stack
                         // grows negative)
    __ jcc(Assembler::aboveEqual, L); // check if frame is complete
    __ stop ("interpreter frame not set up");
    __ bind(L);
  }
#endif // ASSERT
  // Restore bcp under the assumption that the current frame is still
  // interpreted
  __ restore_bcp();

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // throw exception
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::throw_StackOverflowError));
  return entry;
}

address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(
        const char* name) {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // setup parameters
  // ??? convention: expect aberrant index in register ebx
  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  __ lea(rarg, ExternalAddress((address)name));
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::
                              throw_ArrayIndexOutOfBoundsException),
             rarg, rbx);
  return entry;
}

address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();

  // object is at TOS
  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  __ pop(rarg);

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();

  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::
                              throw_ClassCastException),
             rarg);
  return entry;
}

address TemplateInterpreterGenerator::generate_exception_handler_common(
        const char* name, const char* message, bool pass_oop) {
  assert(!pass_oop || message == NULL, "either oop or message but not both");
  address entry = __ pc();

  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  Register rarg2 = NOT_LP64(rbx) LP64_ONLY(c_rarg2);

  if (pass_oop) {
    // object is at TOS
    __ pop(rarg2);
  }
  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // setup parameters
  __ lea(rarg, ExternalAddress((address)name));
  if (pass_oop) {
    __ call_VM(rax, CAST_FROM_FN_PTR(address,
                                     InterpreterRuntime::
                                     create_klass_exception),
               rarg, rarg2);
  } else {
    __ lea(rarg2, ExternalAddress((address)message));
    __ call_VM(rax,
               CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception),
               rarg, rarg2);
  }
  // throw exception
  __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
  return entry;
}


address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
  address entry = __ pc();
  // NULL last_sp until next java call
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
  __ dispatch_next(state);
  return entry;
}


address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
  address entry = __ pc();

#ifndef _LP64
#ifdef COMPILER2
  // The FPU stack is clean if UseSSE >= 2 but must be cleaned in other cases
  if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) {
    for (int i = 1; i < 8; i++) {
      __ ffree(i);
    }
  } else if (UseSSE < 2) {
    __ empty_FPU_stack();
  }
#endif // COMPILER2
  if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) {
    __ MacroAssembler::verify_FPU(1, "generate_return_entry_for compiled");
  } else {
    __ MacroAssembler::verify_FPU(0, "generate_return_entry_for compiled");
  }

  if (state == ftos) {
    __ MacroAssembler::verify_FPU(UseSSE >= 1 ? 0 : 1, "generate_return_entry_for in interpreter");
  } else if (state == dtos) {
    __ MacroAssembler::verify_FPU(UseSSE >= 2 ? 0 : 1, "generate_return_entry_for in interpreter");
  }
#endif // _LP64

  // Restore stack bottom in case i2c adjusted stack
  __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  // and NULL it as marker that esp is now tos until next java call
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);

  __ restore_bcp();
  __ restore_locals();

  if (state == atos) {
    Register mdp = rbx;
    Register tmp = rcx;
    __ profile_return_type(mdp, rax, tmp);
  }

  const Register cache = rbx;
  const Register index = rcx;
  __ get_cache_and_index_at_bcp(cache, index, 1, index_size);

  const Register flags = cache;
  __ movl(flags, Address(cache, index, Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
  __ andl(flags, ConstantPoolCacheEntry::parameter_size_mask);
  __ lea(rsp, Address(rsp, flags, Interpreter::stackElementScale()));
  __ dispatch_next(state, step);

  return entry;
}


address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step) {
  address entry = __ pc();

#ifndef _LP64
  if (state == ftos) {
    __ MacroAssembler::verify_FPU(UseSSE >= 1 ? 0 : 1, "generate_deopt_entry_for in interpreter");
  } else if (state == dtos) {
    __ MacroAssembler::verify_FPU(UseSSE >= 2 ? 0 : 1, "generate_deopt_entry_for in interpreter");
  }
#endif // _LP64

  // NULL last_sp until next java call
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
  __ restore_bcp();
  __ restore_locals();
  const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread));
#if INCLUDE_JVMCI
  // Check if we need to take lock at entry of synchronized method.  This can
  // only occur on method entry so emit it only for vtos with step 0.
  if ((UseJVMCICompiler || UseAOT) && state == vtos && step == 0) {
    Label L;
    __ cmpb(Address(thread, JavaThread::pending_monitorenter_offset()), 0);
    __ jcc(Assembler::zero, L);
    // Clear flag.
    __ movb(Address(thread, JavaThread::pending_monitorenter_offset()), 0);
    // Satisfy calling convention for lock_method().
    __ get_method(rbx);
    // Take lock.
    lock_method();
    __ bind(L);
  } else {
#ifdef ASSERT
    if (UseJVMCICompiler) {
      Label L;
      __ cmpb(Address(r15_thread, JavaThread::pending_monitorenter_offset()), 0);
      __ jccb(Assembler::zero, L);
      __ stop("unexpected pending monitor in deopt entry");
      __ bind(L);
    }
#endif
  }
#endif
  // handle exceptions
  {
    Label L;
    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
    __ jcc(Assembler::zero, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }
  __ dispatch_next(state, step);
  return entry;
}

address TemplateInterpreterGenerator::generate_result_handler_for(
        BasicType type) {
  address entry = __ pc();
  switch (type) {
  case T_BOOLEAN: __ c2bool(rax);            break;
#ifndef _LP64
  case T_CHAR   : __ andptr(rax, 0xFFFF);    break;
#else
  case T_CHAR   : __ movzwl(rax, rax);       break;
#endif // _LP64
  case T_BYTE   : __ sign_extend_byte(rax);  break;
  case T_SHORT  : __ sign_extend_short(rax); break;
  case T_INT    : /* nothing to do */        break;
  case T_LONG   : /* nothing to do */        break;
  case T_VOID   : /* nothing to do */        break;
#ifndef _LP64
  case T_DOUBLE :
  case T_FLOAT  :
    { const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp();
      __ pop(t);                            // remove return address first
      // Must return a result for interpreter or compiler. In SSE
      // mode, results are returned in xmm0 and the FPU stack must
      // be empty.
      if (type == T_FLOAT && UseSSE >= 1) {
        // Load ST0
        __ fld_d(Address(rsp, 0));
        // Store as float and empty fpu stack
        __ fstp_s(Address(rsp, 0));
        // and reload
        __ movflt(xmm0, Address(rsp, 0));
      } else if (type == T_DOUBLE && UseSSE >= 2 ) {
        __ movdbl(xmm0, Address(rsp, 0));
      } else {
        // restore ST0
        __ fld_d(Address(rsp, 0));
      }
      // and pop the temp
      __ addptr(rsp, 2 * wordSize);
      __ push(t);                           // restore return address
    }
    break;
#else
  case T_FLOAT  : /* nothing to do */        break;
  case T_DOUBLE : /* nothing to do */        break;
#endif // _LP64

  case T_OBJECT :
    // retrieve result from frame
    __ movptr(rax, Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize));
    // and verify it
    __ verify_oop(rax);
    break;
  default       : ShouldNotReachHere();
  }
  __ ret(0);                                   // return from result handler
  return entry;
}

address TemplateInterpreterGenerator::generate_safept_entry_for(
        TosState state,
        address runtime_entry) {
  address entry = __ pc();
  __ push(state);
  __ call_VM(noreg, runtime_entry);
  __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
  return entry;
}



// Helpers for commoning out cases in the various type of method entries.
//


// increment invocation count & check for overflow
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test
//
// rbx: method
// rcx: invocation counter
//
void TemplateInterpreterGenerator::generate_counter_incr(
        Label* overflow,
        Label* profile_method,
        Label* profile_method_continue) {
  Label done;
  // Note: In tiered we increment counters in either Method* or the MDO,
  // depending on whether we're profiling or not.
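  // Roughly: the increment_mask_and_jump calls below add count_increment to
  // the chosen counter, mask the sum with the per-method invoke mask, and
  // take the 'overflow' branch when the masked bits come up zero.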
  if (TieredCompilation) {
    int increment = InvocationCounter::count_increment;
    Label no_mdo;
    if (ProfileInterpreter) {
      // Are we profiling?
      __ movptr(rax, Address(rbx, Method::method_data_offset()));
      __ testptr(rax, rax);
      __ jccb(Assembler::zero, no_mdo);
      // Increment counter in the MDO
      const Address mdo_invocation_counter(rax, in_bytes(MethodData::invocation_counter_offset()) +
                                                in_bytes(InvocationCounter::counter_offset()));
      const Address mask(rax, in_bytes(MethodData::invoke_mask_offset()));
      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rcx, false, Assembler::zero, overflow);
      __ jmp(done);
    }
    __ bind(no_mdo);
    // Increment counter in MethodCounters
    const Address invocation_counter(rax,
                  MethodCounters::invocation_counter_offset() +
                  InvocationCounter::counter_offset());
    __ get_method_counters(rbx, rax, done);
    const Address mask(rax, in_bytes(MethodCounters::invoke_mask_offset()));
    __ increment_mask_and_jump(invocation_counter, increment, mask, rcx,
                               false, Assembler::zero, overflow);
    __ bind(done);
  } else { // not TieredCompilation
    const Address backedge_counter(rax,
                  MethodCounters::backedge_counter_offset() +
                  InvocationCounter::counter_offset());
    const Address invocation_counter(rax,
                  MethodCounters::invocation_counter_offset() +
                  InvocationCounter::counter_offset());

    __ get_method_counters(rbx, rax, done);

    if (ProfileInterpreter) {
      __ incrementl(Address(rax,
              MethodCounters::interpreter_invocation_counter_offset()));
    }
    // Update standard invocation counters
    __ movl(rcx, invocation_counter);
    __ incrementl(rcx, InvocationCounter::count_increment);
    __ movl(invocation_counter, rcx); // save invocation count

    __ movl(rax, backedge_counter);   // load backedge counter
    __ andl(rax, InvocationCounter::count_mask_value); // mask out the status bits

    __ addl(rcx, rax);                // add both counters

    // profile_method is non-null only for interpreted method so
    // profile_method != NULL == !native_call

    if (ProfileInterpreter && profile_method != NULL) {
      // Test to see if we should create a method data oop
      __ movptr(rax, Address(rbx, Method::method_counters_offset()));
      __ cmp32(rcx, Address(rax, in_bytes(MethodCounters::interpreter_profile_limit_offset())));
      __ jcc(Assembler::less, *profile_method_continue);

      // if no method data exists, go to profile_method
      __ test_method_data_pointer(rax, *profile_method);
    }

    __ movptr(rax, Address(rbx, Method::method_counters_offset()));
    __ cmp32(rcx, Address(rax, in_bytes(MethodCounters::interpreter_invocation_limit_offset())));
    __ jcc(Assembler::aboveEqual, *overflow);
    __ bind(done);
  }
}

void TemplateInterpreterGenerator::generate_counter_overflow(Label& do_continue) {

  // Asm interpreter on entry
  // r14/rdi - locals
  // r13/rsi - bcp
  // rbx     - method
  // rdx     - cpool --- DOES NOT APPEAR TO BE TRUE
  // rbp     - interpreter frame

  // On return (i.e. jump to entry_point) [ back to invocation of interpreter ]
  // Everything as it was on entry
  // rdx is not restored. Doesn't appear to really be set.

  // InterpreterRuntime::frequency_counter_overflow takes two
  // arguments, the first (thread) is passed by call_VM, the second
  // indicates if the counter overflow occurs at a backwards branch
  // (NULL bcp).  We pass zero for it.
  // The call returns the address of the verified entry point for the method
  // or NULL if the compilation did not complete (either went background or
  // bailed out).
  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  __ movl(rarg, 0);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::frequency_counter_overflow),
             rarg);

  __ movptr(rbx, Address(rbp, method_offset));   // restore Method*
  // Preserve invariant that r13/r14 contain bcp/locals of sender frame
  // and jump to the interpreted entry.
  __ jmp(do_continue, relocInfo::none);
}

// See if we've got enough room on the stack for locals plus overhead below
// JavaThread::stack_overflow_limit(). If not, throw a StackOverflowError
// without going through the signal handler, i.e., reserved and yellow zones
// will not be made usable. The shadow zone must suffice to handle the
// overflow.
// The expression stack grows down incrementally, so the normal guard
// page mechanism will work for that.
//
// NOTE: Since the additional locals are also always pushed (this was not
// obvious in generate_fixed_frame), the guard should work for them too.
//
// Args:
//      rdx: number of additional locals this frame needs (what we must check)
//      rbx: Method*
//
// Kills:
//      rax
void TemplateInterpreterGenerator::generate_stack_overflow_check(void) {

  // monitor entry size: see picture of stack in frame_x86.hpp
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

  // total overhead size: entry_size + (saved rbp through expr stack
  // bottom).  be sure to change this if you add/subtract anything
  // to/from the overhead area
  const int overhead_size =
    -(frame::interpreter_frame_initial_sp_offset * wordSize) + entry_size;

  const int page_size = os::vm_page_size();

  Label after_frame_check;

  // see if the frame is greater than one page in size. If so,
  // then we need to verify there is enough stack space remaining
  // for the additional locals.
  __ cmpl(rdx, (page_size - overhead_size) / Interpreter::stackElementSize);
  __ jcc(Assembler::belowEqual, after_frame_check);

  // compute rsp as if this were going to be the last frame on
  // the stack before the red zone

  Label after_frame_check_pop;
  const Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread);
#ifndef _LP64
  __ push(thread);
  __ get_thread(thread);
#endif

  const Address stack_limit(thread, JavaThread::stack_overflow_limit_offset());

  // locals + overhead, in bytes
  __ mov(rax, rdx);
  __ shlptr(rax, Interpreter::logStackElementSize); // Convert parameter count to bytes.
  __ addptr(rax, overhead_size);

#ifdef ASSERT
  Label limit_okay;
  // Verify that thread stack overflow limit is non-zero.
  __ cmpptr(stack_limit, (int32_t)NULL_WORD);
  __ jcc(Assembler::notEqual, limit_okay);
  __ stop("stack overflow limit is zero");
  __ bind(limit_okay);
#endif

  // Add locals/frame size to stack limit.
  __ addptr(rax, stack_limit);

  // Check against the current stack bottom.
  __ cmpptr(rsp, rax);

  __ jcc(Assembler::above, after_frame_check_pop);
  NOT_LP64(__ pop(rsi));  // get saved bcp

  // Restore sender's sp as SP. This is necessary if the sender's
  // frame is an extended compiled frame (see gen_c2i_adapter())
  // and safer anyway in case of JSR292 adaptations.
  __ pop(rax); // return address must be moved if SP is changed
  __ mov(rsp, rbcp);
  __ push(rax);

  // Note: the restored frame is not necessarily interpreted.
  // Use the shared runtime version of the StackOverflowError.
  assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
  __ jump(ExternalAddress(StubRoutines::throw_StackOverflowError_entry()));
  // all done with frame size check
  __ bind(after_frame_check_pop);
  NOT_LP64(__ pop(rsi));

  // all done with frame size check
  __ bind(after_frame_check);
}

// Allocate monitor and lock method (asm interpreter)
//
// Args:
//      rbx: Method*
//      r14/rdi: locals
//
// Kills:
//      rax
//      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ...(param regs)
//      rscratch1, rscratch2 (scratch regs)
void TemplateInterpreterGenerator::lock_method() {
  // synchronize method
  const Address access_flags(rbx, Method::access_flags_offset());
  const Address monitor_block_top(
        rbp,
        frame::interpreter_frame_monitor_block_top_offset * wordSize);
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

#ifdef ASSERT
  {
    Label L;
    __ movl(rax, access_flags);
    __ testl(rax, JVM_ACC_SYNCHRONIZED);
    __ jcc(Assembler::notZero, L);
    __ stop("method doesn't need synchronization");
    __ bind(L);
  }
#endif // ASSERT

  // get synchronization object
  {
    Label done;
    __ movl(rax, access_flags);
    __ testl(rax, JVM_ACC_STATIC);
    // get receiver (assume this is frequent case)
    __ movptr(rax, Address(rlocals, Interpreter::local_offset_in_bytes(0)));
    __ jcc(Assembler::zero, done);
    __ load_mirror(rax, rbx);

#ifdef ASSERT
    {
      Label L;
      __ testptr(rax, rax);
      __ jcc(Assembler::notZero, L);
      __ stop("synchronization object is NULL");
      __ bind(L);
    }
#endif // ASSERT

    __ bind(done);
  }

  // add space for monitor & lock
  __ subptr(rsp, entry_size);        // add space for a monitor entry
  __ movptr(monitor_block_top, rsp); // set new monitor block top
  // store object
  __ movptr(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax);
  const Register lockreg = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
  __ movptr(lockreg, rsp); // object address
  __ lock_object(lockreg);
}

// Generate a fixed interpreter frame. This is identical setup for
// interpreted methods and for native methods hence the shared code.
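//
// Sketch of the fixed frame the pushes below build, from higher to lower
// addresses: return address, saved rbp, sender sp, last_sp (NULL), Method*,
// mirror, mdp (or 0), ConstantPoolCache*, locals pointer, bcp (or 0 for
// natives), and a word pointing at the expression stack bottom.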
//
// Args:
//      rax: return address
//      rbx: Method*
//      r14/rdi: pointer to locals
//      r13/rsi: sender sp
//      rdx: cp cache
void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  // initialize fixed part of activation frame
  __ push(rax);            // save return address
  __ enter();              // save old & set new rbp
  __ push(rbcp);           // set sender sp
  __ push((int)NULL_WORD); // leave last_sp as null
  __ movptr(rbcp, Address(rbx, Method::const_offset()));    // get ConstMethod*
  __ lea(rbcp, Address(rbcp, ConstMethod::codes_offset())); // get codebase
  __ push(rbx);            // save Method*
  // Get mirror and store it in the frame as GC root for this Method*
  __ load_mirror(rdx, rbx);
  __ push(rdx);
  if (ProfileInterpreter) {
    Label method_data_continue;
    __ movptr(rdx, Address(rbx, in_bytes(Method::method_data_offset())));
    __ testptr(rdx, rdx);
    __ jcc(Assembler::zero, method_data_continue);
    __ addptr(rdx, in_bytes(MethodData::data_offset()));
    __ bind(method_data_continue);
    __ push(rdx);          // set the mdp (method data pointer)
  } else {
    __ push(0);
  }

  __ movptr(rdx, Address(rbx, Method::const_offset()));
  __ movptr(rdx, Address(rdx, ConstMethod::constants_offset()));
  __ movptr(rdx, Address(rdx, ConstantPool::cache_offset_in_bytes()));
  __ push(rdx);     // set constant pool cache
  __ push(rlocals); // set locals pointer
  if (native_call) {
    __ push(0);     // no bcp
  } else {
    __ push(rbcp);  // set bcp
  }
  __ push(0);       // reserve word for pointer to expression stack bottom
  __ movptr(Address(rsp, 0), rsp); // set expression stack bottom
}

// End of helpers

// Method entry for java.lang.ref.Reference.get.
address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
#if INCLUDE_ALL_GCS
  // Code: _aload_0, _getfield, _areturn
  // parameter size = 1
  //
  // The code that gets generated by this routine is split into 2 parts:
  //    1. The "intrinsified" code for G1 (or any SATB based GC),
  //    2. The slow path - which is an expansion of the regular method entry.
  //
  // Notes:-
  // * In the G1 code we do not check whether we need to block for
  //   a safepoint. If G1 is enabled then we must execute the specialized
  //   code for Reference.get (except when the Reference object is null)
  //   so that we can log the value in the referent field with an SATB
  //   update buffer.
  //   If the code for the getfield template is modified so that the
  //   G1 pre-barrier code is executed when the current method is
  //   Reference.get() then going through the normal method entry
  //   will be fine.
  // * The G1 code can, however, check the receiver object (the instance
  //   of java.lang.Reference) and jump to the slow path if null. If the
  //   Reference object is null then we obviously cannot fetch the referent
  //   and so we don't need to call the G1 pre-barrier. Thus we can use the
  //   regular method entry code to generate the NPE.
  //
  // rbx: Method*

  // r13: senderSP must preserve for slow path, set SP to it on fast path

  address entry = __ pc();

  const int referent_offset = java_lang_ref_Reference::referent_offset;
  guarantee(referent_offset > 0, "referent offset not initialized");

  if (UseG1GC) {
    Label slow_path;
    // rbx: method

    // Check if local 0 != NULL
    // If the receiver is null then it is OK to jump to the slow path.
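    // No interpreter frame has been built yet, so rsp still points at the
    // return address and the receiver (the single parameter) is the word
    // just above it.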
    __ movptr(rax, Address(rsp, wordSize));

    __ testptr(rax, rax);
    __ jcc(Assembler::zero, slow_path);

    // rax: local 0
    // rbx: method (but can be used as scratch now)
    // rdx: scratch
    // rdi: scratch

    // Preserve the sender sp in case the pre-barrier
    // calls the runtime
    NOT_LP64(__ push(rsi));

    // Generate the G1 pre-barrier code to log the value of
    // the referent field in an SATB buffer.

    // Load the value of the referent field.
    const Address field_address(rax, referent_offset);
    __ load_heap_oop(rax, field_address);

    const Register sender_sp = NOT_LP64(rsi) LP64_ONLY(r13);
    const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
    NOT_LP64(__ get_thread(thread));

    // Generate the G1 pre-barrier code to log the value of
    // the referent field in an SATB buffer.
    __ g1_write_barrier_pre(noreg /* obj */,
                            rax /* pre_val */,
                            thread /* thread */,
                            rbx /* tmp */,
                            true /* tosca_live */,
                            true /* expand_call */);

    // _areturn
    NOT_LP64(__ pop(rsi));      // get sender sp
    __ pop(rdi);                // get return address
    __ mov(rsp, sender_sp);     // set sp to sender sp
    __ jmp(rdi);
    __ ret(0);

    // generate a vanilla interpreter entry as the slow path
    __ bind(slow_path);
    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
    return entry;
  }
#endif // INCLUDE_ALL_GCS

  // If G1 is not enabled then attempt to go through the accessor entry point
  // Reference.get is an accessor
  return NULL;
}

void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
  // Quick & dirty stack overflow checking: bang the stack & handle trap.
  // Note that we do the banging after the frame is setup, since the exception
  // handling code expects to find a valid interpreter frame on the stack.
  // Doing the banging earlier fails if the caller frame is not an interpreter
  // frame.
  // (Also, the exception throwing code expects to unlock any synchronized
  // method receiver, so do the banging after locking the receiver.)

  // Bang each page in the shadow zone. We can't assume it's been done for
  // an interpreter frame with greater than a page of locals, so each page
  // needs to be checked.  Only true for non-native.
  if (UseStackBanging) {
    const int page_size = os::vm_page_size();
    const int n_shadow_pages = ((int)JavaThread::stack_shadow_zone_size()) / page_size;
    const int start_page = native_call ? n_shadow_pages : 1;
    for (int pages = start_page; pages <= n_shadow_pages; pages++) {
      __ bang_stack_with_offset(pages*page_size);
    }
  }
}

// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the
// native method than the typical interpreter frame setup.
address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;

  // rbx: Method*
  // rbcp: sender sp

  address entry_point = __ pc();

  const Address constMethod       (rbx, Method::const_offset());
  const Address access_flags      (rbx, Method::access_flags_offset());
  const Address size_of_parameters(rcx, ConstMethod::
                                        size_of_parameters_offset());


  // get parameter size (always needed)
  __ movptr(rcx, constMethod);
  __ load_unsigned_short(rcx, size_of_parameters);

  // native calls don't need the stack size check since they have no
  // expression stack and the arguments are already on the stack and
  // we only add a handful of words to the stack

  // rbx: Method*
  // rcx: size of parameters
  // rbcp: sender sp
  __ pop(rax); // get return address

  // for natives the size of locals is zero

  // compute beginning of parameters
  __ lea(rlocals, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));

  // add 2 zero-initialized slots for native calls
  // initialize result_handler slot
  __ push((int) NULL_WORD);
  // slot for oop temp
  // (static native method holder mirror/jni oop result)
  __ push((int) NULL_WORD);

  // initialize fixed part of activation frame
  generate_fixed_frame(true);

  // make sure method is native & not abstract
#ifdef ASSERT
  __ movl(rax, access_flags);
  {
    Label L;
    __ testl(rax, JVM_ACC_NATIVE);
    __ jcc(Assembler::notZero, L);
    __ stop("tried to execute non-native method as native");
    __ bind(L);
  }
  {
    Label L;
    __ testl(rax, JVM_ACC_ABSTRACT);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // Since at this point in the method invocation the exception handler
  // would try to exit the monitor of synchronized methods which hasn't
  // been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation will
  // check this flag.

  const Register thread1 = NOT_LP64(rax) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread1));
  const Address do_not_unlock_if_synchronized(thread1,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ movbool(do_not_unlock_if_synchronized, true);

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  bang_stack_shadow_pages(true);

  // reset the _do_not_unlock_if_synchronized flag
  NOT_LP64(__ get_thread(thread1));
  __ movbool(do_not_unlock_if_synchronized, false);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so the method is not locked if the counter overflows.
  if (synchronized) {
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    {
      Label L;
      __ movl(rax, access_flags);
      __ testl(rax, JVM_ACC_SYNCHRONIZED);
      __ jcc(Assembler::zero, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  {
    Label L;
    const Address monitor_block_top(rbp,
                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ movptr(rax, monitor_block_top);
    __ cmpptr(rax, rsp);
    __ jcc(Assembler::equal, L);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif

  // jvmti support
  __ notify_method_entry();

  // work registers
  const Register method = rbx;
  const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);
  const Register t      = NOT_LP64(rcx) LP64_ONLY(r11);

  // allocate space for parameters
  __ get_method(method);
  __ movptr(t, Address(method, Method::const_offset()));
  __ load_unsigned_short(t, Address(t, ConstMethod::size_of_parameters_offset()));

#ifndef _LP64
  __ shlptr(t, Interpreter::logStackElementSize); // Convert parameter count to bytes.
  __ addptr(t, 2*wordSize); // allocate two more slots for JNIEnv and possible mirror
  __ subptr(rsp, t);
  __ andptr(rsp, -(StackAlignmentInBytes)); // gcc needs 16 byte aligned stacks to do XMM intrinsics
#else
  __ shll(t, Interpreter::logStackElementSize);

  __ subptr(rsp, t);
  __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
  __ andptr(rsp, -16); // must be 16 byte boundary (see amd64 ABI)
#endif // _LP64

  // get signature handler
  {
    Label L;
    __ movptr(t, Address(method, Method::signature_handler_offset()));
    __ testptr(t, t);
    __ jcc(Assembler::notZero, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               method);
    __ get_method(method);
    __ movptr(t, Address(method, Method::signature_handler_offset()));
    __ bind(L);
  }

  // call signature handler
  assert(InterpreterRuntime::SignatureHandlerGenerator::from() == rlocals,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::to() == rsp,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == NOT_LP64(t) LP64_ONLY(rscratch1),
         "adjust this code");

  // The generated handlers do not touch RBX (the method oop).
  // However, large signatures cannot be cached and are generated
  // each time here.  The slow-path generator can do a GC on return,
  // so we must reload it after the call.
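  // t now holds the signature handler entry point; per the asserts above it
  // copies the Java arguments from rlocals into the native ABI slots at rsp
  // and leaves the address of the result handler in rax.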
  __ call(t);
  __ get_method(method); // slow path can do a GC, reload RBX


  // result handler is in rax
  // set result handler
  __ movptr(Address(rbp,
                    (frame::interpreter_frame_result_handler_offset) * wordSize),
            rax);

  // pass mirror handle if static call
  {
    Label L;
    __ movl(t, Address(method, Method::access_flags_offset()));
    __ testl(t, JVM_ACC_STATIC);
    __ jcc(Assembler::zero, L);
    // get mirror
    __ load_mirror(t, method);
    // copy mirror into activation frame
    __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize),
              t);
    // pass handle to mirror
#ifndef _LP64
    __ lea(t, Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
    __ movptr(Address(rsp, wordSize), t);
#else
    __ lea(c_rarg1,
           Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
#endif // _LP64
    __ bind(L);
  }

  // get native function entry point
  {
    Label L;
    __ movptr(rax, Address(method, Method::native_function_offset()));
    ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
    __ cmpptr(rax, unsatisfied.addr());
    __ jcc(Assembler::notEqual, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               method);
    __ get_method(method);
    __ movptr(rax, Address(method, Method::native_function_offset()));
    __ bind(L);
  }

  // pass JNIEnv
#ifndef _LP64
  __ get_thread(thread);
  __ lea(t, Address(thread, JavaThread::jni_environment_offset()));
  __ movptr(Address(rsp, 0), t);

  // set_last_Java_frame_before_call
  // It is enough that the pc() points into the right code segment.
  // It does not have to be the correct return pc.
  __ set_last_Java_frame(thread, noreg, rbp, __ pc());
#else
  __ lea(c_rarg0, Address(r15_thread, JavaThread::jni_environment_offset()));

  // It is enough that the pc() points into the right code
  // segment. It does not have to be the correct return pc.
  __ set_last_Java_frame(rsp, rbp, (address) __ pc());
#endif // _LP64

  // change thread state
#ifdef ASSERT
  {
    Label L;
    __ movl(t, Address(thread, JavaThread::thread_state_offset()));
    __ cmpl(t, _thread_in_Java);
    __ jcc(Assembler::equal, L);
    __ stop("Wrong thread state in native stub");
    __ bind(L);
  }
#endif

  // Change state to native

  __ movl(Address(thread, JavaThread::thread_state_offset()),
          _thread_in_native);

  // Call the native method.
  __ call(rax);
  // 32: result potentially in rdx:rax or ST0
  // 64: result potentially in rax or xmm0

  // Verify or restore cpu control state after JNI call
  __ restore_cpu_control_state_after_jni();

  // NOTE: The order of these pushes is known to frame::interpreter_frame_result
  // in order to extract the result of a method call. If the order of these
  // pushes change or anything else is added to the stack then the code in
  // interpreter_frame_result must also change.
#ifndef _LP64
  // save potential result in ST(0) & rdx:rax
  // (if result handler is the T_FLOAT or T_DOUBLE handler, result must be in ST0 -
  // the check is necessary to avoid potential Intel FPU overflow problems by saving/restoring 'empty' FPU registers)
  // It is safe to do this push because state is _thread_in_native and return address will be found
  // via _last_native_pc and not via _last_java_sp

  // NOTE: the order of these push(es) is known to frame::interpreter_frame_result.
  // If the order changes or anything else is added to the stack the code in
  // interpreter_frame_result will have to be changed.

  { Label L;
    Label push_double;
    ExternalAddress float_handler(AbstractInterpreter::result_handler(T_FLOAT));
    ExternalAddress double_handler(AbstractInterpreter::result_handler(T_DOUBLE));
    __ cmpptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset + 1)*wordSize),
              float_handler.addr());
    __ jcc(Assembler::equal, push_double);
    __ cmpptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset + 1)*wordSize),
              double_handler.addr());
    __ jcc(Assembler::notEqual, L);
    __ bind(push_double);
    __ push_d(); // FP values are returned using the FPU, so push FPU contents (even if UseSSE > 0).
    __ bind(L);
  }
#else
  __ push(dtos);
#endif // _LP64

  __ push(ltos);

  // change thread state
  NOT_LP64(__ get_thread(thread));
  __ movl(Address(thread, JavaThread::thread_state_offset()),
          _thread_in_native_trans);

  if (os::is_MP()) {
    if (UseMembar) {
      // Force this write out before the read below
      __ membar(Assembler::Membar_mask_bits(
           Assembler::LoadLoad | Assembler::LoadStore |
           Assembler::StoreLoad | Assembler::StoreStore));
    } else {
      // Write serialization page so VM thread can do a pseudo remote membar.
      // We use the current thread pointer to calculate a thread specific
      // offset to write to within the page. This minimizes bus traffic
      // due to cache line collision.
      __ serialize_memory(thread, rcx);
    }
  }

#ifndef _LP64
  if (AlwaysRestoreFPU) {
    // Make sure the control word is correct.
    __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
  }
#endif // _LP64

  // check for safepoint operation in progress and/or pending suspend requests
  {
    Label Continue;
    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
             SafepointSynchronize::_not_synchronized);

    Label L;
    __ jcc(Assembler::notEqual, L);
    __ cmpl(Address(thread, JavaThread::suspend_flags_offset()), 0);
    __ jcc(Assembler::equal, Continue);
    __ bind(L);

    // Don't use call_VM as it will see a possible pending exception
    // and forward it and never return here, preventing us from
    // clearing _last_native_pc down below.  Also can't use
    // call_VM_leaf either as it will check to see if r13 & r14 are
    // preserved and correspond to the bcp/locals pointers.  So we do a
    // runtime call by hand.
    //
#ifndef _LP64
    __ push(thread);
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
                                            JavaThread::check_special_condition_for_native_trans)));
    __ increment(rsp, wordSize);
    __ get_thread(thread);
#else
    __ mov(c_rarg0, r15_thread);
    __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
    __ andptr(rsp, -16); // align stack as required by ABI
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
    __ mov(rsp, r12); // restore sp
    __ reinit_heapbase();
#endif // _LP64
    __ bind(Continue);
  }

  // change thread state
  __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_Java);

  // reset_last_Java_frame
  __ reset_last_Java_frame(thread, true);

  if (CheckJNICalls) {
    // clear_pending_jni_exception_check
    __ movptr(Address(thread, JavaThread::pending_jni_exception_check_fn_offset()), NULL_WORD);
  }

  // reset handle block
  __ movptr(t, Address(thread, JavaThread::active_handles_offset()));
  __ movl(Address(t, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);

  // If result is an oop unbox and store it in frame where gc will see it
  // and result handler will pick it up

  {
    Label no_oop, store_result;
    __ lea(t, ExternalAddress(AbstractInterpreter::result_handler(T_OBJECT)));
    __ cmpptr(t, Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize));
    __ jcc(Assembler::notEqual, no_oop);
    // retrieve result
    __ pop(ltos);
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, store_result);
    __ movptr(rax, Address(rax, 0));
    __ bind(store_result);
    __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize), rax);
    // keep stack depth as expected by pushing oop which will eventually be discarded
    __ push(ltos);
    __ bind(no_oop);
  }


  {
    Label no_reguard;
    __ cmpl(Address(thread, JavaThread::stack_guard_state_offset()),
            JavaThread::stack_guard_yellow_reserved_disabled);
    __ jcc(Assembler::notEqual, no_reguard);

    __ pusha(); // XXX only save smashed registers
#ifndef _LP64
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
    __ popa();
#else
    __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
    __ andptr(rsp, -16); // align stack as required by ABI
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
    __ mov(rsp, r12); // restore sp
    __ popa(); // XXX only restore smashed registers
    __ reinit_heapbase();
#endif // _LP64

    __ bind(no_reguard);
  }


  // The method register is junk from after the thread_in_native transition
  // until here.  Also can't call_VM until the bcp has been
  // restored.  Need bcp for throwing exception below so get it now.
  __ get_method(method);

  // restore to have legal interpreter frame, i.e., bci == 0 <=> code_base()
  __ movptr(rbcp, Address(method, Method::const_offset()));   // get ConstMethod*
  __ lea(rbcp, Address(rbcp, ConstMethod::codes_offset()));   // get codebase

  // handle exceptions (exception handling will handle unlocking!)
  {
    Label L;
    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
    __ jcc(Assembler::zero, L);
    // Note: At some point we may want to unify this with the code
    // used in call_VM_base(); i.e., we should use the
    // StubRoutines::forward_exception code. For now this doesn't work
    // here because the rsp is not correctly set at this point.
    __ MacroAssembler::call_VM(noreg,
                               CAST_FROM_FN_PTR(address,
                               InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  // do unlocking if necessary
  {
    Label L;
    __ movl(t, Address(method, Method::access_flags_offset()));
    __ testl(t, JVM_ACC_SYNCHRONIZED);
    __ jcc(Assembler::zero, L);
    // the code below should be shared with interpreter macro
    // assembler implementation
    {
      Label unlock;
      // BasicObjectLock will be first in list, since this is a
      // synchronized method. However, need to check that the object
      // has not been unlocked by an explicit monitorexit bytecode.
      const Address monitor(rbp,
                            (intptr_t)(frame::interpreter_frame_initial_sp_offset *
                                       wordSize - (int)sizeof(BasicObjectLock)));

      const Register regmon = NOT_LP64(rdx) LP64_ONLY(c_rarg1);

      // the monitor is expected in c_rarg1 for the slow unlock path
      __ lea(regmon, monitor); // address of first monitor

      __ movptr(t, Address(regmon, BasicObjectLock::obj_offset_in_bytes()));
      __ testptr(t, t);
      __ jcc(Assembler::notZero, unlock);

      // Entry already unlocked, need to throw exception
      __ MacroAssembler::call_VM(noreg,
                                 CAST_FROM_FN_PTR(address,
                                                  InterpreterRuntime::throw_illegal_monitor_state_exception));
      __ should_not_reach_here();

      __ bind(unlock);
      __ unlock_object(regmon);
    }
    __ bind(L);
  }

  // jvmti support
  // Note: This must happen _after_ handling/throwing any exceptions since
  //       the exception handler code notifies the runtime of method exits
  //       too. If this happens before, method entry/exit notifications are
  //       not properly paired (was bug - gri 11/22/99).
  __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);

  // restore potential result in edx:eax, call result handler to
  // restore potential result in ST0 & handle result

  __ pop(ltos);
  LP64_ONLY( __ pop(dtos));

  __ movptr(t, Address(rbp,
                       (frame::interpreter_frame_result_handler_offset) * wordSize));
  __ call(t);

  // remove activation
  __ movptr(t, Address(rbp,
                       frame::interpreter_frame_sender_sp_offset *
                       wordSize)); // get sender sp
  __ leave();  // remove frame anchor
  __ pop(rdi); // get return address
  __ mov(rsp, t); // set sp to sender sp
  __ jmp(rdi);

  if (inc_counter) {
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(continue_after_compile);
  }

  return entry_point;
}

// Abstract method entry
// Attempt to execute abstract method.  Throw exception.
address TemplateInterpreterGenerator::generate_abstract_entry(void) {

  address entry_point = __ pc();

  // abstract method entry

  // pop return address, reset last_sp to NULL
  __ empty_expression_stack();
  __ restore_bcp();    // rsi must be correct for exception handler   (was destroyed)
  __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)

  // throw exception
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
  // the call_VM checks for exception, so we should never return here.
  __ should_not_reach_here();

  return entry_point;
}

//
// Generic interpreted method entry to (asm) interpreter
//
address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;

  // ebx: Method*
  // rbcp: sender sp
  address entry_point = __ pc();

  const Address constMethod(rbx, Method::const_offset());
  const Address access_flags(rbx, Method::access_flags_offset());
  const Address size_of_parameters(rdx,
                                   ConstMethod::size_of_parameters_offset());
  const Address size_of_locals(rdx, ConstMethod::size_of_locals_offset());


  // get parameter size (always needed)
  __ movptr(rdx, constMethod);
  __ load_unsigned_short(rcx, size_of_parameters);

  // rbx: Method*
  // rcx: size of parameters
  // rbcp: sender_sp (could differ from sp+wordSize if we were called via c2i )

  __ load_unsigned_short(rdx, size_of_locals); // get size of locals in words
  __ subl(rdx, rcx); // rdx = no. of additional locals

  // YYY
  //   __ incrementl(rdx);
  //   __ andl(rdx, -2);

  // see if we've got enough room on the stack for locals plus overhead.
  generate_stack_overflow_check();

  // get return address
  __ pop(rax);

  // compute beginning of parameters
  __ lea(rlocals, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));

  // rdx - # of additional locals
  // allocate space for locals
  // explicitly initialize locals
  {
    Label exit, loop;
    __ testl(rdx, rdx);
    __ jcc(Assembler::lessEqual, exit); // do nothing if rdx <= 0
    __ bind(loop);
    __ push((int) NULL_WORD); // initialize local variables
    __ decrementl(rdx);       // until everything initialized
    __ jcc(Assembler::greater, loop);
    __ bind(exit);
  }

  // initialize fixed part of activation frame
  generate_fixed_frame(false);

  // make sure method is not native & not abstract
#ifdef ASSERT
  __ movl(rax, access_flags);
  {
    Label L;
    __ testl(rax, JVM_ACC_NATIVE);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute native method as non-native");
    __ bind(L);
  }
  {
    Label L;
    __ testl(rax, JVM_ACC_ABSTRACT);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // Since at this point in the method invocation the exception
  // handler would try to exit the monitor of synchronized methods
  // which hasn't been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation
  // will check this flag.
  const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread));
  const Address do_not_unlock_if_synchronized(thread,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ movbool(do_not_unlock_if_synchronized, true);

  __ profile_parameters_type(rax, rcx, rdx);
  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  Label profile_method;
  Label profile_method_continue;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow,
                          &profile_method,
                          &profile_method_continue);
    if (ProfileInterpreter) {
      __ bind(profile_method_continue);
    }
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  // check for synchronized interpreted methods
  bang_stack_shadow_pages(false);

  // reset the _do_not_unlock_if_synchronized flag
  NOT_LP64(__ get_thread(thread));
  __ movbool(do_not_unlock_if_synchronized, false);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so the method is not locked if the counter overflows.
  if (synchronized) {
    // Allocate monitor and lock method
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    {
      Label L;
      __ movl(rax, access_flags);
      __ testl(rax, JVM_ACC_SYNCHRONIZED);
      __ jcc(Assembler::zero, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  {
    Label L;
    const Address monitor_block_top (rbp,
                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ movptr(rax, monitor_block_top);
    __ cmpptr(rax, rsp);
    __ jcc(Assembler::equal, L);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif

  // jvmti support
  __ notify_method_entry();

  __ dispatch_next(vtos);

  // invocation counter overflow
  if (inc_counter) {
    if (ProfileInterpreter) {
      // We have decided to profile this method in the interpreter
      __ bind(profile_method);
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
      __ set_method_data_pointer_for_bcp();
      __ get_method(rbx);
      __ jmp(profile_method_continue);
    }
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(continue_after_compile);
  }

  return entry_point;
}

//-----------------------------------------------------------------------------
// Exceptions

void TemplateInterpreterGenerator::generate_throw_exception() {
  // Entry point in previous activation (i.e., if the caller was
  // interpreted)
  Interpreter::_rethrow_exception_entry = __ pc();
  // Restore sp to interpreter_frame_last_sp even though we are going
  // to empty the expression stack for the exception processing.
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
  // rax: exception
  // rdx: return address/pc that threw exception
  __ restore_bcp();    // r13/rsi points to call/send
  __ restore_locals();
  LP64_ONLY(__ reinit_heapbase());  // restore r12 as heapbase.
  // Entry point for exceptions thrown within interpreter code
  Interpreter::_throw_exception_entry = __ pc();
  // expression stack is undefined here
  // rax: exception
  // r13/rsi: exception bcp
  __ verify_oop(rax);
  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  LP64_ONLY(__ mov(c_rarg1, rax));

  // expression stack must be empty before entering the VM in case of
  // an exception
  __ empty_expression_stack();
  // find exception handler address and preserve exception oop
  __ call_VM(rdx,
             CAST_FROM_FN_PTR(address,
                          InterpreterRuntime::exception_handler_for_exception),
             rarg);
  // rax: exception handler entry point
  // rdx: preserved exception oop
  // r13/rsi: bcp for exception handler
  __ push_ptr(rdx); // push exception which is now the only value on the stack
  __ jmp(rax); // jump to exception handler (may be _remove_activation_entry!)

  // If the exception is not handled in the current frame the frame is
  // removed and the exception is rethrown (i.e. exception
  // continuation is _rethrow_exception).
  //
  // Note: At this point the bci is still the bci for the instruction
  // which caused the exception and the expression stack is
  // empty. Thus, for any VM calls at this point, GC will find a legal
  // oop map (with empty expression stack).

  // In current activation
  // tos: exception
  // esi: exception bcp

  //
  // JVMTI PopFrame support
  //

  Interpreter::_remove_activation_preserving_args_entry = __ pc();
  __ empty_expression_stack();
  // Set the popframe_processing bit in pending_popframe_condition
  // indicating that we are currently handling popframe, so that
  // call_VMs that may happen later do not trigger new popframe
  // handling cycles.
  const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread));
  __ movl(rdx, Address(thread, JavaThread::popframe_condition_offset()));
  __ orl(rdx, JavaThread::popframe_processing_bit);
  __ movl(Address(thread, JavaThread::popframe_condition_offset()), rdx);

  {
    // Check to see whether we are returning to a deoptimized frame.
    // (The PopFrame call ensures that the caller of the popped frame is
    // either interpreted or compiled and deoptimizes it if compiled.)
    // In this case, we can't call dispatch_next() after the frame is
    // popped, but instead must save the incoming arguments and restore
    // them after deoptimization has occurred.
    //
    // Note that we don't compare the return PC against the
    // deoptimization blob's unpack entry because of the presence of
    // adapter frames in C2.
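    // interpreter_contains(pc) returns nonzero iff the return address lies
    // within the interpreter's code, i.e. the caller is interpreted and
    // therefore cannot be a deoptimized compiled frame.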
    Label caller_not_deoptimized;
    Register rarg = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
    __ movptr(rarg, Address(rbp, frame::return_addr_offset * wordSize));
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                               InterpreterRuntime::interpreter_contains), rarg);
    __ testl(rax, rax);
    __ jcc(Assembler::notZero, caller_not_deoptimized);

    // Compute size of arguments for saving when returning to
    // deoptimized caller
    __ get_method(rax);
    __ movptr(rax, Address(rax, Method::const_offset()));
    __ load_unsigned_short(rax, Address(rax, in_bytes(ConstMethod::
                                                size_of_parameters_offset())));
    __ shll(rax, Interpreter::logStackElementSize);
    __ restore_locals();
    __ subptr(rlocals, rax);
    __ addptr(rlocals, wordSize);
    // Save these arguments
    NOT_LP64(__ get_thread(thread));
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                                           Deoptimization::
                                           popframe_preserve_args),
                          thread, rax, rlocals);

    __ remove_activation(vtos, rdx,
                         /* throw_monitor_exception */ false,
                         /* install_monitor_exception */ false,
                         /* notify_jvmdi */ false);

    // Inform deoptimization that it is responsible for restoring
    // these arguments
    NOT_LP64(__ get_thread(thread));
    __ movl(Address(thread, JavaThread::popframe_condition_offset()),
            JavaThread::popframe_force_deopt_reexecution_bit);

    // Continue in deoptimization handler
    __ jmp(rdx);

    __ bind(caller_not_deoptimized);
  }

  __ remove_activation(vtos, rdx, /* rdx result (retaddr) is not used */
                       /* throw_monitor_exception */ false,
                       /* install_monitor_exception */ false,
                       /* notify_jvmdi */ false);

  // Finish with popframe handling
  // A previous I2C followed by a deoptimization might have moved the
  // outgoing arguments further up the stack. PopFrame expects the
  // mutations to those outgoing arguments to be preserved and other
  // constraints basically require this frame to look exactly as
  // though it had previously invoked an interpreted activation with
  // no space between the top of the expression stack (current
  // last_sp) and the top of stack. Rather than force deopt to
  // maintain this kind of invariant all the time we call a small
  // fixup routine to move the mutated arguments onto the top of our
  // expression stack if necessary.
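
  // (Descriptive note added by the editor; argument roles are inferred
  //  from the register setup below: the fixup call passes the current
  //  sp and the saved last_sp, and popframe_move_outgoing_args is
  //  expected to copy any mutated outgoing-argument words between the
  //  two locations so they end up on top of the expression stack.)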
#ifndef _LP64
  __ mov(rax, rsp);
  __ movptr(rbx, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ get_thread(thread);
  // PC must point into interpreter here
  __ set_last_Java_frame(thread, noreg, rbp, __ pc());
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), thread, rax, rbx);
  __ get_thread(thread);
#else
  __ mov(c_rarg1, rsp);
  __ movptr(c_rarg2, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  // PC must point into interpreter here
  __ set_last_Java_frame(noreg, rbp, __ pc());
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), r15_thread, c_rarg1, c_rarg2);
#endif
  __ reset_last_Java_frame(thread, true);

  // Restore the last_sp and null it out
  __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);

  __ restore_bcp();
  __ restore_locals();
  // The method data pointer was incremented already during
  // call profiling. We have to restore the mdp for the current bcp.
  if (ProfileInterpreter) {
    __ set_method_data_pointer_for_bcp();
  }

  // Clear the popframe condition flag
  NOT_LP64(__ get_thread(thread));
  __ movl(Address(thread, JavaThread::popframe_condition_offset()),
          JavaThread::popframe_inactive);

#if INCLUDE_JVMTI
  {
    Label L_done;
    const Register local0 = rlocals;

    __ cmpb(Address(rbcp, 0), Bytecodes::_invokestatic);
    __ jcc(Assembler::notEqual, L_done);

    // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
    // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.
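
    // (Editor's sketch of the scenario, hedged: signature-polymorphic
    //  MethodHandle linker calls, e.g. MethodHandle.linkToStatic(...),
    //  appear as _invokestatic with a trailing MemberName appendix
    //  argument. If such a call is re-executed after PopFrame, that
    //  MemberName must be present again; member_name_arg_or_null
    //  returns it, or NULL when no restoration is needed.)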

    __ get_method(rdx);
    __ movptr(rax, Address(local0, 0));
    __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), rax, rdx, rbcp);

    __ testptr(rax, rax);
    __ jcc(Assembler::zero, L_done);

    __ movptr(Address(local0, 0), rax);
    __ bind(L_done);
  }
#endif // INCLUDE_JVMTI

  __ dispatch_next(vtos);
  // end of PopFrame support

  Interpreter::_remove_activation_entry = __ pc();

  // preserve exception over this code sequence
  __ pop_ptr(rax);
  NOT_LP64(__ get_thread(thread));
  __ movptr(Address(thread, JavaThread::vm_result_offset()), rax);
  // remove the activation (without doing throws on illegalMonitorExceptions)
  __ remove_activation(vtos, rdx, false, true, false);
  // restore exception
  NOT_LP64(__ get_thread(thread));
  __ get_vm_result(rax, thread);

  // In between activations - previous activation type unknown yet
  // compute continuation point - the continuation point expects the
  // following registers set up:
  //
  // rax: exception
  // rdx: return address/pc that threw exception
  // rsp: expression stack of caller
  // rbp: ebp of caller
  __ push(rax);                                  // save exception
  __ push(rdx);                                  // save return address
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                          SharedRuntime::exception_handler_for_return_address),
                        thread, rdx);
  __ mov(rbx, rax);                              // save exception handler
  __ pop(rdx);                                   // restore return address
  __ pop(rax);                                   // restore exception
  // Note that an "issuing PC" is actually the next PC after the call
  __ jmp(rbx);                                   // jump to exception
                                                 // handler of caller
}


//
// JVMTI ForceEarlyReturn support
//
address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
  address entry = __ pc();

  __ restore_bcp();
  __ restore_locals();
  __ empty_expression_stack();
  __ load_earlyret_value(state);  // the 32-bit VM returns the value in rdx, so don't reuse it

  const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread));
  __ movptr(rcx, Address(thread, JavaThread::jvmti_thread_state_offset()));
  Address cond_addr(rcx, JvmtiThreadState::earlyret_state_offset());

  // Clear the earlyret state
  __ movl(cond_addr, JvmtiThreadState::earlyret_inactive);

  __ remove_activation(state, rsi,
                       false, /* throw_monitor_exception */
                       false, /* install_monitor_exception */
                       true); /* notify_jvmdi */
  __ jmp(rsi);

  return entry;
} // end of ForceEarlyReturn support


//-----------------------------------------------------------------------------
// Helper for vtos entry point generation

void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
                                                         address& bep,
                                                         address& cep,
                                                         address& sep,
                                                         address& aep,
                                                         address& iep,
                                                         address& lep,
                                                         address& fep,
                                                         address& dep,
                                                         address& vep) {
  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
  Label L;
  aep = __ pc(); __ push_ptr(); __ jmp(L);
#ifndef _LP64
  fep = __ pc(); __ push(ftos); __ jmp(L);
  dep = __ pc(); __ push(dtos); __ jmp(L);
#else
  fep = __ pc(); __ push_f(xmm0); __ jmp(L);
  dep = __ pc(); __ push_d(xmm0); __ jmp(L);
#endif // _LP64
  lep = __ pc(); __ push_l(); __ jmp(L);
  bep = cep = sep =
  iep = __ pc(); __ push_i();
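  // btos, ctos and stos deliberately share the itos entry above:
  // byte, char and short values are already held as 32-bit ints at
  // tos, so a single push_i() covers all four states.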
  vep = __ pc();
  __ bind(L);
  generate_and_dispatch(t);
}

//-----------------------------------------------------------------------------

// Non-product code
#ifndef PRODUCT

address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
  address entry = __ pc();

#ifndef _LP64
  // prepare expression stack
  __ pop(rcx);        // pop return address so expression stack is 'pure'
  __ push(state);     // save tosca

  // pass tosca registers as arguments & call tracer
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode), rcx, rax, rdx);
  __ mov(rcx, rax);   // make sure return address is not destroyed by pop(state)
  __ pop(state);      // restore tosca

  // return
  __ jmp(rcx);
#else
  __ push(state);
  __ push(c_rarg0);
  __ push(c_rarg1);
  __ push(c_rarg2);
  __ push(c_rarg3);
  __ mov(c_rarg2, rax);  // Pass itos
#ifdef _WIN64
  __ movflt(xmm3, xmm0); // Pass ftos
#endif
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode),
             c_rarg1, c_rarg2, c_rarg3);
  __ pop(c_rarg3);
  __ pop(c_rarg2);
  __ pop(c_rarg1);
  __ pop(c_rarg0);
  __ pop(state);
  __ ret(0);             // return from result handler
#endif // _LP64

  return entry;
}

void TemplateInterpreterGenerator::count_bytecode() {
  __ incrementl(ExternalAddress((address) &BytecodeCounter::_counter_value));
}

void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
  __ incrementl(ExternalAddress((address) &BytecodeHistogram::_counters[t->bytecode()]));
}

void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
  __ mov32(rbx, ExternalAddress((address) &BytecodePairHistogram::_index));
  __ shrl(rbx, BytecodePairHistogram::log2_number_of_codes);
  __ orl(rbx,
         ((int) t->bytecode()) <<
         BytecodePairHistogram::log2_number_of_codes);
  __ mov32(ExternalAddress((address) &BytecodePairHistogram::_index), rbx);
  __ lea(rscratch1, ExternalAddress((address) BytecodePairHistogram::_counters));
  __ incrementl(Address(rscratch1, rbx, Address::times_4));
}


void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
  // Call a little run-time stub to avoid blow-up for each bytecode.
  // The run-time stub saves the right registers, depending on
  // the tosca in-state for the given template.

  assert(Interpreter::trace_code(t->tos_in()) != NULL,
         "entry must have been generated");
#ifndef _LP64
  __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
#else
  __ mov(r12, rsp);    // remember sp (can only use r12 if not using call_VM)
  __ andptr(rsp, -16); // align stack as required by ABI
  __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
  __ mov(rsp, r12);    // restore sp
  __ reinit_heapbase();
#endif // _LP64
}


void TemplateInterpreterGenerator::stop_interpreter_at() {
  Label L;
  __ cmp32(ExternalAddress((address) &BytecodeCounter::_counter_value),
           StopInterpreterAt);
  __ jcc(Assembler::notEqual, L);
  __ int3();
  __ bind(L);
}
#endif // !PRODUCT
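
// (Editor's note, hedged: the non-product helpers above are wired into
//  bytecode dispatch by the shared generate_and_dispatch(), guarded by
//  develop flags such as CountBytecodes, PrintBytecodeHistogram,
//  PrintBytecodePairHistogram, TraceBytecodes and StopInterpreterAt.)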