/*
 * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateInterpreterGenerator.hpp"
#include "interpreter/templateTable.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

#define __ _masm->

// Size of interpreter code. Increase if too small. The interpreter will
// fail with a guarantee ("not enough space for interpreter generation")
// if it is too small.
// Run with +PrintInterpreter to get the VM to print out the size.
// Max size with JVMTI
#ifdef AMD64
int TemplateInterpreter::InterpreterCodeSize = JVMCI_ONLY(268) NOT_JVMCI(256) * 1024;
#else
int TemplateInterpreter::InterpreterCodeSize = 224 * 1024;
#endif // AMD64

// Global Register Names
static const Register rbcp    = LP64_ONLY(r13) NOT_LP64(rsi);
static const Register rlocals = LP64_ONLY(r14) NOT_LP64(rdi);

const int method_offset = frame::interpreter_frame_method_offset * wordSize;
const int bcp_offset    = frame::interpreter_frame_bcp_offset    * wordSize;
const int locals_offset = frame::interpreter_frame_locals_offset * wordSize;


//-----------------------------------------------------------------------------

address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();

#ifdef ASSERT
  {
    Label L;
    __ lea(rax, Address(rbp,
                        frame::interpreter_frame_monitor_block_top_offset *
                        wordSize));
    __ cmpptr(rax, rsp); // rax = maximal rsp for current rbp (stack
                         // grows negative)
    __ jcc(Assembler::aboveEqual, L); // check if frame is complete
    __ stop("interpreter frame not set up");
    __ bind(L);
  }
#endif // ASSERT
  // Restore bcp under the assumption that the current frame is still
  // interpreted
  __ restore_bcp();

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // throw exception
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::throw_StackOverflowError));
  return entry;
}

address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(
        const char* name) {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // setup parameters
  // convention: expect aberrant index in register ebx
  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  __ lea(rarg, ExternalAddress((address)name));
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::
                              throw_ArrayIndexOutOfBoundsException),
             rarg, rbx);
  return entry;
}

address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();

  // object is at TOS
  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  __ pop(rarg);

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();

  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::
                              throw_ClassCastException),
             rarg);
  return entry;
}

address TemplateInterpreterGenerator::generate_exception_handler_common(
        const char* name, const char* message, bool pass_oop) {
  assert(!pass_oop || message == NULL, "either oop or message but not both");
  address entry = __ pc();

  Register rarg  = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  Register rarg2 = NOT_LP64(rbx) LP64_ONLY(c_rarg2);

  if (pass_oop) {
    // object is at TOS
    __ pop(rarg2);
  }
  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // setup parameters
  __ lea(rarg, ExternalAddress((address)name));
  if (pass_oop) {
    __ call_VM(rax, CAST_FROM_FN_PTR(address,
                                     InterpreterRuntime::
                                     create_klass_exception),
               rarg, rarg2);
  } else {
    __ lea(rarg2, ExternalAddress((address)message));
    __ call_VM(rax,
               CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception),
               rarg, rarg2);
  }
  // throw exception
  __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
  return entry;
}

address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
  address entry = __ pc();

#ifndef _LP64
#ifdef COMPILER2
  // The FPU stack is clean if UseSSE >= 2 but must be cleaned in other cases
  if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) {
    for (int i = 1; i < 8; i++) {
      __ ffree(i);
    }
  } else if (UseSSE < 2) {
    __ empty_FPU_stack();
  }
#endif // COMPILER2
  if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) {
    __ MacroAssembler::verify_FPU(1, "generate_return_entry_for compiled");
  } else {
    __ MacroAssembler::verify_FPU(0, "generate_return_entry_for compiled");
  }

  if (state == ftos) {
    __ MacroAssembler::verify_FPU(UseSSE >= 1 ? 0 : 1, "generate_return_entry_for in interpreter");
  } else if (state == dtos) {
    __ MacroAssembler::verify_FPU(UseSSE >= 2 ? 0 : 1, "generate_return_entry_for in interpreter");
  }
#endif // _LP64

  // Restore stack bottom in case i2c adjusted stack
  __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  // and NULL it as marker that esp is now tos until next java call
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);

  if (state == qtos && ValueTypeReturnedAsFields) {
    // A value type is being returned. If fields are in registers we
    // need to allocate a value type instance and initialize it with
    // the value of the fields.
    __ super_call_VM_leaf(StubRoutines::store_value_type_fields_to_buf());
  }

  __ restore_bcp();
  __ restore_locals();

  if (state == atos) {
    Register mdp = rbx;
    Register tmp = rcx;
    __ profile_return_type(mdp, rax, tmp);
  }

  // Pop the arguments that were pushed for the call we are returning from:
  // the parameter count is kept in the flags word of the resolved
  // ConstantPoolCacheEntry for the invoke bytecode at the current bcp.
  const Register cache = rbx;
  const Register index = rcx;
  __ get_cache_and_index_at_bcp(cache, index, 1, index_size);

  const Register flags = cache;
  __ movl(flags, Address(cache, index, Address::times_ptr,
                         ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
  __ andl(flags, ConstantPoolCacheEntry::parameter_size_mask);
  __ lea(rsp, Address(rsp, flags, Interpreter::stackElementScale()));

  const Register java_thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
  if (JvmtiExport::can_pop_frame()) {
    NOT_LP64(__ get_thread(java_thread));
    __ check_and_handle_popframe(java_thread);
  }
  if (JvmtiExport::can_force_early_return()) {
    NOT_LP64(__ get_thread(java_thread));
    __ check_and_handle_earlyret(java_thread);
  }

  __ dispatch_next(state, step);

  return entry;
}


address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step) {
  address entry = __ pc();

#ifndef _LP64
  if (state == ftos) {
    __ MacroAssembler::verify_FPU(UseSSE >= 1 ? 0 : 1, "generate_deopt_entry_for in interpreter");
  } else if (state == dtos) {
    __ MacroAssembler::verify_FPU(UseSSE >= 2 ? 0 : 1, "generate_deopt_entry_for in interpreter");
  }
#endif // _LP64

  // NULL last_sp until next java call
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
  __ restore_bcp();
  __ restore_locals();
  const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread));
#if INCLUDE_JVMCI
  // Check if we need to take lock at entry of synchronized method. This can
  // only occur on method entry so emit it only for vtos with step 0.
  if ((UseJVMCICompiler || UseAOT) && state == vtos && step == 0) {
    Label L;
    __ cmpb(Address(thread, JavaThread::pending_monitorenter_offset()), 0);
    __ jcc(Assembler::zero, L);
    // Clear flag.
    __ movb(Address(thread, JavaThread::pending_monitorenter_offset()), 0);
    // Satisfy calling convention for lock_method().
    __ get_method(rbx);
    // Take lock.
    lock_method();
    __ bind(L);
  } else {
#ifdef ASSERT
    if (UseJVMCICompiler) {
      Label L;
      __ cmpb(Address(r15_thread, JavaThread::pending_monitorenter_offset()), 0);
      __ jccb(Assembler::zero, L);
      __ stop("unexpected pending monitor in deopt entry");
      __ bind(L);
    }
#endif
  }
#endif
  // handle exceptions
  {
    Label L;
    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
    __ jcc(Assembler::zero, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }
  __ dispatch_next(state, step);
  return entry;
}

address TemplateInterpreterGenerator::generate_result_handler_for(
        BasicType type) {
  address entry = __ pc();
  switch (type) {
  case T_BOOLEAN: __ c2bool(rax);            break;
#ifndef _LP64
  case T_CHAR   : __ andptr(rax, 0xFFFF);    break;
#else
  case T_CHAR   : __ movzwl(rax, rax);       break;
#endif // _LP64
  case T_BYTE   : __ sign_extend_byte(rax);  break;
  case T_SHORT  : __ sign_extend_short(rax); break;
  case T_INT    : /* nothing to do */        break;
  case T_LONG   : /* nothing to do */        break;
  case T_VOID   : /* nothing to do */        break;
#ifndef _LP64
  case T_DOUBLE :
  case T_FLOAT  :
    { const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp();
      __ pop(t);                            // remove return address first
      // Must return a result for interpreter or compiler. In SSE
      // mode, results are returned in xmm0 and the FPU stack must
      // be empty.
      if (type == T_FLOAT && UseSSE >= 1) {
        // Load ST0
        __ fld_d(Address(rsp, 0));
        // Store as float and empty fpu stack
        __ fstp_s(Address(rsp, 0));
        // and reload
        __ movflt(xmm0, Address(rsp, 0));
      } else if (type == T_DOUBLE && UseSSE >= 2) {
        __ movdbl(xmm0, Address(rsp, 0));
      } else {
        // restore ST0
        __ fld_d(Address(rsp, 0));
      }
      // and pop the temp
      __ addptr(rsp, 2 * wordSize);
      __ push(t);                           // restore return address
    }
    break;
#else
  case T_FLOAT  : /* nothing to do */        break;
  case T_DOUBLE : /* nothing to do */        break;
#endif // _LP64

  case T_VALUETYPE: // fall through (value types are handled with oops)
  case T_OBJECT :
    // retrieve result from frame
    __ movptr(rax, Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize));
    // and verify it
    __ verify_oop(rax);
    break;
  default       : ShouldNotReachHere();
  }
  __ ret(0);                                 // return from result handler
  return entry;
}

address TemplateInterpreterGenerator::generate_safept_entry_for(
        TosState state,
        address runtime_entry) {
  address entry = __ pc();
  __ push(state);
  __ call_VM(noreg, runtime_entry);
  __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
  return entry;
}



// Helpers for commoning out cases in the various type of method entries.
//


// increment invocation count & check for overflow
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test
//
// rbx: method
// rcx: invocation counter
//
void TemplateInterpreterGenerator::generate_counter_incr(
        Label* overflow,
        Label* profile_method,
        Label* profile_method_continue) {
  Label done;
  // Note: In tiered we increment either counters in MethodCounters* or in MDO
  // depending on whether we're profiling or not.
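  // increment_mask_and_jump adds count_increment to the chosen counter, stores
  // it back, and branches to *overflow whenever the incremented value ANDed
  // with the invoke mask is zero, i.e. the notification fires at a periodic
  // rate rather than on a single absolute threshold.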
  if (TieredCompilation) {
    int increment = InvocationCounter::count_increment;
    Label no_mdo;
    if (ProfileInterpreter) {
      // Are we profiling?
      __ movptr(rax, Address(rbx, Method::method_data_offset()));
      __ testptr(rax, rax);
      __ jccb(Assembler::zero, no_mdo);
      // Increment counter in the MDO
      const Address mdo_invocation_counter(rax, in_bytes(MethodData::invocation_counter_offset()) +
                                                in_bytes(InvocationCounter::counter_offset()));
      const Address mask(rax, in_bytes(MethodData::invoke_mask_offset()));
      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rcx, false, Assembler::zero, overflow);
      __ jmp(done);
    }
    __ bind(no_mdo);
    // Increment counter in MethodCounters
    const Address invocation_counter(rax,
                                     MethodCounters::invocation_counter_offset() +
                                     InvocationCounter::counter_offset());
    __ get_method_counters(rbx, rax, done);
    const Address mask(rax, in_bytes(MethodCounters::invoke_mask_offset()));
    __ increment_mask_and_jump(invocation_counter, increment, mask, rcx,
                               false, Assembler::zero, overflow);
    __ bind(done);
  } else { // not TieredCompilation
    const Address backedge_counter(rax,
                                   MethodCounters::backedge_counter_offset() +
                                   InvocationCounter::counter_offset());
    const Address invocation_counter(rax,
                                     MethodCounters::invocation_counter_offset() +
                                     InvocationCounter::counter_offset());

    __ get_method_counters(rbx, rax, done);

    if (ProfileInterpreter) {
      __ incrementl(Address(rax,
                            MethodCounters::interpreter_invocation_counter_offset()));
    }
    // Update standard invocation counters
    __ movl(rcx, invocation_counter);
    __ incrementl(rcx, InvocationCounter::count_increment);
    __ movl(invocation_counter, rcx); // save invocation count

    __ movl(rax, backedge_counter);   // load backedge counter
    __ andl(rax, InvocationCounter::count_mask_value); // mask out the status bits

    __ addl(rcx, rax);                // add both counters

    // profile_method is non-null only for interpreted method so
    // profile_method != NULL == !native_call

    if (ProfileInterpreter && profile_method != NULL) {
      // Test to see if we should create a method data oop
      __ movptr(rax, Address(rbx, Method::method_counters_offset()));
      __ cmp32(rcx, Address(rax, in_bytes(MethodCounters::interpreter_profile_limit_offset())));
      __ jcc(Assembler::less, *profile_method_continue);

      // if no method data exists, go to profile_method
      __ test_method_data_pointer(rax, *profile_method);
    }

    __ movptr(rax, Address(rbx, Method::method_counters_offset()));
    __ cmp32(rcx, Address(rax, in_bytes(MethodCounters::interpreter_invocation_limit_offset())));
    __ jcc(Assembler::aboveEqual, *overflow);
    __ bind(done);
  }
}

void TemplateInterpreterGenerator::generate_counter_overflow(Label& do_continue) {

  // Asm interpreter on entry
  // r14/rdi - locals
  // r13/rsi - bcp
  // rbx     - method
  // rdx     - cpool --- DOES NOT APPEAR TO BE TRUE
  // rbp     - interpreter frame

  // On return (i.e. jump to entry_point) [ back to invocation of interpreter ]
  // Everything as it was on entry
  // rdx is not restored. Doesn't appear to really be set.

  // InterpreterRuntime::frequency_counter_overflow takes two
  // arguments, the first (thread) is passed by call_VM, the second
  // indicates if the counter overflow occurs at a backwards branch
  // (NULL bcp). We pass zero for it.
  // The call returns the address of the verified entry point for the method
  // or NULL if the compilation did not complete (either went background or
  // bailed out).
  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  __ movl(rarg, 0);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::frequency_counter_overflow),
             rarg);

  __ movptr(rbx, Address(rbp, method_offset)); // restore Method*
  // Preserve invariant that r13/r14 contain bcp/locals of sender frame
  // and jump to the interpreted entry.
  __ jmp(do_continue, relocInfo::none);
}

// See if we've got enough room on the stack for locals plus overhead below
// JavaThread::stack_overflow_limit(). If not, throw a StackOverflowError
// without going through the signal handler, i.e., reserved and yellow zones
// will not be made usable. The shadow zone must suffice to handle the
// overflow.
// The expression stack grows down incrementally, so the normal guard
// page mechanism will work for that.
//
// NOTE: The additional locals are always pushed (this wasn't obvious in
// generate_fixed_frame), so the guard should work for them too.
//
// Args:
//      rdx: number of additional locals this frame needs (what we must check)
//      rbx: Method*
//
// Kills:
//      rax
void TemplateInterpreterGenerator::generate_stack_overflow_check(void) {

  // monitor entry size: see picture of stack in frame_x86.hpp
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

  // total overhead size: entry_size + (saved rbp through expr stack
  // bottom). be sure to change this if you add/subtract anything
  // to/from the overhead area
  const int overhead_size =
    -(frame::interpreter_frame_initial_sp_offset * wordSize) + entry_size;

  const int page_size = os::vm_page_size();

  Label after_frame_check;

  // see if the frame is greater than one page in size. If so,
  // then we need to verify there is enough stack space remaining
  // for the additional locals.
  __ cmpl(rdx, (page_size - overhead_size) / Interpreter::stackElementSize);
  __ jcc(Assembler::belowEqual, after_frame_check);

  // compute rsp as if this were going to be the last frame on
  // the stack before the red zone

  Label after_frame_check_pop;
  const Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread);
#ifndef _LP64
  __ push(thread);
  __ get_thread(thread);
#endif

  const Address stack_limit(thread, JavaThread::stack_overflow_limit_offset());

  // locals + overhead, in bytes
  __ mov(rax, rdx);
  __ shlptr(rax, Interpreter::logStackElementSize); // Convert parameter count to bytes.
  __ addptr(rax, overhead_size);

#ifdef ASSERT
  Label limit_okay;
  // Verify that thread stack overflow limit is non-zero.
  __ cmpptr(stack_limit, (int32_t)NULL_WORD);
  __ jcc(Assembler::notEqual, limit_okay);
  __ stop("stack overflow limit is zero");
  __ bind(limit_okay);
#endif

  // Add locals/frame size to stack limit.
  __ addptr(rax, stack_limit);

  // Check against the current stack bottom.
  __ cmpptr(rsp, rax);

  __ jcc(Assembler::above, after_frame_check_pop);
  NOT_LP64(__ pop(rsi));  // get saved bcp

  // Restore sender's sp as SP. This is necessary if the sender's
  // frame is an extended compiled frame (see gen_c2i_adapter())
  // and safer anyway in case of JSR292 adaptations.
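  // Pop the return address, switch SP to the sender's sp, then push the
  // return address back so the shared StackOverflowError stub still sees an
  // ordinary post-call stack layout.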
  __ pop(rax); // return address must be moved if SP is changed
  __ mov(rsp, rbcp);
  __ push(rax);

  // Note: the restored frame is not necessarily interpreted.
  // Use the shared runtime version of the StackOverflowError.
  assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
  __ jump(ExternalAddress(StubRoutines::throw_StackOverflowError_entry()));
  // all done with frame size check
  __ bind(after_frame_check_pop);
  NOT_LP64(__ pop(rsi));

  // all done with frame size check
  __ bind(after_frame_check);
}

// Allocate monitor and lock method (asm interpreter)
//
// Args:
//      rbx: Method*
//      r14/rdi: locals
//
// Kills:
//      rax
//      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ...(param regs)
//      rscratch1, rscratch2 (scratch regs)
void TemplateInterpreterGenerator::lock_method() {
  // synchronize method
  const Address access_flags(rbx, Method::access_flags_offset());
  const Address monitor_block_top(
        rbp,
        frame::interpreter_frame_monitor_block_top_offset * wordSize);
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

#ifdef ASSERT
  {
    Label L;
    __ movl(rax, access_flags);
    __ testl(rax, JVM_ACC_SYNCHRONIZED);
    __ jcc(Assembler::notZero, L);
    __ stop("method doesn't need synchronization");
    __ bind(L);
  }
#endif // ASSERT

  // get synchronization object
  {
    Label done;
    __ movl(rax, access_flags);
    __ testl(rax, JVM_ACC_STATIC);
    // get receiver (assume this is frequent case)
    __ movptr(rax, Address(rlocals, Interpreter::local_offset_in_bytes(0)));
    __ jcc(Assembler::zero, done);
    // static method: synchronize on the class mirror instead
    __ load_mirror(rax, rbx);

#ifdef ASSERT
    {
      Label L;
      __ testptr(rax, rax);
      __ jcc(Assembler::notZero, L);
      __ stop("synchronization object is NULL");
      __ bind(L);
    }
#endif // ASSERT

    __ bind(done);
  }

  // add space for monitor & lock
  __ subptr(rsp, entry_size);        // add space for a monitor entry
  __ movptr(monitor_block_top, rsp); // set new monitor block top
  // store object
  __ movptr(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax);
  const Register lockreg = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
  __ movptr(lockreg, rsp); // object address
  __ lock_object(lockreg);
}

// Generate a fixed interpreter frame. The setup is identical for
// interpreted methods and for native methods, hence the shared code.
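//
// The frame layout built below, from high to low addresses: return address,
// saved rbp, sender sp, last_sp (NULL), Method*, mirror, mdp,
// ConstantPoolCache*, value type allocation pointer, locals pointer,
// bcp (0 for native calls), and the expression stack bottom pointer.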
//
// Args:
//      rax: return address
//      rbx: Method*
//      r14/rdi: pointer to locals
//      r13/rsi: sender sp
//      rdx: cp cache
void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  // initialize fixed part of activation frame
  __ push(rax);            // save return address
  __ enter();              // save old & set new rbp
  __ push(rbcp);           // set sender sp
  __ push((int)NULL_WORD); // leave last_sp as null
  __ movptr(rbcp, Address(rbx, Method::const_offset()));    // get ConstMethod*
  __ lea(rbcp, Address(rbcp, ConstMethod::codes_offset())); // get codebase
  __ push(rbx);            // save Method*
  // Get mirror and store it in the frame as GC root for this Method*
  __ load_mirror(rdx, rbx);
  __ push(rdx);
  if (ProfileInterpreter) {
    Label method_data_continue;
    __ movptr(rdx, Address(rbx, in_bytes(Method::method_data_offset())));
    __ testptr(rdx, rdx);
    __ jcc(Assembler::zero, method_data_continue);
    __ addptr(rdx, in_bytes(MethodData::data_offset()));
    __ bind(method_data_continue);
    __ push(rdx); // set the mdp (method data pointer)
  } else {
    __ push(0);
  }

  __ movptr(rdx, Address(rbx, Method::const_offset()));
  __ movptr(rdx, Address(rdx, ConstMethod::constants_offset()));
  __ movptr(rdx, Address(rdx, ConstantPool::cache_offset_in_bytes()));
  __ push(rdx); // set constant pool cache
  const Register thread1 = NOT_LP64(rdx) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread1));
  __ movptr(rdx, Address(thread1, JavaThread::vt_alloc_ptr_offset()));
  __ push(rdx);     // value type allocation pointer when activation is created
  __ push(rlocals); // set locals pointer
  if (native_call) {
    __ push(0);     // no bcp
  } else {
    __ push(rbcp);  // set bcp
  }
  __ push(0); // reserve word for pointer to expression stack bottom
  __ movptr(Address(rsp, 0), rsp); // set expression stack bottom
}

// End of helpers

// Method entry for java.lang.ref.Reference.get.
address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
#if INCLUDE_ALL_GCS
  // Code: _aload_0, _getfield, _areturn
  // parameter size = 1
  //
  // The code that gets generated by this routine is split into 2 parts:
  //    1. The "intrinsified" code for G1 (or any SATB based GC),
  //    2. The slow path - which is an expansion of the regular method entry.
  //
  // Notes:-
  // * In the G1 code we do not check whether we need to block for
  //   a safepoint. If G1 is enabled then we must execute the specialized
  //   code for Reference.get (except when the Reference object is null)
  //   so that we can log the value in the referent field with an SATB
  //   update buffer.
  //   If the code for the getfield template is modified so that the
  //   G1 pre-barrier code is executed when the current method is
  //   Reference.get() then going through the normal method entry
  //   will be fine.
  // * The G1 code can, however, check the receiver object (the instance
  //   of java.lang.Reference) and jump to the slow path if null. If the
  //   Reference object is null then we obviously cannot fetch the referent
  //   and so we don't need to call the G1 pre-barrier. Thus we can use the
  //   regular method entry code to generate the NPE.
  //
  // rbx: Method*

  // r13: senderSP must be preserved for the slow path; set SP to it on the fast path

  address entry = __ pc();

  const int referent_offset = java_lang_ref_Reference::referent_offset;
  guarantee(referent_offset > 0, "referent offset not initialized");

  if (UseG1GC) {
    Label slow_path;
    // rbx: method

    // Check if local 0 != NULL
    // If the receiver is null then it is OK to jump to the slow path.
    __ movptr(rax, Address(rsp, wordSize));

    __ testptr(rax, rax);
    __ jcc(Assembler::zero, slow_path);

    // rax: local 0
    // rbx: method (but can be used as scratch now)
    // rdx: scratch
    // rdi: scratch

    // Preserve the sender sp in case the pre-barrier
    // calls the runtime
    NOT_LP64(__ push(rsi));

    // Load the value of the referent field.
    const Address field_address(rax, referent_offset);
    __ load_heap_oop(rax, field_address);

    const Register sender_sp = NOT_LP64(rsi) LP64_ONLY(r13);
    const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
    NOT_LP64(__ get_thread(thread));

    // Generate the G1 pre-barrier code to log the value of
    // the referent field in an SATB buffer.
    __ g1_write_barrier_pre(noreg /* obj */,
                            rax /* pre_val */,
                            thread /* thread */,
                            rbx /* tmp */,
                            true /* tosca_live */,
                            true /* expand_call */);

    // _areturn
    NOT_LP64(__ pop(rsi));  // get sender sp
    __ pop(rdi);            // get return address
    __ mov(rsp, sender_sp); // set sp to sender sp
    __ jmp(rdi);
    __ ret(0);

    // generate a vanilla interpreter entry as the slow path
    __ bind(slow_path);
    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
    return entry;
  }
#endif // INCLUDE_ALL_GCS

  // If G1 is not enabled then attempt to go through the accessor entry point
  // Reference.get is an accessor
  return NULL;
}

void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
  // Quick & dirty stack overflow checking: bang the stack & handle trap.
  // Note that we do the banging after the frame is setup, since the exception
  // handling code expects to find a valid interpreter frame on the stack.
  // Doing the banging earlier fails if the caller frame is not an interpreter
  // frame.
  // (Also, the exception throwing code expects to unlock any synchronized
  // method receiver, so do the banging after locking the receiver.)

  // Bang each page in the shadow zone. We can't assume it's been done for
  // an interpreter frame with greater than a page of locals, so each page
  // needs to be checked. Only true for non-native.
  if (UseStackBanging) {
    const int page_size = os::vm_page_size();
    const int n_shadow_pages = ((int)JavaThread::stack_shadow_zone_size()) / page_size;
    // Native frames push no locals, so banging the farthest shadow page is enough.
    const int start_page = native_call ? n_shadow_pages : 1;
    for (int pages = start_page; pages <= n_shadow_pages; pages++) {
      __ bang_stack_with_offset(pages*page_size);
    }
  }
}

// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the
// native method than the typical interpreter frame setup.
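//
// Overall shape: compute the parameter area, build the fixed frame, lock the
// receiver or mirror if synchronized, run the signature handler to marshal
// arguments, transition to _thread_in_native, call the native function,
// transition back with a safepoint/suspend check, unbox an oop result if
// needed, unlock, and remove the activation.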
address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;

  // rbx: Method*
  // rbcp: sender sp

  address entry_point = __ pc();

  const Address constMethod       (rbx, Method::const_offset());
  const Address access_flags      (rbx, Method::access_flags_offset());
  const Address size_of_parameters(rcx, ConstMethod::
                                        size_of_parameters_offset());


  // get parameter size (always needed)
  __ movptr(rcx, constMethod);
  __ load_unsigned_short(rcx, size_of_parameters);

  // native calls don't need the stack size check since they have no
  // expression stack and the arguments are already on the stack and
  // we only add a handful of words to the stack

  // rbx: Method*
  // rcx: size of parameters
  // rbcp: sender sp
  __ pop(rax); // get return address

  // for natives the size of locals is zero

  // compute beginning of parameters
  __ lea(rlocals, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));

  // add 2 zero-initialized slots for native calls
  // initialize result_handler slot
  __ push((int) NULL_WORD);
  // slot for oop temp
  // (static native method holder mirror/jni oop result)
  __ push((int) NULL_WORD);

  // initialize fixed part of activation frame
  generate_fixed_frame(true);

  // make sure method is native & not abstract
#ifdef ASSERT
  __ movl(rax, access_flags);
  {
    Label L;
    __ testl(rax, JVM_ACC_NATIVE);
    __ jcc(Assembler::notZero, L);
    __ stop("tried to execute non-native method as native");
    __ bind(L);
  }
  {
    Label L;
    __ testl(rax, JVM_ACC_ABSTRACT);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // Since at this point in the method invocation the exception handler
  // would try to exit the monitor of a synchronized method which hasn't
  // been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation will
  // check this flag.

  const Register thread1 = NOT_LP64(rax) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread1));
  const Address do_not_unlock_if_synchronized(thread1,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ movbool(do_not_unlock_if_synchronized, true);

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  bang_stack_shadow_pages(true);

  // reset the _do_not_unlock_if_synchronized flag
  NOT_LP64(__ get_thread(thread1));
  __ movbool(do_not_unlock_if_synchronized, false);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so the method is not locked if the counter overflows.
  if (synchronized) {
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    {
      Label L;
      __ movl(rax, access_flags);
      __ testl(rax, JVM_ACC_SYNCHRONIZED);
      __ jcc(Assembler::zero, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  {
    Label L;
    const Address monitor_block_top(rbp,
                frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ movptr(rax, monitor_block_top);
    __ cmpptr(rax, rsp);
    __ jcc(Assembler::equal, L);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif

  // jvmti support
  __ notify_method_entry();

  // work registers
  const Register method = rbx;
  const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);
  const Register t      = NOT_LP64(rcx) LP64_ONLY(r11);

  // allocate space for parameters
  __ get_method(method);
  __ movptr(t, Address(method, Method::const_offset()));
  __ load_unsigned_short(t, Address(t, ConstMethod::size_of_parameters_offset()));

#ifndef _LP64
  __ shlptr(t, Interpreter::logStackElementSize); // Convert parameter count to bytes.
  __ addptr(t, 2*wordSize); // allocate two more slots for JNIEnv and possible mirror
  __ subptr(rsp, t);
  __ andptr(rsp, -(StackAlignmentInBytes)); // gcc needs 16 byte aligned stacks to do XMM intrinsics
#else
  __ shll(t, Interpreter::logStackElementSize);

  __ subptr(rsp, t);
  __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
  __ andptr(rsp, -16); // must be 16 byte boundary (see amd64 ABI)
#endif // _LP64

  // get signature handler
  {
    Label L;
    __ movptr(t, Address(method, Method::signature_handler_offset()));
    __ testptr(t, t);
    __ jcc(Assembler::notZero, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               method);
    __ get_method(method);
    __ movptr(t, Address(method, Method::signature_handler_offset()));
    __ bind(L);
  }

  // call signature handler
  assert(InterpreterRuntime::SignatureHandlerGenerator::from() == rlocals,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::to() == rsp,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == NOT_LP64(t) LP64_ONLY(rscratch1),
         "adjust this code");

  // The generated handlers do not touch RBX (the method oop).
  // However, large signatures cannot be cached and are generated
  // each time here. The slow-path generator can do a GC on return,
  // so we must reload it after the call.
  __ call(t);
  __ get_method(method); // slow path can do a GC, reload RBX


  // result handler is in rax
  // set result handler
  __ movptr(Address(rbp,
                    (frame::interpreter_frame_result_handler_offset) * wordSize),
            rax);

  // pass mirror handle if static call
  {
    Label L;
    __ movl(t, Address(method, Method::access_flags_offset()));
    __ testl(t, JVM_ACC_STATIC);
    __ jcc(Assembler::zero, L);
    // get mirror
    __ load_mirror(t, method);
    // copy mirror into activation frame
    __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize),
              t);
    // pass handle to mirror
#ifndef _LP64
    __ lea(t, Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
    __ movptr(Address(rsp, wordSize), t);
#else
    __ lea(c_rarg1,
           Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
#endif // _LP64
    __ bind(L);
  }

  // get native function entry point
  {
    Label L;
    __ movptr(rax, Address(method, Method::native_function_offset()));
    ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
    __ cmpptr(rax, unsatisfied.addr());
    __ jcc(Assembler::notEqual, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               method);
    __ get_method(method);
    __ movptr(rax, Address(method, Method::native_function_offset()));
    __ bind(L);
  }

  // pass JNIEnv
#ifndef _LP64
  __ get_thread(thread);
  __ lea(t, Address(thread, JavaThread::jni_environment_offset()));
  __ movptr(Address(rsp, 0), t);

  // set_last_Java_frame_before_call
  // It is enough that the pc() points into the right code segment.
  // It does not have to be the correct return pc.
  __ set_last_Java_frame(thread, noreg, rbp, __ pc());
#else
  __ lea(c_rarg0, Address(r15_thread, JavaThread::jni_environment_offset()));

  // It is enough that the pc() points into the right code
  // segment. It does not have to be the correct return pc.
  __ set_last_Java_frame(rsp, rbp, (address) __ pc());
#endif // _LP64

  // change thread state
#ifdef ASSERT
  {
    Label L;
    __ movl(t, Address(thread, JavaThread::thread_state_offset()));
    __ cmpl(t, _thread_in_Java);
    __ jcc(Assembler::equal, L);
    __ stop("Wrong thread state in native stub");
    __ bind(L);
  }
#endif

  // Change state to native

  __ movl(Address(thread, JavaThread::thread_state_offset()),
          _thread_in_native);

  // Call the native method.
  __ call(rax);
  // 32: result potentially in rdx:rax or ST0
  // 64: result potentially in rax or xmm0

  // Verify or restore cpu control state after JNI call
  __ restore_cpu_control_state_after_jni();

  // NOTE: The order of these pushes is known to frame::interpreter_frame_result
  // in order to extract the result of a method call. If the order of these
  // pushes change or anything else is added to the stack then the code in
  // interpreter_frame_result must also change.
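  // On 64-bit, the push(dtos)/push(ltos) below spill xmm0 and rax onto the
  // expression stack so the native result survives the thread-state
  // transition and any safepoint/suspend handling before the result handler
  // runs.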

#ifndef _LP64
  // save potential result in ST(0) & rdx:rax
  // (if result handler is the T_FLOAT or T_DOUBLE handler, result must be in ST0 -
  // the check is necessary to avoid potential Intel FPU overflow problems by saving/restoring 'empty' FPU registers)
  // It is safe to do this push because state is _thread_in_native and return address will be found
  // via _last_native_pc and not via _last_java_sp

  // NOTE: the order of these push(es) is known to frame::interpreter_frame_result.
  // If the order changes or anything else is added to the stack the code in
  // interpreter_frame_result will have to be changed.

  { Label L;
    Label push_double;
    ExternalAddress float_handler(AbstractInterpreter::result_handler(T_FLOAT));
    ExternalAddress double_handler(AbstractInterpreter::result_handler(T_DOUBLE));
    __ cmpptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset + 1)*wordSize),
              float_handler.addr());
    __ jcc(Assembler::equal, push_double);
    __ cmpptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset + 1)*wordSize),
              double_handler.addr());
    __ jcc(Assembler::notEqual, L);
    __ bind(push_double);
    __ push_d(); // FP values are returned using the FPU, so push FPU contents (even if UseSSE > 0).
    __ bind(L);
  }
#else
  __ push(dtos);
#endif // _LP64

  __ push(ltos);

  // change thread state
  NOT_LP64(__ get_thread(thread));
  __ movl(Address(thread, JavaThread::thread_state_offset()),
          _thread_in_native_trans);

  if (os::is_MP()) {
    if (UseMembar) {
      // Force this write out before the read below
      __ membar(Assembler::Membar_mask_bits(
           Assembler::LoadLoad | Assembler::LoadStore |
           Assembler::StoreLoad | Assembler::StoreStore));
    } else {
      // Write serialization page so VM thread can do a pseudo remote membar.
      // We use the current thread pointer to calculate a thread specific
      // offset to write to within the page. This minimizes bus traffic
      // due to cache line collision.
      __ serialize_memory(thread, rcx);
    }
  }

#ifndef _LP64
  if (AlwaysRestoreFPU) {
    // Make sure the control word is correct.
    __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
  }
#endif // _LP64

  // check for safepoint operation in progress and/or pending suspend requests
  {
    Label Continue;
    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
             SafepointSynchronize::_not_synchronized);

    Label L;
    __ jcc(Assembler::notEqual, L);
    __ cmpl(Address(thread, JavaThread::suspend_flags_offset()), 0);
    __ jcc(Assembler::equal, Continue);
    __ bind(L);

    // Don't use call_VM as it will see a possible pending exception
    // and forward it and never return here preventing us from
    // clearing _last_native_pc down below. Also can't use
    // call_VM_leaf either as it will check to see if r13 & r14 are
    // preserved and correspond to the bcp/locals pointers. So we do a
    // runtime call by hand.
    //
#ifndef _LP64
    __ push(thread);
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
                                            JavaThread::check_special_condition_for_native_trans)));
    __ increment(rsp, wordSize);
    __ get_thread(thread);
#else
    __ mov(c_rarg0, r15_thread);
    __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
    __ andptr(rsp, -16); // align stack as required by ABI
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
    __ mov(rsp, r12); // restore sp
    __ reinit_heapbase();
#endif // _LP64
    __ bind(Continue);
  }

  // change thread state
  __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_Java);

  // reset_last_Java_frame
  __ reset_last_Java_frame(thread, true);

  if (CheckJNICalls) {
    // clear_pending_jni_exception_check
    __ movptr(Address(thread, JavaThread::pending_jni_exception_check_fn_offset()), NULL_WORD);
  }

  // reset handle block
  __ movptr(t, Address(thread, JavaThread::active_handles_offset()));
  __ movl(Address(t, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);

  // If result is an oop unbox and store it in frame where gc will see it
  // and result handler will pick it up

  {
    Label no_oop, not_weak, store_result;
    __ lea(t, ExternalAddress(AbstractInterpreter::result_handler(T_OBJECT)));
    __ cmpptr(t, Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize));
    __ jcc(Assembler::notEqual, no_oop);
    // retrieve result
    __ pop(ltos);
    // Unbox oop result, e.g. JNIHandles::resolve value.
    __ resolve_jobject(rax /* value */,
                       thread /* thread */,
                       t /* tmp */);
    __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize), rax);
    // keep stack depth as expected by pushing oop which will eventually be discarded
    __ push(ltos);
    __ bind(no_oop);
  }


  {
    Label no_reguard;
    __ cmpl(Address(thread, JavaThread::stack_guard_state_offset()),
            JavaThread::stack_guard_yellow_reserved_disabled);
    __ jcc(Assembler::notEqual, no_reguard);

    __ pusha(); // XXX only save smashed registers
#ifndef _LP64
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
    __ popa();
#else
    __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
    __ andptr(rsp, -16); // align stack as required by ABI
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
    __ mov(rsp, r12); // restore sp
    __ popa(); // XXX only restore smashed registers
    __ reinit_heapbase();
#endif // _LP64

    __ bind(no_reguard);
  }


  // The method register is junk from after the thread_in_native transition
  // until here. Also can't call_VM until the bcp has been
  // restored. Need bcp for throwing exception below so get it now.
  __ get_method(method);

  // restore to have legal interpreter frame, i.e., bci == 0 <=> code_base()
  __ movptr(rbcp, Address(method, Method::const_offset()));   // get ConstMethod*
  __ lea(rbcp, Address(rbcp, ConstMethod::codes_offset()));   // get codebase

  // handle exceptions (exception handling will handle unlocking!)
  {
    Label L;
    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
    __ jcc(Assembler::zero, L);
    // Note: At some point we may want to unify this with the code
    // used in call_VM_base(); i.e., we should use the
    // StubRoutines::forward_exception code. For now this doesn't work
    // here because the rsp is not correctly set at this point.
    __ MacroAssembler::call_VM(noreg,
                               CAST_FROM_FN_PTR(address,
                               InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  // do unlocking if necessary
  {
    Label L;
    __ movl(t, Address(method, Method::access_flags_offset()));
    __ testl(t, JVM_ACC_SYNCHRONIZED);
    __ jcc(Assembler::zero, L);
    // the code below should be shared with interpreter macro
    // assembler implementation
    {
      Label unlock;
      // BasicObjectLock will be first in list, since this is a
      // synchronized method. However, need to check that the object
      // has not been unlocked by an explicit monitorexit bytecode.
      const Address monitor(rbp,
                            (intptr_t)(frame::interpreter_frame_initial_sp_offset *
                                       wordSize - (int)sizeof(BasicObjectLock)));

      const Register regmon = NOT_LP64(rdx) LP64_ONLY(c_rarg1);

      // monitor expected in c_rarg1 for slow unlock path
      __ lea(regmon, monitor); // address of first monitor

      __ movptr(t, Address(regmon, BasicObjectLock::obj_offset_in_bytes()));
      __ testptr(t, t);
      __ jcc(Assembler::notZero, unlock);

      // Entry already unlocked, need to throw exception
      __ MacroAssembler::call_VM(noreg,
                                 CAST_FROM_FN_PTR(address,
                   InterpreterRuntime::throw_illegal_monitor_state_exception));
      __ should_not_reach_here();

      __ bind(unlock);
      __ unlock_object(regmon);
    }
    __ bind(L);
  }

  // jvmti support
  // Note: This must happen _after_ handling/throwing any exceptions since
  //       the exception handler code notifies the runtime of method exits
  //       too. If this happens before, method entry/exit notifications are
  //       not properly paired (was bug - gri 11/22/99).
  __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);

  // restore potential result in edx:eax, call result handler to
  // restore potential result in ST0 & handle result

  __ pop(ltos);
  LP64_ONLY( __ pop(dtos));

  __ movptr(t, Address(rbp,
                       (frame::interpreter_frame_result_handler_offset) * wordSize));
  __ call(t);

  // remove activation
  __ movptr(t, Address(rbp,
                       frame::interpreter_frame_sender_sp_offset *
                       wordSize)); // get sender sp
  __ leave();   // remove frame anchor
  __ pop(rdi);  // get return address
  __ mov(rsp, t); // set sp to sender sp
  __ jmp(rdi);

  if (inc_counter) {
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(continue_after_compile);
  }

  return entry_point;
}

// Abstract method entry
// Attempt to execute abstract method. Throw exception.
address TemplateInterpreterGenerator::generate_abstract_entry(void) {

  address entry_point = __ pc();

  // abstract method entry

  // pop return address, reset last_sp to NULL
  __ empty_expression_stack();
  __ restore_bcp();    // rsi must be correct for exception handler   (was destroyed)
  __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)

  // throw exception
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
  // the call_VM checks for exception, so we should never return here.
  __ should_not_reach_here();

  return entry_point;
}

//
// Generic interpreted method entry to (asm) interpreter
//
address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;

  // rbx: Method*
  // rbcp: sender sp
  address entry_point = __ pc();

  const Address constMethod(rbx, Method::const_offset());
  const Address access_flags(rbx, Method::access_flags_offset());
  const Address size_of_parameters(rdx,
                                   ConstMethod::size_of_parameters_offset());
  const Address size_of_locals(rdx, ConstMethod::size_of_locals_offset());


  // get parameter size (always needed)
  __ movptr(rdx, constMethod);
  __ load_unsigned_short(rcx, size_of_parameters);

  // rbx: Method*
  // rcx: size of parameters
  // rbcp: sender_sp (could differ from sp+wordSize if we were called via c2i)

  __ load_unsigned_short(rdx, size_of_locals); // get size of locals in words
  __ subl(rdx, rcx);                           // rdx = no. of additional locals

  // YYY
  //   __ incrementl(rdx);
  //   __ andl(rdx, -2);

  // see if we've got enough room on the stack for locals plus overhead.
  generate_stack_overflow_check();

  // get return address
  __ pop(rax);

  // compute beginning of parameters
  __ lea(rlocals, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));

  // rdx - # of additional locals
  // allocate space for locals
  // explicitly initialize locals
  {
    Label exit, loop;
    __ testl(rdx, rdx);
    __ jcc(Assembler::lessEqual, exit); // do nothing if rdx <= 0
    __ bind(loop);
    __ push((int) NULL_WORD); // initialize local variables
    __ decrementl(rdx);       // until everything initialized
    __ jcc(Assembler::greater, loop);
    __ bind(exit);
  }

  // initialize fixed part of activation frame
  generate_fixed_frame(false);

  // make sure method is not native & not abstract
#ifdef ASSERT
  __ movl(rax, access_flags);
  {
    Label L;
    __ testl(rax, JVM_ACC_NATIVE);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute native method as non-native");
    __ bind(L);
  }
  {
    Label L;
    __ testl(rax, JVM_ACC_ABSTRACT);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // Since at this point in the method invocation the exception
  // handler would try to exit the monitor of a synchronized method
  // which hasn't been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation
  // will check this flag.

  const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread));
  const Address do_not_unlock_if_synchronized(thread,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ movbool(do_not_unlock_if_synchronized, true);

  __ profile_parameters_type(rax, rcx, rdx);
  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  Label profile_method;
  Label profile_method_continue;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow,
                          &profile_method,
                          &profile_method_continue);
    if (ProfileInterpreter) {
      __ bind(profile_method_continue);
    }
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  // bang the stack shadow pages
  bang_stack_shadow_pages(false);

  // reset the _do_not_unlock_if_synchronized flag
  NOT_LP64(__ get_thread(thread));
  __ movbool(do_not_unlock_if_synchronized, false);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so the method is not locked if the counter overflows.
  if (synchronized) {
    // Allocate monitor and lock method
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    {
      Label L;
      __ movl(rax, access_flags);
      __ testl(rax, JVM_ACC_SYNCHRONIZED);
      __ jcc(Assembler::zero, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  {
    Label L;
    const Address monitor_block_top (rbp,
                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ movptr(rax, monitor_block_top);
    __ cmpptr(rax, rsp);
    __ jcc(Assembler::equal, L);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif

  // jvmti support
  __ notify_method_entry();

  __ dispatch_next(vtos);

  // invocation counter overflow
  if (inc_counter) {
    if (ProfileInterpreter) {
      // We have decided to profile this method in the interpreter
      __ bind(profile_method);
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
      __ set_method_data_pointer_for_bcp();
      __ get_method(rbx);
      __ jmp(profile_method_continue);
    }
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(continue_after_compile);
  }

  return entry_point;
}

//-----------------------------------------------------------------------------
// Exceptions

void TemplateInterpreterGenerator::generate_throw_exception() {
  // Entry point in previous activation (i.e., if the caller was
  // interpreted)
  Interpreter::_rethrow_exception_entry = __ pc();
  // Restore sp to interpreter_frame_last_sp even though we are going
  // to empty the expression stack for the exception processing.
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
  // rax: exception
  // rdx: return address/pc that threw exception
  __ restore_bcp();    // r13/rsi points to call/send
  __ restore_locals();
  LP64_ONLY(__ reinit_heapbase()); // restore r12 as heapbase.
  // Entry point for exceptions thrown within interpreter code
  Interpreter::_throw_exception_entry = __ pc();
  // expression stack is undefined here
  // rax: exception
  // r13/rsi: exception bcp
  __ verify_oop(rax);
  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  LP64_ONLY(__ mov(c_rarg1, rax));

  // expression stack must be empty before entering the VM in case of
  // an exception
  __ empty_expression_stack();
  // find exception handler address and preserve exception oop
  __ call_VM(rdx,
             CAST_FROM_FN_PTR(address,
                          InterpreterRuntime::exception_handler_for_exception),
             rarg);
  // rax: exception handler entry point
  // rdx: preserved exception oop
  // r13/rsi: bcp for exception handler
  __ push_ptr(rdx); // push exception which is now the only value on the stack
  __ jmp(rax);      // jump to exception handler (may be _remove_activation_entry!)

  // If the exception is not handled in the current frame the frame is
  // removed and the exception is rethrown (i.e. exception
  // continuation is _rethrow_exception).
  //
  // Note: At this point the bci is still the bci for the instruction
  // which caused the exception and the expression stack is
  // empty. Thus, for any VM calls at this point, GC will find a legal
  // oop map (with empty expression stack).

  // In current activation
  // tos: exception
  // esi: exception bcp

  //
  // JVMTI PopFrame support
  //

  Interpreter::_remove_activation_preserving_args_entry = __ pc();
  __ empty_expression_stack();
  // Set the popframe_processing bit in pending_popframe_condition
  // indicating that we are currently handling popframe, so that
  // call_VMs that may happen later do not trigger new popframe
  // handling cycles.
  const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread));
  __ movl(rdx, Address(thread, JavaThread::popframe_condition_offset()));
  __ orl(rdx, JavaThread::popframe_processing_bit);
  __ movl(Address(thread, JavaThread::popframe_condition_offset()), rdx);

  {
    // Check to see whether we are returning to a deoptimized frame.
    // (The PopFrame call ensures that the caller of the popped frame is
    // either interpreted or compiled and deoptimizes it if compiled.)
    // In this case, we can't call dispatch_next() after the frame is
    // popped, but instead must save the incoming arguments and restore
    // them after deoptimization has occurred.
    //
    // Note that we don't compare the return PC against the
    // deoptimization blob's unpack entry because of the presence of
    // adapter frames in C2.
    Label caller_not_deoptimized;
    Register rarg = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
    __ movptr(rarg, Address(rbp, frame::return_addr_offset * wordSize));
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                               InterpreterRuntime::interpreter_contains), rarg);
    __ testl(rax, rax);
    __ jcc(Assembler::notZero, caller_not_deoptimized);

    // Compute size of arguments for saving when returning to
    // deoptimized caller
    __ get_method(rax);
    __ movptr(rax, Address(rax, Method::const_offset()));
    __ load_unsigned_short(rax, Address(rax, in_bytes(ConstMethod::
                                                size_of_parameters_offset())));
    __ shll(rax, Interpreter::logStackElementSize);
    __ restore_locals();
    __ subptr(rlocals, rax);
    __ addptr(rlocals, wordSize);
    // Save these arguments
    NOT_LP64(__ get_thread(thread));
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                                           Deoptimization::
                                           popframe_preserve_args),
                          thread, rax, rlocals);

    __ remove_activation(vtos, rdx,
                         /* throw_monitor_exception */ false,
                         /* install_monitor_exception */ false,
                         /* notify_jvmdi */ false);

    // Inform deoptimization that it is responsible for restoring
    // these arguments
    NOT_LP64(__ get_thread(thread));
    __ movl(Address(thread, JavaThread::popframe_condition_offset()),
            JavaThread::popframe_force_deopt_reexecution_bit);

    // Continue in deoptimization handler
    __ jmp(rdx);

    __ bind(caller_not_deoptimized);
  }

  __ remove_activation(vtos, rdx, /* rdx result (retaddr) is not used */
                       /* throw_monitor_exception */ false,
                       /* install_monitor_exception */ false,
                       /* notify_jvmdi */ false);

  // Finish with popframe handling
  // A previous I2C followed by a deoptimization might have moved the
  // outgoing arguments further up the stack. PopFrame expects the
  // mutations to those outgoing arguments to be preserved and other
  // constraints basically require this frame to look exactly as
  // though it had previously invoked an interpreted activation with
  // no space between the top of the expression stack (current
  // last_sp) and the top of stack. Rather than force deopt to
  // maintain this kind of invariant all the time we call a small
  // fixup routine to move the mutated arguments onto the top of our
  // expression stack if necessary.
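  //
  // In outline (parameter roles assumed from the register setup below):
  //   popframe_move_outgoing_args(thread, src /* current rsp */,
  //                               dest /* saved last_sp */)
  // copies the possibly relocated outgoing arguments back to the top of
  // the interpreted caller's expression stack.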
#ifndef _LP64
  __ mov(rax, rsp);
  __ movptr(rbx, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ get_thread(thread);
  // PC must point into interpreter here
  __ set_last_Java_frame(thread, noreg, rbp, __ pc());
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), thread, rax, rbx);
  __ get_thread(thread);
#else
  __ mov(c_rarg1, rsp);
  __ movptr(c_rarg2, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  // PC must point into interpreter here
  __ set_last_Java_frame(noreg, rbp, __ pc());
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), r15_thread, c_rarg1, c_rarg2);
#endif
  __ reset_last_Java_frame(thread, true);

  // Restore the last_sp and null it out
  __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);

  __ restore_bcp();
  __ restore_locals();
  // The method data pointer was incremented already during
  // call profiling. We have to restore the mdp for the current bcp.
  if (ProfileInterpreter) {
    __ set_method_data_pointer_for_bcp();
  }

  // Clear the popframe condition flag
  NOT_LP64(__ get_thread(thread));
  __ movl(Address(thread, JavaThread::popframe_condition_offset()),
          JavaThread::popframe_inactive);

#if INCLUDE_JVMTI
  {
    Label L_done;
    const Register local0 = rlocals;

    __ cmpb(Address(rbcp, 0), Bytecodes::_invokestatic);
    __ jcc(Assembler::notEqual, L_done);

    // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
    // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.
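    // (member_name_arg_or_null returns the MemberName appendix when the
    // invokestatic at rbcp links to a MethodHandle intrinsic, and NULL
    // otherwise; only in that case is the local slot rewritten.)
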
    __ get_method(rdx);
    __ movptr(rax, Address(local0, 0));
    __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), rax, rdx, rbcp);

    __ testptr(rax, rax);
    __ jcc(Assembler::zero, L_done);

    __ movptr(Address(local0, 0), rax);
    __ bind(L_done);
  }
#endif // INCLUDE_JVMTI

  __ dispatch_next(vtos);
  // end of PopFrame support

  Interpreter::_remove_activation_entry = __ pc();

  // preserve exception over this code sequence
  __ pop_ptr(rax);
  NOT_LP64(__ get_thread(thread));
  __ movptr(Address(thread, JavaThread::vm_result_offset()), rax);
  // remove the activation (without doing throws on illegalMonitorExceptions)
  __ remove_activation(vtos, rdx, false, true, false);
  // restore exception
  NOT_LP64(__ get_thread(thread));
  __ get_vm_result(rax, thread);

  // In between activations - previous activation type unknown yet
  // compute continuation point - the continuation point expects the
  // following registers set up:
  //
  // rax: exception
  // rdx: return address/pc that threw exception
  // rsp: expression stack of caller
  // rbp: ebp of caller
  __ push(rax);                                  // save exception
  __ push(rdx);                                  // save return address
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                          SharedRuntime::exception_handler_for_return_address),
                        thread, rdx);
  __ mov(rbx, rax);                              // save exception handler
  __ pop(rdx);                                   // restore return address
  __ pop(rax);                                   // restore exception
  // Note that an "issuing PC" is actually the next PC after the call
  __ jmp(rbx);                                   // jump to exception
                                                 // handler of caller
}


//
// JVMTI ForceEarlyReturn support
//
address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
  address entry = __ pc();

  __ restore_bcp();
  __ restore_locals();
  __ empty_expression_stack();
  __ load_earlyret_value(state);  // 32 bits returns value in rdx, so don't reuse

  const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread));
  __ movptr(rcx, Address(thread, JavaThread::jvmti_thread_state_offset()));
  Address cond_addr(rcx, JvmtiThreadState::earlyret_state_offset());

  // Clear the earlyret state
  __ movl(cond_addr, JvmtiThreadState::earlyret_inactive);

  __ remove_activation(state, rsi,
                       false, /* throw_monitor_exception */
                       false, /* install_monitor_exception */
                       true); /* notify_jvmdi */
  __ jmp(rsi);

  return entry;
} // end of ForceEarlyReturn support


//-----------------------------------------------------------------------------
// Helper for vtos entry point generation
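//
// A template whose tos_in() is vtos expects no value cached in the TOS
// registers. If dispatch arrives with a cached TOS value (atos, itos,
// ftos, ...), the type-specific entry points generated below first push
// that value onto the expression stack and then fall through to the
// shared vtos entry at label L.
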
void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
                                                         address& bep,
                                                         address& cep,
                                                         address& sep,
                                                         address& aep,
                                                         address& iep,
                                                         address& lep,
                                                         address& fep,
                                                         address& dep,
                                                         address& qep,
                                                         address& vep) {
  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
  Label L;
  aep = __ pc();  __ push_ptr();   __ jmp(L);
  qep = __ pc();  __ push_ptr();   __ jmp(L);
#ifndef _LP64
  fep = __ pc();  __ push(ftos);   __ jmp(L);
  dep = __ pc();  __ push(dtos);   __ jmp(L);
#else
  fep = __ pc();  __ push_f(xmm0); __ jmp(L);
  dep = __ pc();  __ push_d(xmm0); __ jmp(L);
#endif // _LP64
  lep = __ pc();  __ push_l();     __ jmp(L);
  bep = cep = sep =
  iep = __ pc();  __ push_i();
  vep = __ pc();
  __ bind(L);
  generate_and_dispatch(t);
}

//-----------------------------------------------------------------------------

// Non-product code
#ifndef PRODUCT

address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
  address entry = __ pc();

#ifndef _LP64
  // prepare expression stack
  __ pop(rcx);        // pop return address so expression stack is 'pure'
  __ push(state);     // save tosca

  // pass tosca registers as arguments & call tracer
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode), rcx, rax, rdx);
  __ mov(rcx, rax);   // make sure return address is not destroyed by pop(state)
  __ pop(state);      // restore tosca

  // return
  __ jmp(rcx);
#else
  __ push(state);
  __ push(c_rarg0);
  __ push(c_rarg1);
  __ push(c_rarg2);
  __ push(c_rarg3);
  __ mov(c_rarg2, rax);  // Pass itos
#ifdef _WIN64
  __ movflt(xmm3, xmm0); // Pass ftos
#endif
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode),
             c_rarg1, c_rarg2, c_rarg3);
  __ pop(c_rarg3);
  __ pop(c_rarg2);
  __ pop(c_rarg1);
  __ pop(c_rarg0);
  __ pop(state);
  __ ret(0);             // return from result handler
#endif // _LP64

  return entry;
}

void TemplateInterpreterGenerator::count_bytecode() {
  __ incrementl(ExternalAddress((address) &BytecodeCounter::_counter_value));
}

void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
  __ incrementl(ExternalAddress((address) &BytecodeHistogram::_counters[t->bytecode()]));
}

void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
  __ mov32(rbx, ExternalAddress((address) &BytecodePairHistogram::_index));
  __ shrl(rbx, BytecodePairHistogram::log2_number_of_codes);
  __ orl(rbx,
         ((int) t->bytecode()) <<
         BytecodePairHistogram::log2_number_of_codes);
  __ mov32(ExternalAddress((address) &BytecodePairHistogram::_index), rbx);
  __ lea(rscratch1, ExternalAddress((address) BytecodePairHistogram::_counters));
  __ incrementl(Address(rscratch1, rbx, Address::times_4));
}


void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
  // Call a little run-time stub to avoid blow-up for each bytecode.
  // The run-time stub saves the right registers, depending on
  // the tosca in-state for the given template.

  assert(Interpreter::trace_code(t->tos_in()) != NULL,
         "entry must have been generated");
#ifndef _LP64
  __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
#else
  __ mov(r12, rsp);    // remember sp (can only use r12 if not using call_VM)
  __ andptr(rsp, -16); // align stack as required by ABI
  __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
  __ mov(rsp, r12);    // restore sp
  __ reinit_heapbase();
#endif // _LP64
}


void TemplateInterpreterGenerator::stop_interpreter_at() {
  Label L;
  __ cmp32(ExternalAddress((address) &BytecodeCounter::_counter_value),
           StopInterpreterAt);
  __ jcc(Assembler::notEqual, L);
  __ int3();
  __ bind(L);
}
#endif // !PRODUCT
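
// StopInterpreterAt is matched against the dynamic bytecode counter
// maintained by count_bytecode() above; when the counts are equal the
// int3 traps into the debugger, making it possible to stop at a precise
// point in the executed bytecode stream.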