/*
 * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateInterpreterGenerator.hpp"
#include "interpreter/templateTable.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "oops/valueKlass.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

#define __ _masm->

// Size of interpreter code.  Increase if too small.  The interpreter will
// fail with a guarantee ("not enough space for interpreter generation")
// if it is too small.
// Run with +PrintInterpreter to get the VM to print out the size.
// Max size with JVMTI
#ifdef AMD64
int TemplateInterpreter::InterpreterCodeSize = JVMCI_ONLY(280) NOT_JVMCI(268) * 1024;
#else
int TemplateInterpreter::InterpreterCodeSize = 224 * 1024;
#endif // AMD64

// Global Register Names
static const Register rbcp    = LP64_ONLY(r13) NOT_LP64(rsi);
static const Register rlocals = LP64_ONLY(r14) NOT_LP64(rdi);

const int method_offset = frame::interpreter_frame_method_offset * wordSize;
const int bcp_offset    = frame::interpreter_frame_bcp_offset    * wordSize;
const int locals_offset = frame::interpreter_frame_locals_offset * wordSize;


//-----------------------------------------------------------------------------

address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();

#ifdef ASSERT
  {
    Label L;
    __ lea(rax, Address(rbp,
                        frame::interpreter_frame_monitor_block_top_offset *
                        wordSize));
    __ cmpptr(rax, rsp); // rax = maximal rsp for current rbp (stack
                         // grows negative)
    __ jcc(Assembler::aboveEqual, L); // check if frame is complete
    __ stop("interpreter frame not set up");
    __ bind(L);
  }
#endif // ASSERT
  // Restore bcp under the assumption that the current frame is still
  // interpreted
  __ restore_bcp();

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // throw exception
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::throw_StackOverflowError));
  return entry;
}

address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(
        const char* name) {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // setup parameters
  // ??? convention: expect aberrant index in register ebx
  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  __ lea(rarg, ExternalAddress((address)name));
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::
                              throw_ArrayIndexOutOfBoundsException),
             rarg, rbx);
  return entry;
}

address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();

  // object is at TOS
  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  __ pop(rarg);

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();

  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::
                              throw_ClassCastException),
             rarg);
  return entry;
}

address TemplateInterpreterGenerator::generate_exception_handler_common(
        const char* name, const char* message, bool pass_oop) {
  assert(!pass_oop || message == NULL, "either oop or message but not both");
  address entry = __ pc();

  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  Register rarg2 = NOT_LP64(rbx) LP64_ONLY(c_rarg2);

  if (pass_oop) {
    // object is at TOS
    __ pop(rarg2);
  }
  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // setup parameters
  __ lea(rarg, ExternalAddress((address)name));
  if (pass_oop) {
    __ call_VM(rax, CAST_FROM_FN_PTR(address,
                                     InterpreterRuntime::
                                     create_klass_exception),
               rarg, rarg2);
  } else {
    __ lea(rarg2, ExternalAddress((address)message));
    __ call_VM(rax,
               CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception),
               rarg, rarg2);
  }
  // throw exception
  __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
  return entry;
}

address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
  address entry = __ pc();

#ifndef _LP64
#ifdef COMPILER2
  // The FPU stack is clean if UseSSE >= 2 but must be cleaned in other cases
  if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) {
    for (int i = 1; i < 8; i++) {
      __ ffree(i);
    }
  } else if (UseSSE < 2) {
    __ empty_FPU_stack();
  }
#endif // COMPILER2
  if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) {
    __ MacroAssembler::verify_FPU(1, "generate_return_entry_for compiled");
  } else {
    __ MacroAssembler::verify_FPU(0, "generate_return_entry_for compiled");
  }

  if (state == ftos) {
    __ MacroAssembler::verify_FPU(UseSSE >= 1 ? 0 : 1, "generate_return_entry_for in interpreter");
  } else if (state == dtos) {
    __ MacroAssembler::verify_FPU(UseSSE >= 2 ? 0 : 1, "generate_return_entry_for in interpreter");
  }
#endif // _LP64

  // Restore stack bottom in case i2c adjusted stack
  __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  // and NULL it as marker that esp is now tos until next java call
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);

  if (state == qtos && ValueTypeReturnedAsFields) {
#ifndef _LP64
    __ super_call_VM_leaf(StubRoutines::store_value_type_fields_to_buf());
#else
    // A value type is being returned. If fields are in registers we
    // need to allocate a value type instance and initialize it with
    // the value of the fields.
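    // Illustrative sketch (comments only, not generated code): the fast
    // path below is a TLAB bump-pointer allocation. Under the convention
    // that rax holds (ValueKlass* | 1) when no buffered value was returned,
    // it corresponds roughly to this C-like pseudocode:
    //
    //   ValueKlass* vk  = (ValueKlass*)(rax & ~1);   // strip the tag bit
    //   size_t      sz  = vk->layout_helper();
    //   HeapWord*   obj = thread->tlab_top();
    //   if (obj + sz > thread->tlab_end()) goto slow_case;
    //   thread->set_tlab_top(obj + sz);              // bump the TLAB top
    //   ... install mark word and klass, then call vk->pack_handler() ...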
    Label skip, slow_case;
    // We only need a new buffered value if a new one is not returned
    __ testptr(rax, 1);
    __ jcc(Assembler::zero, skip);

    // Try to allocate a new buffered value (from the heap)
    if (UseTLAB) {
      __ mov(rbx, rax);
      __ andptr(rbx, -2);

      __ movl(r14, Address(rbx, Klass::layout_helper_offset()));

      __ movptr(r13, Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())));
      __ lea(r14, Address(r13, r14, Address::times_1));
      __ cmpptr(r14, Address(r15_thread, in_bytes(JavaThread::tlab_end_offset())));
      __ jcc(Assembler::above, slow_case);
      __ movptr(Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())), r14);

      if (UseBiasedLocking) {
        __ movptr(rax, Address(rbx, Klass::prototype_header_offset()));
        __ movptr(Address(r13, oopDesc::mark_offset_in_bytes()), rax);
      } else {
        __ movptr(Address(r13, oopDesc::mark_offset_in_bytes()),
                  (intptr_t)markOopDesc::prototype());
      }
      __ xorl(rax, rax); // use zero reg to clear memory (shorter code)
      __ store_klass_gap(r13, rax);  // zero klass gap for compressed oops
      __ mov(rax, rbx);
      __ store_klass(r13, rbx);  // klass

      // We have our new buffered value, initialize its fields with a
      // value class specific handler
      __ movptr(rbx, Address(rax, ValueKlass::pack_handler_offset()));
      __ mov(rax, r13);
      __ call(rbx);
      __ jmp(skip);
    }

    __ bind(slow_case);
    // We failed to allocate a new value, fall back to a runtime
    // call. Some oop field may be live in some registers but we can't
    // tell. That runtime call will take care of preserving them
    // across a GC if there's one.
    __ super_call_VM_leaf(StubRoutines::store_value_type_fields_to_buf());
    __ bind(skip);

    if (ReturnValuesInThreadLocalBuffer) {
      // vt_alloc_ptr adjustment
      Label no_adjustment;
      __ cmpptr(rax, Address(r15_thread, in_bytes(JavaThread::vt_alloc_ptr_offset())));
      __ jcc(Assembler::notEqual, no_adjustment);
      __ load_klass(rbx, rax);
      __ movl(r13, Address(rbx, Klass::layout_helper_offset()));
      __ lea(r14, Address(rax, r13, Address::times_1));
      __ movptr(Address(r15_thread, in_bytes(JavaThread::vt_alloc_ptr_offset())), r14);
      __ bind(no_adjustment);
    }

#endif
  }

  __ restore_bcp();
  __ restore_locals();

  if (state == atos) {
    Register mdp = rbx;
    Register tmp = rcx;
    __ profile_return_type(mdp, rax, tmp);
  }

  const Register cache = rbx;
  const Register index = rcx;
  __ get_cache_and_index_at_bcp(cache, index, 1, index_size);

  const Register flags = cache;
  __ movl(flags, Address(cache, index, Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
  __ andl(flags, ConstantPoolCacheEntry::parameter_size_mask);
  __ lea(rsp, Address(rsp, flags, Interpreter::stackElementScale()));

  const Register java_thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
  if (JvmtiExport::can_pop_frame()) {
    NOT_LP64(__ get_thread(java_thread));
    __ check_and_handle_popframe(java_thread);
  }
  if (JvmtiExport::can_force_early_return()) {
    NOT_LP64(__ get_thread(java_thread));
    __ check_and_handle_earlyret(java_thread);
  }

  __ dispatch_next(state, step);

  return entry;
}


address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step, address continuation) {
  address entry = __ pc();

#ifndef _LP64
  if (state == ftos) {
    __ MacroAssembler::verify_FPU(UseSSE >= 1 ? 0 : 1, "generate_deopt_entry_for in interpreter");
  } else if (state == dtos) {
    __ MacroAssembler::verify_FPU(UseSSE >= 2 ? 0 : 1, "generate_deopt_entry_for in interpreter");
  }
#endif // _LP64

  // NULL last_sp until next java call
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
  __ restore_bcp();
  __ restore_locals();
  const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread));
#if INCLUDE_JVMCI
  // Check if we need to take lock at entry of synchronized method.  This can
  // only occur on method entry so emit it only for vtos with step 0.
  if ((EnableJVMCI || UseAOT) && state == vtos && step == 0) {
    Label L;
    __ cmpb(Address(thread, JavaThread::pending_monitorenter_offset()), 0);
    __ jcc(Assembler::zero, L);
    // Clear flag.
    __ movb(Address(thread, JavaThread::pending_monitorenter_offset()), 0);
    // Satisfy calling convention for lock_method().
    __ get_method(rbx);
    // Take lock.
    lock_method();
    __ bind(L);
  } else {
#ifdef ASSERT
    if (EnableJVMCI) {
      Label L;
      __ cmpb(Address(r15_thread, JavaThread::pending_monitorenter_offset()), 0);
      __ jccb(Assembler::zero, L);
      __ stop("unexpected pending monitor in deopt entry");
      __ bind(L);
    }
#endif
  }
#endif
  // handle exceptions
  {
    Label L;
    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
    __ jcc(Assembler::zero, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }
  if (continuation == NULL) {
    __ dispatch_next(state, step);
  } else {
    __ jump_to_entry(continuation);
  }
  return entry;
}

address TemplateInterpreterGenerator::generate_result_handler_for(
        BasicType type) {
  address entry = __ pc();
  switch (type) {
  case T_BOOLEAN: __ c2bool(rax);            break;
#ifndef _LP64
  case T_CHAR   : __ andptr(rax, 0xFFFF);    break;
#else
  case T_CHAR   : __ movzwl(rax, rax);       break;
#endif // _LP64
  case T_BYTE   : __ sign_extend_byte(rax);  break;
  case T_SHORT  : __ sign_extend_short(rax); break;
  case T_INT    : /* nothing to do */        break;
  case T_LONG   : /* nothing to do */        break;
  case T_VOID   : /* nothing to do */        break;
#ifndef _LP64
  case T_DOUBLE :
  case T_FLOAT  :
    { const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp();
      __ pop(t);                            // remove return address first
      // Must return a result for interpreter or compiler. In SSE
      // mode, results are returned in xmm0 and the FPU stack must
      // be empty.
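      // Illustrative sketch (comments only): with UseSSE >= 1 a float result
      // must be handed back in xmm0, but the saved value sits in memory as a
      // double, so the code below goes through the x87 stack to narrow it,
      // roughly:
      //
      //   fld   qword [rsp]    ; reload the saved double onto the x87 stack
      //   fstp  dword [rsp]    ; store it back as a float (pops ST0)
      //   movss xmm0, [rsp]    ; hand the result over in the SSE register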
      if (type == T_FLOAT && UseSSE >= 1) {
        // Load ST0
        __ fld_d(Address(rsp, 0));
        // Store as float and empty fpu stack
        __ fstp_s(Address(rsp, 0));
        // and reload
        __ movflt(xmm0, Address(rsp, 0));
      } else if (type == T_DOUBLE && UseSSE >= 2) {
        __ movdbl(xmm0, Address(rsp, 0));
      } else {
        // restore ST0
        __ fld_d(Address(rsp, 0));
      }
      // and pop the temp
      __ addptr(rsp, 2 * wordSize);
      __ push(t);                           // restore return address
    }
    break;
#else
  case T_FLOAT  : /* nothing to do */        break;
  case T_DOUBLE : /* nothing to do */        break;
#endif // _LP64

  case T_VALUETYPE: // fall through (value types are handled with oops)
  case T_OBJECT :
    // retrieve result from frame
    __ movptr(rax, Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize));
    // and verify it
    __ verify_oop(rax);
    break;
  default       : ShouldNotReachHere();
  }
  __ ret(0);                                 // return from result handler
  return entry;
}

address TemplateInterpreterGenerator::generate_safept_entry_for(
        TosState state,
        address runtime_entry) {
  address entry = __ pc();
  __ push(state);
  __ call_VM(noreg, runtime_entry);
  __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
  return entry;
}



// Helpers for commoning out cases in the various types of method entries.
//


// increment invocation count & check for overflow
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test
//
// rbx: method
// rcx: invocation counter
//
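// Illustrative sketch (comments only): the non-tiered path below is "sticky"
// because it compares the running count against the limit with >= instead of
// testing a one-shot overflow condition, so once the threshold is crossed the
// branch keeps firing until the overflow handler intervenes. In C-like
// pseudocode:
//
//   invocation_count += count_increment;
//   total = invocation_count + (backedge_count & count_mask_value);
//   if (total >= interpreter_invocation_limit) goto *overflow;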
void TemplateInterpreterGenerator::generate_counter_incr(
        Label* overflow,
        Label* profile_method,
        Label* profile_method_continue) {
  Label done;
  // Note: In tiered we increment either counters in Method* or in MDO, depending on whether we're profiling or not.
  if (TieredCompilation) {
    int increment = InvocationCounter::count_increment;
    Label no_mdo;
    if (ProfileInterpreter) {
      // Are we profiling?
      __ movptr(rax, Address(rbx, Method::method_data_offset()));
      __ testptr(rax, rax);
      __ jccb(Assembler::zero, no_mdo);
      // Increment counter in the MDO
      const Address mdo_invocation_counter(rax, in_bytes(MethodData::invocation_counter_offset()) +
                                                in_bytes(InvocationCounter::counter_offset()));
      const Address mask(rax, in_bytes(MethodData::invoke_mask_offset()));
      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rcx, false, Assembler::zero, overflow);
      __ jmp(done);
    }
    __ bind(no_mdo);
    // Increment counter in MethodCounters
    const Address invocation_counter(rax,
                                     MethodCounters::invocation_counter_offset() +
                                     InvocationCounter::counter_offset());
    __ get_method_counters(rbx, rax, done);
    const Address mask(rax, in_bytes(MethodCounters::invoke_mask_offset()));
    __ increment_mask_and_jump(invocation_counter, increment, mask, rcx,
                               false, Assembler::zero, overflow);
    __ bind(done);
  } else { // not TieredCompilation
    const Address backedge_counter(rax,
                                   MethodCounters::backedge_counter_offset() +
                                   InvocationCounter::counter_offset());
    const Address invocation_counter(rax,
                                     MethodCounters::invocation_counter_offset() +
                                     InvocationCounter::counter_offset());

    __ get_method_counters(rbx, rax, done);

    if (ProfileInterpreter) {
      __ incrementl(Address(rax,
                            MethodCounters::interpreter_invocation_counter_offset()));
    }
    // Update standard invocation counters
    __ movl(rcx, invocation_counter);
    __ incrementl(rcx, InvocationCounter::count_increment);
    __ movl(invocation_counter, rcx); // save invocation count

    __ movl(rax, backedge_counter);   // load backedge counter
    __ andl(rax, InvocationCounter::count_mask_value); // mask out the status bits

    __ addl(rcx, rax);                // add both counters

    // profile_method is non-null only for interpreted method so
    // profile_method != NULL == !native_call

    if (ProfileInterpreter && profile_method != NULL) {
      // Test to see if we should create a method data oop
      __ movptr(rax, Address(rbx, Method::method_counters_offset()));
      __ cmp32(rcx, Address(rax, in_bytes(MethodCounters::interpreter_profile_limit_offset())));
      __ jcc(Assembler::less, *profile_method_continue);

      // if no method data exists, go to profile_method
      __ test_method_data_pointer(rax, *profile_method);
    }

    __ movptr(rax, Address(rbx, Method::method_counters_offset()));
    __ cmp32(rcx, Address(rax, in_bytes(MethodCounters::interpreter_invocation_limit_offset())));
    __ jcc(Assembler::aboveEqual, *overflow);
    __ bind(done);
  }
}

void TemplateInterpreterGenerator::generate_counter_overflow(Label& do_continue) {

  // Asm interpreter on entry
  // r14/rdi - locals
  // r13/rsi - bcp
  // rbx     - method
  // rdx     - cpool --- DOES NOT APPEAR TO BE TRUE
  // rbp     - interpreter frame

  // On return (i.e. jump to entry_point) [ back to invocation of interpreter ]
  // Everything as it was on entry
  // rdx is not restored. Doesn't appear to really be set.

  // InterpreterRuntime::frequency_counter_overflow takes two
  // arguments, the first (thread) is passed by call_VM, the second
  // indicates if the counter overflow occurs at a backwards branch
  // (NULL bcp).  We pass zero for it.  The call returns the address
  // of the verified entry point for the method or NULL if the
  // compilation did not complete (either went background or bailed
  // out).
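  // Illustrative sketch (comments only): the contract of the call below, in
  // C-like pseudocode:
  //
  //   // thread is supplied by call_VM; a NULL bcp means "not at a backedge"
  //   address verified = frequency_counter_overflow(thread, /*branch_bcp=*/NULL);
  //   // verified == NULL: compilation went background or bailed out --
  //   // restore Method* into rbx below and keep interpreting.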
  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  __ movl(rarg, 0);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::frequency_counter_overflow),
             rarg);

  __ movptr(rbx, Address(rbp, method_offset));   // restore Method*
  // Preserve invariant that r13/r14 contain bcp/locals of sender frame
  // and jump to the interpreted entry.
  __ jmp(do_continue, relocInfo::none);
}

// See if we've got enough room on the stack for locals plus overhead below
// JavaThread::stack_overflow_limit(). If not, throw a StackOverflowError
// without going through the signal handler, i.e., reserved and yellow zones
// will not be made usable. The shadow zone must suffice to handle the
// overflow.
// The expression stack grows down incrementally, so the normal guard
// page mechanism will work for that.
//
// NOTE: The additional locals are also always pushed (this wasn't
// obvious in generate_fixed_frame), so the guard should work for them
// too.
//
// Args:
//      rdx: number of additional locals this frame needs (what we must check)
//      rbx: Method*
//
// Kills:
//      rax
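//
// Illustrative sketch (comments only): the generated check, in C-like
// pseudocode:
//
//   if (rdx <= (page_size - overhead_size) / stackElementSize)
//     return;  // fits within one guard page, normal banging covers it
//   size_t needed = rdx * stackElementSize + overhead_size;
//   if (rsp <= thread->stack_overflow_limit() + needed)
//     throw StackOverflowError;  // via the shared runtime stub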
void TemplateInterpreterGenerator::generate_stack_overflow_check(void) {

  // monitor entry size: see picture of stack in frame_x86.hpp
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

  // total overhead size: entry_size + (saved rbp through expr stack
  // bottom).  be sure to change this if you add/subtract anything
  // to/from the overhead area
  const int overhead_size =
    -(frame::interpreter_frame_initial_sp_offset * wordSize) + entry_size;

  const int page_size = os::vm_page_size();

  Label after_frame_check;

  // see if the frame is greater than one page in size. If so,
  // then we need to verify there is enough stack space remaining
  // for the additional locals.
  __ cmpl(rdx, (page_size - overhead_size) / Interpreter::stackElementSize);
  __ jcc(Assembler::belowEqual, after_frame_check);

  // compute rsp as if this were going to be the last frame on
  // the stack before the red zone

  Label after_frame_check_pop;
  const Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread);
#ifndef _LP64
  __ push(thread);
  __ get_thread(thread);
#endif

  const Address stack_limit(thread, JavaThread::stack_overflow_limit_offset());

  // locals + overhead, in bytes
  __ mov(rax, rdx);
  __ shlptr(rax, Interpreter::logStackElementSize); // Convert parameter count to bytes.
  __ addptr(rax, overhead_size);

#ifdef ASSERT
  Label limit_okay;
  // Verify that thread stack overflow limit is non-zero.
  __ cmpptr(stack_limit, (int32_t)NULL_WORD);
  __ jcc(Assembler::notEqual, limit_okay);
  __ stop("stack overflow limit is zero");
  __ bind(limit_okay);
#endif

  // Add locals/frame size to stack limit.
  __ addptr(rax, stack_limit);

  // Check against the current stack bottom.
  __ cmpptr(rsp, rax);

  __ jcc(Assembler::above, after_frame_check_pop);
  NOT_LP64(__ pop(rsi));  // get saved bcp

  // Restore sender's sp as SP. This is necessary if the sender's
  // frame is an extended compiled frame (see gen_c2i_adapter())
  // and safer anyway in case of JSR292 adaptations.

  __ pop(rax); // return address must be moved if SP is changed
  __ mov(rsp, rbcp);
  __ push(rax);

  // Note: the restored frame is not necessarily interpreted.
  // Use the shared runtime version of the StackOverflowError.
  assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
  __ jump(ExternalAddress(StubRoutines::throw_StackOverflowError_entry()));
  // all done with frame size check
  __ bind(after_frame_check_pop);
  NOT_LP64(__ pop(rsi));

  // all done with frame size check
  __ bind(after_frame_check);
}

// Allocate monitor and lock method (asm interpreter)
//
// Args:
//      rbx: Method*
//      r14/rdi: locals
//
// Kills:
//      rax
//      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ...(param regs)
//      rscratch1, rscratch2 (scratch regs)
void TemplateInterpreterGenerator::lock_method() {
  // synchronize method
  const Address access_flags(rbx, Method::access_flags_offset());
  const Address monitor_block_top(
        rbp,
        frame::interpreter_frame_monitor_block_top_offset * wordSize);
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

#ifdef ASSERT
  {
    Label L;
    __ movl(rax, access_flags);
    __ testl(rax, JVM_ACC_SYNCHRONIZED);
    __ jcc(Assembler::notZero, L);
    __ stop("method doesn't need synchronization");
    __ bind(L);
  }
#endif // ASSERT

  // get synchronization object
  {
    Label done;
    __ movl(rax, access_flags);
    __ testl(rax, JVM_ACC_STATIC);
    // get receiver (assume this is frequent case)
    __ movptr(rax, Address(rlocals, Interpreter::local_offset_in_bytes(0)));
    __ jcc(Assembler::zero, done);
    __ load_mirror(rax, rbx);

#ifdef ASSERT
    {
      Label L;
      __ testptr(rax, rax);
      __ jcc(Assembler::notZero, L);
      __ stop("synchronization object is NULL");
      __ bind(L);
    }
#endif // ASSERT

    __ bind(done);
  }

  // add space for monitor & lock
  __ subptr(rsp, entry_size);        // add space for a monitor entry
  __ movptr(monitor_block_top, rsp); // set new monitor block top
  // store object
  __ movptr(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax);
  const Register lockreg = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
  __ movptr(lockreg, rsp); // object address
  __ lock_object(lockreg);
}

// Generate a fixed interpreter frame. This is identical setup for
// interpreted methods and for native methods hence the shared code.
//
// Args:
//      rax: return address
//      rbx: Method*
//      r14/rdi: pointer to locals
//      r13/rsi: sender sp
//      rdx: cp cache
void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  // initialize fixed part of activation frame
  __ push(rax);            // save return address
  __ enter();              // save old & set new rbp
  __ push(rbcp);           // set sender sp
  __ push((int)NULL_WORD); // leave last_sp as null
  __ movptr(rbcp, Address(rbx, Method::const_offset()));    // get ConstMethod*
  __ lea(rbcp, Address(rbcp, ConstMethod::codes_offset())); // get codebase
  __ push(rbx);            // save Method*
  // Get mirror and store it in the frame as GC root for this Method*
  __ load_mirror(rdx, rbx);
  __ push(rdx);
  if (ProfileInterpreter) {
    Label method_data_continue;
    __ movptr(rdx, Address(rbx, in_bytes(Method::method_data_offset())));
    __ testptr(rdx, rdx);
    __ jcc(Assembler::zero, method_data_continue);
    __ addptr(rdx, in_bytes(MethodData::data_offset()));
    __ bind(method_data_continue);
    __ push(rdx);          // set the mdp (method data pointer)
  } else {
    __ push(0);
  }

  __ movptr(rdx, Address(rbx, Method::const_offset()));
  __ movptr(rdx, Address(rdx, ConstMethod::constants_offset()));
  __ movptr(rdx, Address(rdx, ConstantPool::cache_offset_in_bytes()));
  __ push(rdx); // set constant pool cache
  const Register thread1 = NOT_LP64(rdx) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread1));
  __ movptr(rdx, Address(thread1, JavaThread::vt_alloc_ptr_offset()));
  __ push(rdx); // value type allocation pointer when activation is created
  __ push(rlocals); // set locals pointer
  if (native_call) {
    __ push(0); // no bcp
  } else {
    __ push(rbcp); // set bcp
  }
  __ push(0); // reserve word for pointer to expression stack bottom
  __ movptr(Address(rsp, 0), rsp); // set expression stack bottom
}
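
// Illustrative sketch (comments only): after generate_fixed_frame the
// activation looks roughly like this (stack grows toward low addresses):
//
//   [ return address          ]
//   [ saved rbp               ] <--- rbp
//   [ sender sp               ]
//   [ last_sp (NULL)          ]
//   [ Method*                 ]
//   [ mirror                  ]
//   [ mdp or 0                ]
//   [ ConstantPoolCache*      ]
//   [ vt_alloc_ptr            ]
//   [ locals pointer          ]
//   [ bcp (0 for native)      ]
//   [ expression stack bottom ] <--- rsp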

// End of helpers

// Method entry for java.lang.ref.Reference.get.
address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
#if INCLUDE_ALL_GCS
  // Code: _aload_0, _getfield, _areturn
  // parameter size = 1
  //
  // The code that gets generated by this routine is split into 2 parts:
  //    1. The "intrinsified" code for G1 (or any SATB based GC),
  //    2. The slow path - which is an expansion of the regular method entry.
  //
  // Notes:-
  // * In the G1 code we do not check whether we need to block for
  //   a safepoint. If G1 is enabled then we must execute the specialized
  //   code for Reference.get (except when the Reference object is null)
  //   so that we can log the value in the referent field with an SATB
  //   update buffer.
  //   If the code for the getfield template is modified so that the
  //   G1 pre-barrier code is executed when the current method is
  //   Reference.get() then going through the normal method entry
  //   will be fine.
  // * The G1 code can, however, check the receiver object (the instance
  //   of java.lang.Reference) and jump to the slow path if null. If the
  //   Reference object is null then we obviously cannot fetch the referent
  //   and so we don't need to call the G1 pre-barrier. Thus we can use the
  //   regular method entry code to generate the NPE.
  //
  // rbx: Method*

  // r13: senderSP must preserve for slow path, set SP to it on fast path

  address entry = __ pc();

  const int referent_offset = java_lang_ref_Reference::referent_offset;
  guarantee(referent_offset > 0, "referent offset not initialized");

  if (UseG1GC) {
    Label slow_path;
    // rbx: method

    // Check if local 0 != NULL
    // If the receiver is null then it is OK to jump to the slow path.
    __ movptr(rax, Address(rsp, wordSize));

    __ testptr(rax, rax);
    __ jcc(Assembler::zero, slow_path);

    // rax: local 0
    // rbx: method (but can be used as scratch now)
    // rdx: scratch
    // rdi: scratch

    // Preserve the sender sp in case the pre-barrier
    // calls the runtime
    NOT_LP64(__ push(rsi));

    // Generate the G1 pre-barrier code to log the value of
    // the referent field in an SATB buffer.

    // Load the value of the referent field.
    const Address field_address(rax, referent_offset);
    __ load_heap_oop(rax, field_address);

    const Register sender_sp = NOT_LP64(rsi) LP64_ONLY(r13);
    const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
    NOT_LP64(__ get_thread(thread));

    // Generate the G1 pre-barrier code to log the value of
    // the referent field in an SATB buffer.
    __ g1_write_barrier_pre(noreg /* obj */,
                            rax /* pre_val */,
                            thread /* thread */,
                            rbx /* tmp */,
                            true /* tosca_live */,
                            true /* expand_call */);

    // _areturn
    NOT_LP64(__ pop(rsi));      // get sender sp
    __ pop(rdi);                // get return address
    __ mov(rsp, sender_sp);     // set sp to sender sp
    __ jmp(rdi);
    __ ret(0);

    // generate a vanilla interpreter entry as the slow path
    __ bind(slow_path);
    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
    return entry;
  }
#endif // INCLUDE_ALL_GCS

  // If G1 is not enabled then attempt to go through the accessor entry point
  // Reference.get is an accessor
  return NULL;
}

void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
  // Quick & dirty stack overflow checking: bang the stack & handle trap.
  // Note that we do the banging after the frame is setup, since the exception
  // handling code expects to find a valid interpreter frame on the stack.
  // Doing the banging earlier fails if the caller frame is not an interpreter
  // frame.
  // (Also, the exception throwing code expects to unlock any synchronized
  // method receiver, so do the banging after locking the receiver.)

  // Bang each page in the shadow zone. We can't assume it's been done for
  // an interpreter frame with greater than a page of locals, so each page
  // needs to be checked.  Only true for non-native.
  if (UseStackBanging) {
    const int page_size = os::vm_page_size();
    const int n_shadow_pages = ((int)JavaThread::stack_shadow_zone_size()) / page_size;
    const int start_page = native_call ? n_shadow_pages : 1;
    for (int pages = start_page; pages <= n_shadow_pages; pages++) {
      __ bang_stack_with_offset(pages*page_size);
    }
  }
}
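
// Illustrative sketch (comments only): assuming a 4K page and an 8-page
// shadow zone, the non-native case above emits probes roughly equivalent to
//
//   for (int p = 1; p <= 8; p++)
//     touch(rsp - p * 4096);   // write to each shadow page in turn
//
// so that an unmapped or guarded page traps here, in a known place, rather
// than somewhere inside a callee.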

// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the
// native method than the typical interpreter frame setup.
address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;

  // rbx: Method*
  // rbcp: sender sp

  address entry_point = __ pc();

  const Address constMethod       (rbx, Method::const_offset());
  const Address access_flags      (rbx, Method::access_flags_offset());
  const Address size_of_parameters(rcx, ConstMethod::
                                        size_of_parameters_offset());


  // get parameter size (always needed)
  __ movptr(rcx, constMethod);
  __ load_unsigned_short(rcx, size_of_parameters);

  // native calls don't need the stack size check since they have no
  // expression stack and the arguments are already on the stack and
  // we only add a handful of words to the stack

  // rbx: Method*
  // rcx: size of parameters
  // rbcp: sender sp
  __ pop(rax);                                       // get return address

  // for natives the size of locals is zero

  // compute beginning of parameters
  __ lea(rlocals, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));

  // add 2 zero-initialized slots for native calls
  // initialize result_handler slot
  __ push((int) NULL_WORD);
  // slot for oop temp
  // (static native method holder mirror/jni oop result)
  __ push((int) NULL_WORD);

  // initialize fixed part of activation frame
  generate_fixed_frame(true);

  // make sure method is native & not abstract
#ifdef ASSERT
  __ movl(rax, access_flags);
  {
    Label L;
    __ testl(rax, JVM_ACC_NATIVE);
    __ jcc(Assembler::notZero, L);
    __ stop("tried to execute non-native method as native");
    __ bind(L);
  }
  {
    Label L;
    __ testl(rax, JVM_ACC_ABSTRACT);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // Since at this point in the method invocation the exception handler
  // would try to exit the monitor of synchronized methods which hasn't
  // been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation will
  // check this flag.

  const Register thread1 = NOT_LP64(rax) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread1));
  const Address do_not_unlock_if_synchronized(thread1,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ movbool(do_not_unlock_if_synchronized, true);

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  bang_stack_shadow_pages(true);

  // reset the _do_not_unlock_if_synchronized flag
  NOT_LP64(__ get_thread(thread1));
  __ movbool(do_not_unlock_if_synchronized, false);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so method is not locked if overflows.
  if (synchronized) {
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    {
      Label L;
      __ movl(rax, access_flags);
      __ testl(rax, JVM_ACC_SYNCHRONIZED);
      __ jcc(Assembler::zero, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  {
    Label L;
    const Address monitor_block_top(rbp,
                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ movptr(rax, monitor_block_top);
    __ cmpptr(rax, rsp);
    __ jcc(Assembler::equal, L);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif

  // jvmti support
  __ notify_method_entry();

  // work registers
  const Register method = rbx;
  const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);
  const Register t      = NOT_LP64(rcx) LP64_ONLY(r11);

  // allocate space for parameters
  __ get_method(method);
  __ movptr(t, Address(method, Method::const_offset()));
  __ load_unsigned_short(t, Address(t, ConstMethod::size_of_parameters_offset()));

#ifndef _LP64
  __ shlptr(t, Interpreter::logStackElementSize); // Convert parameter count to bytes.
  __ addptr(t, 2*wordSize);     // allocate two more slots for JNIEnv and possible mirror
  __ subptr(rsp, t);
  __ andptr(rsp, -(StackAlignmentInBytes)); // gcc needs 16 byte aligned stacks to do XMM intrinsics
#else
  __ shll(t, Interpreter::logStackElementSize);

  __ subptr(rsp, t);
  __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
  __ andptr(rsp, -16); // must be 16 byte boundary (see amd64 ABI)
#endif // _LP64

  // get signature handler
  {
    Label L;
    __ movptr(t, Address(method, Method::signature_handler_offset()));
    __ testptr(t, t);
    __ jcc(Assembler::notZero, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               method);
    __ get_method(method);
    __ movptr(t, Address(method, Method::signature_handler_offset()));
    __ bind(L);
  }

  // call signature handler
  assert(InterpreterRuntime::SignatureHandlerGenerator::from() == rlocals,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::to() == rsp,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == NOT_LP64(t) LP64_ONLY(rscratch1),
         "adjust this code");

  // The generated handlers do not touch RBX (the method oop).
  // However, large signatures cannot be cached and are generated
  // each time here.  The slow-path generator can do a GC on return,
  // so we must reload it after the call.
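  // Illustrative sketch (comments only): the handler invoked below copies the
  // Java arguments into their native ABI locations, roughly
  //
  //   handler(from = rlocals /* Java locals */,
  //           to   = rsp     /* outgoing C argument area */);
  //   // ... and leaves the address of the result handler in rax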
  __ call(t);
  __ get_method(method);        // slow path can do a GC, reload RBX


  // result handler is in rax
  // set result handler
  __ movptr(Address(rbp,
                    (frame::interpreter_frame_result_handler_offset) * wordSize),
            rax);

  // pass mirror handle if static call
  {
    Label L;
    __ movl(t, Address(method, Method::access_flags_offset()));
    __ testl(t, JVM_ACC_STATIC);
    __ jcc(Assembler::zero, L);
    // get mirror
    __ load_mirror(t, method);
    // copy mirror into activation frame
    __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize),
              t);
    // pass handle to mirror
#ifndef _LP64
    __ lea(t, Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
    __ movptr(Address(rsp, wordSize), t);
#else
    __ lea(c_rarg1,
           Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
#endif // _LP64
    __ bind(L);
  }

  // get native function entry point
  {
    Label L;
    __ movptr(rax, Address(method, Method::native_function_offset()));
    ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
    __ cmpptr(rax, unsatisfied.addr());
    __ jcc(Assembler::notEqual, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               method);
    __ get_method(method);
    __ movptr(rax, Address(method, Method::native_function_offset()));
    __ bind(L);
  }

  // pass JNIEnv
#ifndef _LP64
  __ get_thread(thread);
  __ lea(t, Address(thread, JavaThread::jni_environment_offset()));
  __ movptr(Address(rsp, 0), t);

  // set_last_Java_frame_before_call
  // It is enough that the pc()
  // points into the right code segment. It does not have to be the correct return pc.
  __ set_last_Java_frame(thread, noreg, rbp, __ pc());
#else
  __ lea(c_rarg0, Address(r15_thread, JavaThread::jni_environment_offset()));

  // It is enough that the pc() points into the right code
  // segment. It does not have to be the correct return pc.
  __ set_last_Java_frame(rsp, rbp, (address) __ pc());
#endif // _LP64

  // change thread state
#ifdef ASSERT
  {
    Label L;
    __ movl(t, Address(thread, JavaThread::thread_state_offset()));
    __ cmpl(t, _thread_in_Java);
    __ jcc(Assembler::equal, L);
    __ stop("Wrong thread state in native stub");
    __ bind(L);
  }
#endif

  // Change state to native

  __ movl(Address(thread, JavaThread::thread_state_offset()),
          _thread_in_native);

  // Call the native method.
  __ call(rax);
  // 32: result potentially in rdx:rax or ST0
  // 64: result potentially in rax or xmm0

  // Verify or restore cpu control state after JNI call
  __ restore_cpu_control_state_after_jni();

  // NOTE: The order of these pushes is known to frame::interpreter_frame_result
  // in order to extract the result of a method call. If the order of these
  // pushes change or anything else is added to the stack then the code in
  // interpreter_frame_result must also change.

#ifndef _LP64
  // save potential result in ST(0) & rdx:rax
  // (if result handler is the T_FLOAT or T_DOUBLE handler, result must be in ST0 -
  // the check is necessary to avoid potential Intel FPU overflow problems by saving/restoring 'empty' FPU registers)
  // It is safe to do this push because state is _thread_in_native and return address will be found
  // via _last_native_pc and not via _last_java_sp

  // NOTE: the order of these push(es) is known to frame::interpreter_frame_result.
  // If the order changes or anything else is added to the stack the code in
  // interpreter_frame_result will have to be changed.

  { Label L;
    Label push_double;
    ExternalAddress float_handler(AbstractInterpreter::result_handler(T_FLOAT));
    ExternalAddress double_handler(AbstractInterpreter::result_handler(T_DOUBLE));
    __ cmpptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset + 1)*wordSize),
              float_handler.addr());
    __ jcc(Assembler::equal, push_double);
    __ cmpptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset + 1)*wordSize),
              double_handler.addr());
    __ jcc(Assembler::notEqual, L);
    __ bind(push_double);
    __ push_d(); // FP values are returned using the FPU, so push FPU contents (even if UseSSE > 0).
    __ bind(L);
  }
#else
  __ push(dtos);
#endif // _LP64

  __ push(ltos);

  // change thread state
  NOT_LP64(__ get_thread(thread));
  __ movl(Address(thread, JavaThread::thread_state_offset()),
          _thread_in_native_trans);

  if (os::is_MP()) {
    if (UseMembar) {
      // Force this write out before the read below
      __ membar(Assembler::Membar_mask_bits(
                  Assembler::LoadLoad | Assembler::LoadStore |
                  Assembler::StoreLoad | Assembler::StoreStore));
    } else {
      // Write serialization page so VM thread can do a pseudo remote membar.
      // We use the current thread pointer to calculate a thread specific
      // offset to write to within the page. This minimizes bus traffic
      // due to cache line collision.
      __ serialize_memory(thread, rcx);
    }
  }

#ifndef _LP64
  if (AlwaysRestoreFPU) {
    // Make sure the control word is correct.
    __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
  }
#endif // _LP64

  // check for safepoint operation in progress and/or pending suspend requests
  {
    Label Continue;
    Label slow_path;

#ifndef _LP64
    __ safepoint_poll(slow_path);
#else
    __ safepoint_poll(slow_path, r15_thread, rscratch1);
#endif

    __ cmpl(Address(thread, JavaThread::suspend_flags_offset()), 0);
    __ jcc(Assembler::equal, Continue);
    __ bind(slow_path);

    // Don't use call_VM as it will see a possible pending exception
    // and forward it and never return here preventing us from
    // clearing _last_native_pc down below.  Also can't use
    // call_VM_leaf either as it will check to see if r13 & r14 are
    // preserved and correspond to the bcp/locals pointers.  So we do a
    // runtime call by hand.
    //
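    // Illustrative sketch (comments only): the transition protocol around
    // this point, in C-like pseudocode:
    //
    //   thread->set_state(_thread_in_native_trans);
    //   if (safepoint_pending() || thread->suspend_flags() != 0)
    //     check_special_condition_for_native_trans(thread);  // may block
    //   thread->set_state(_thread_in_Java);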
#ifndef _LP64
    __ push(thread);
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
                                            JavaThread::check_special_condition_for_native_trans)));
    __ increment(rsp, wordSize);
    __ get_thread(thread);
#else
    __ mov(c_rarg0, r15_thread);
    __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
    __ andptr(rsp, -16); // align stack as required by ABI
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
    __ mov(rsp, r12); // restore sp
    __ reinit_heapbase();
#endif // _LP64
    __ bind(Continue);
  }

  // change thread state
  __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_Java);

  // reset_last_Java_frame
  __ reset_last_Java_frame(thread, true);

  if (CheckJNICalls) {
    // clear_pending_jni_exception_check
    __ movptr(Address(thread, JavaThread::pending_jni_exception_check_fn_offset()), NULL_WORD);
  }

  // reset handle block
  __ movptr(t, Address(thread, JavaThread::active_handles_offset()));
  __ movl(Address(t, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);

  // If result is an oop unbox and store it in frame where gc will see it
  // and result handler will pick it up

  {
    Label no_oop, not_weak, store_result;
    __ lea(t, ExternalAddress(AbstractInterpreter::result_handler(T_OBJECT)));
    __ cmpptr(t, Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize));
    __ jcc(Assembler::notEqual, no_oop);
    // retrieve result
    __ pop(ltos);
    // Unbox oop result, e.g. JNIHandles::resolve value.
    __ resolve_jobject(rax /* value */,
                       thread /* thread */,
                       t /* tmp */);
    __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize), rax);
    // keep stack depth as expected by pushing oop which will eventually be discarded
    __ push(ltos);
    __ bind(no_oop);
  }


  {
    Label no_reguard;
    __ cmpl(Address(thread, JavaThread::stack_guard_state_offset()),
            JavaThread::stack_guard_yellow_reserved_disabled);
    __ jcc(Assembler::notEqual, no_reguard);

    __ pusha(); // XXX only save smashed registers
#ifndef _LP64
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
    __ popa();
#else
    __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
    __ andptr(rsp, -16); // align stack as required by ABI
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
    __ mov(rsp, r12); // restore sp
    __ popa(); // XXX only restore smashed registers
    __ reinit_heapbase();
#endif // _LP64

    __ bind(no_reguard);
  }


  // The method register is junk from after the thread_in_native transition
  // until here.  Also can't call_VM until the bcp has been
  // restored.  Need bcp for throwing exception below so get it now.
  __ get_method(method);

  // restore to have legal interpreter frame, i.e., bci == 0 <=> code_base()
  __ movptr(rbcp, Address(method, Method::const_offset()));   // get ConstMethod*
  __ lea(rbcp, Address(rbcp, ConstMethod::codes_offset()));   // get codebase

  // handle exceptions (exception handling will handle unlocking!)
  {
    Label L;
    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
    __ jcc(Assembler::zero, L);
    // Note: At some point we may want to unify this with the code
    // used in call_VM_base(); i.e., we should use the
    // StubRoutines::forward_exception code. For now this doesn't work
    // here because the rsp is not correctly set at this point.
    __ MacroAssembler::call_VM(noreg,
                               CAST_FROM_FN_PTR(address,
                               InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  // do unlocking if necessary
  {
    Label L;
    __ movl(t, Address(method, Method::access_flags_offset()));
    __ testl(t, JVM_ACC_SYNCHRONIZED);
    __ jcc(Assembler::zero, L);
    // the code below should be shared with interpreter macro
    // assembler implementation
    {
      Label unlock;
      // BasicObjectLock will be first in list, since this is a
      // synchronized method. However, need to check that the object
      // has not been unlocked by an explicit monitorexit bytecode.
      const Address monitor(rbp,
                            (intptr_t)(frame::interpreter_frame_initial_sp_offset *
                                       wordSize - (int)sizeof(BasicObjectLock)));

      const Register regmon = NOT_LP64(rdx) LP64_ONLY(c_rarg1);

      // monitor expected in c_rarg1 for slow unlock path
      __ lea(regmon, monitor); // address of first monitor

      __ movptr(t, Address(regmon, BasicObjectLock::obj_offset_in_bytes()));
      __ testptr(t, t);
      __ jcc(Assembler::notZero, unlock);

      // Entry already unlocked, need to throw exception
      __ MacroAssembler::call_VM(noreg,
                                 CAST_FROM_FN_PTR(address,
                                 InterpreterRuntime::throw_illegal_monitor_state_exception));
      __ should_not_reach_here();

      __ bind(unlock);
      __ unlock_object(regmon);
    }
    __ bind(L);
  }

  // jvmti support
  // Note: This must happen _after_ handling/throwing any exceptions since
  //       the exception handler code notifies the runtime of method exits
  //       too. If this happens before, method entry/exit notifications are
  //       not properly paired (was bug - gri 11/22/99).
  __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);

  // restore potential result in edx:eax, call result handler to
  // restore potential result in ST0 & handle result

  __ pop(ltos);
  LP64_ONLY( __ pop(dtos));

  __ movptr(t, Address(rbp,
                       (frame::interpreter_frame_result_handler_offset) * wordSize));
  __ call(t);

  // remove activation
  __ movptr(t, Address(rbp,
                       frame::interpreter_frame_sender_sp_offset *
                       wordSize)); // get sender sp
  __ leave();                                // remove frame anchor
  __ pop(rdi);                               // get return address
  __ mov(rsp, t);                            // set sp to sender sp
  __ jmp(rdi);

  if (inc_counter) {
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(continue_after_compile);
  }

  return entry_point;
}

// Abstract method entry
// Attempt to execute abstract method.  Throw exception
address TemplateInterpreterGenerator::generate_abstract_entry(void) {

  address entry_point = __ pc();

  // abstract method entry

  // pop return address, reset last_sp to NULL
  __ empty_expression_stack();
  __ restore_bcp();      // rsi must be correct for exception handler   (was destroyed)
  __ restore_locals();   // make sure locals pointer is correct as well (was destroyed)

  // throw exception
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
  // the call_VM checks for exception, so we should never return here.
  __ should_not_reach_here();

  return entry_point;
}

//
// Generic interpreted method entry to (asm) interpreter
//
address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;

  // ebx: Method*
  // rbcp: sender sp
  address entry_point = __ pc();

  const Address constMethod(rbx, Method::const_offset());
  const Address access_flags(rbx, Method::access_flags_offset());
  const Address size_of_parameters(rdx,
                                   ConstMethod::size_of_parameters_offset());
  const Address size_of_locals(rdx, ConstMethod::size_of_locals_offset());


  // get parameter size (always needed)
  __ movptr(rdx, constMethod);
  __ load_unsigned_short(rcx, size_of_parameters);

  // rbx: Method*
  // rcx: size of parameters
  // rbcp: sender_sp (could differ from sp+wordSize if we were called via c2i )

  __ load_unsigned_short(rdx, size_of_locals); // get size of locals in words
  __ subl(rdx, rcx); // rdx = no. of additional locals

  // YYY
  //   __ incrementl(rdx);
  //   __ andl(rdx, -2);

  // see if we've got enough room on the stack for locals plus overhead.
  generate_stack_overflow_check();

  // get return address
  __ pop(rax);

  // compute beginning of parameters
  __ lea(rlocals, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));

  // rdx - # of additional locals
  // allocate space for locals
  // explicitly initialize locals
  {
    Label exit, loop;
    __ testl(rdx, rdx);
    __ jcc(Assembler::lessEqual, exit); // do nothing if rdx <= 0
    __ bind(loop);
    __ push((int) NULL_WORD); // initialize local variables
    __ decrementl(rdx);       // until everything initialized
    __ jcc(Assembler::greater, loop);
    __ bind(exit);
  }

  // initialize fixed part of activation frame
  generate_fixed_frame(false);

  // make sure method is not native & not abstract
#ifdef ASSERT
  __ movl(rax, access_flags);
  {
    Label L;
    __ testl(rax, JVM_ACC_NATIVE);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute native method as non-native");
    __ bind(L);
  }
  {
    Label L;
    __ testl(rax, JVM_ACC_ABSTRACT);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // Since at this point in the method invocation the exception
  // handler would try to exit the monitor of synchronized methods
  // which hasn't been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation
  // will check this flag.
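  //
  // Illustrative sketch (comments only): the flag protocol, in C-like
  // pseudocode:
  //
  //   thread->_do_not_unlock_if_synchronized = true;
  //   ... counter increment and shadow-page banging (either may throw) ...
  //   thread->_do_not_unlock_if_synchronized = false;
  //   if (synchronized) lock_method();   // only now is the monitor valid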

  const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread));
  const Address do_not_unlock_if_synchronized(thread,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ movbool(do_not_unlock_if_synchronized, true);

  __ profile_parameters_type(rax, rcx, rdx);
  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  Label profile_method;
  Label profile_method_continue;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow,
                          &profile_method,
                          &profile_method_continue);
    if (ProfileInterpreter) {
      __ bind(profile_method_continue);
    }
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  // check for synchronized interpreted methods
  bang_stack_shadow_pages(false);

  // reset the _do_not_unlock_if_synchronized flag
  NOT_LP64(__ get_thread(thread));
  __ movbool(do_not_unlock_if_synchronized, false);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so method is not locked if overflows.
  if (synchronized) {
    // Allocate monitor and lock method
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    {
      Label L;
      __ movl(rax, access_flags);
      __ testl(rax, JVM_ACC_SYNCHRONIZED);
      __ jcc(Assembler::zero, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  {
    Label L;
    const Address monitor_block_top (rbp,
                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ movptr(rax, monitor_block_top);
    __ cmpptr(rax, rsp);
    __ jcc(Assembler::equal, L);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif

  // jvmti support
  __ notify_method_entry();

  __ dispatch_next(vtos);

  // invocation counter overflow
  if (inc_counter) {
    if (ProfileInterpreter) {
      // We have decided to profile this method in the interpreter
      __ bind(profile_method);
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
      __ set_method_data_pointer_for_bcp();
      __ get_method(rbx);
      __ jmp(profile_method_continue);
    }
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(continue_after_compile);
  }

  return entry_point;
}

//-----------------------------------------------------------------------------
// Exceptions

void TemplateInterpreterGenerator::generate_throw_exception() {
  // Entry point in previous activation (i.e., if the caller was
  // interpreted)
  Interpreter::_rethrow_exception_entry = __ pc();
  // Restore sp to interpreter_frame_last_sp even though we are going
  // to empty the expression stack for the exception processing.
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
  // rax: exception
  // rdx: return address/pc that threw exception
  __ restore_bcp();    // r13/rsi points to call/send
  __ restore_locals();
  LP64_ONLY(__ reinit_heapbase());  // restore r12 as heapbase.
  // Entry point for exceptions thrown within interpreter code
  Interpreter::_throw_exception_entry = __ pc();
  // expression stack is undefined here
  // rax: exception
  // r13/rsi: exception bcp
  __ verify_oop(rax);
  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  LP64_ONLY(__ mov(c_rarg1, rax));

  // expression stack must be empty before entering the VM in case of
  // an exception
  __ empty_expression_stack();
  // find exception handler address and preserve exception oop
  __ call_VM(rdx,
             CAST_FROM_FN_PTR(address,
                          InterpreterRuntime::exception_handler_for_exception),
             rarg);
  // rax: exception handler entry point
  // rdx: preserved exception oop
  // r13/rsi: bcp for exception handler
  __ push_ptr(rdx); // push exception which is now the only value on the stack
  __ jmp(rax);      // jump to exception handler (may be _remove_activation_entry!)

  // If the exception is not handled in the current frame the frame is
  // removed and the exception is rethrown (i.e. exception
  // continuation is _rethrow_exception).
  //
  // Note: At this point the bci still refers to the instruction
  // which caused the exception and the expression stack is
  // empty. Thus, for any VM calls at this point, GC will find a legal
  // oop map (with empty expression stack).

  // In current activation
  // tos: exception
  // esi: exception bcp

  //
  // JVMTI PopFrame support
  //

  Interpreter::_remove_activation_preserving_args_entry = __ pc();
  __ empty_expression_stack();
  // Set the popframe_processing bit in pending_popframe_condition
  // indicating that we are currently handling popframe, so that
  // call_VMs that may happen later do not trigger new popframe
  // handling cycles.
  const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread));
  __ movl(rdx, Address(thread, JavaThread::popframe_condition_offset()));
  __ orl(rdx, JavaThread::popframe_processing_bit);
  __ movl(Address(thread, JavaThread::popframe_condition_offset()), rdx);

  {
    // Check to see whether we are returning to a deoptimized frame.
    // (The PopFrame call ensures that the caller of the popped frame is
    // either interpreted or compiled and deoptimizes it if compiled.)
    // In this case, we can't call dispatch_next() after the frame is
    // popped, but instead must save the incoming arguments and restore
    // them after deoptimization has occurred.
    //
    // Note that we don't compare the return PC against the
    // deoptimization blob's unpack entry because of the presence of
    // adapter frames in C2.
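    //
    // The test below, as C-like pseudocode (an illustrative sketch,
    // not generated code):
    //
    //   address caller_pc = *(address*)(rbp + frame::return_addr_offset * wordSize);
    //   if (!InterpreterRuntime::interpreter_contains(caller_pc)) {
    //     // caller was deoptimized: preserve its outgoing arguments and
    //     // hand control to the deoptimization handler instead
    //   }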
    Label caller_not_deoptimized;
    Register rarg = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
    __ movptr(rarg, Address(rbp, frame::return_addr_offset * wordSize));
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                               InterpreterRuntime::interpreter_contains), rarg);
    __ testl(rax, rax);
    __ jcc(Assembler::notZero, caller_not_deoptimized);

    // Compute size of arguments for saving when returning to
    // deoptimized caller
    __ get_method(rax);
    __ movptr(rax, Address(rax, Method::const_offset()));
    __ load_unsigned_short(rax, Address(rax, in_bytes(ConstMethod::
                                                size_of_parameters_offset())));
    __ shll(rax, Interpreter::logStackElementSize);
    __ restore_locals();
    __ subptr(rlocals, rax);
    __ addptr(rlocals, wordSize);
    // Save these arguments
    NOT_LP64(__ get_thread(thread));
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                                           Deoptimization::
                                           popframe_preserve_args),
                          thread, rax, rlocals);

    __ remove_activation(vtos, rdx,
                         /* throw_monitor_exception */ false,
                         /* install_monitor_exception */ false,
                         /* notify_jvmdi */ false);

    // Inform deoptimization that it is responsible for restoring
    // these arguments
    NOT_LP64(__ get_thread(thread));
    __ movl(Address(thread, JavaThread::popframe_condition_offset()),
            JavaThread::popframe_force_deopt_reexecution_bit);

    // Continue in deoptimization handler
    __ jmp(rdx);

    __ bind(caller_not_deoptimized);
  }

  __ remove_activation(vtos, rdx, /* rdx result (retaddr) is not used */
                       /* throw_monitor_exception */ false,
                       /* install_monitor_exception */ false,
                       /* notify_jvmdi */ false);

  // Finish with popframe handling
  // A previous I2C followed by a deoptimization might have moved the
  // outgoing arguments further up the stack. PopFrame expects the
  // mutations to those outgoing arguments to be preserved and other
  // constraints basically require this frame to look exactly as
  // though it had previously invoked an interpreted activation with
  // no space between the top of the expression stack (current
  // last_sp) and the top of stack. Rather than force deopt to
  // maintain this kind of invariant all the time we call a small
  // fixup routine to move the mutated arguments onto the top of our
  // expression stack if necessary.
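  //
  // In effect (an illustrative sketch, not generated code):
  //
  //   InterpreterRuntime::popframe_move_outgoing_args(thread,
  //       /* src_address  = */ rsp,      // where the args are now
  //       /* dest_address = */ last_sp); // where PopFrame expects them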
#ifndef _LP64
  __ mov(rax, rsp);
  __ movptr(rbx, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ get_thread(thread);
  // PC must point into interpreter here
  __ set_last_Java_frame(thread, noreg, rbp, __ pc());
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), thread, rax, rbx);
  __ get_thread(thread);
#else
  __ mov(c_rarg1, rsp);
  __ movptr(c_rarg2, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  // PC must point into interpreter here
  __ set_last_Java_frame(noreg, rbp, __ pc());
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), r15_thread, c_rarg1, c_rarg2);
#endif
  __ reset_last_Java_frame(thread, true);

  // Restore the last_sp and null it out
  __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);

  __ restore_bcp();
  __ restore_locals();
  // The method data pointer was incremented already during
  // call profiling. We have to restore the mdp for the current bcp.
  if (ProfileInterpreter) {
    __ set_method_data_pointer_for_bcp();
  }

  // Clear the popframe condition flag
  NOT_LP64(__ get_thread(thread));
  __ movl(Address(thread, JavaThread::popframe_condition_offset()),
          JavaThread::popframe_inactive);

#if INCLUDE_JVMTI
  {
    Label L_done;
    const Register local0 = rlocals;

    __ cmpb(Address(rbcp, 0), Bytecodes::_invokestatic);
    __ jcc(Assembler::notEqual, L_done);

    // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
    // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.
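    //
    // Roughly (an illustrative sketch, not generated code; local0[0]
    // stands for the first local/argument slot):
    //
    //   oop arg = InterpreterRuntime::member_name_arg_or_null(local0[0], method, bcp);
    //   if (arg != NULL) local0[0] = arg; // put the MemberName back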

    __ get_method(rdx);
    __ movptr(rax, Address(local0, 0));
    __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), rax, rdx, rbcp);

    __ testptr(rax, rax);
    __ jcc(Assembler::zero, L_done);

    __ movptr(Address(local0, 0), rax); // restore the MemberName into local slot 0
    __ bind(L_done);
  }
#endif // INCLUDE_JVMTI

  __ dispatch_next(vtos);
  // end of PopFrame support

  Interpreter::_remove_activation_entry = __ pc();

  // preserve exception over this code sequence
  __ pop_ptr(rax);
  NOT_LP64(__ get_thread(thread));
  __ movptr(Address(thread, JavaThread::vm_result_offset()), rax);
  // remove the activation (without doing throws on illegalMonitorExceptions)
  __ remove_activation(vtos, rdx, false, true, false);
  // restore exception
  NOT_LP64(__ get_thread(thread));
  __ get_vm_result(rax, thread);

  // In between activations - previous activation type unknown yet
  // compute continuation point - the continuation point expects the
  // following registers set up:
  //
  // rax: exception
  // rdx: return address/pc that threw exception
  // rsp: expression stack of caller
  // rbp: ebp of caller
  __ push(rax);                                  // save exception
  __ push(rdx);                                  // save return address
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                          SharedRuntime::exception_handler_for_return_address),
                        thread, rdx);
  __ mov(rbx, rax);                              // save exception handler
  __ pop(rdx);                                   // restore return address
  __ pop(rax);                                   // restore exception
  // Note that an "issuing PC" is actually the next PC after the call
  __ jmp(rbx);                                   // jump to exception
                                                 // handler of caller
}


//
// JVMTI ForceEarlyReturn support
//
address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
  address entry = __ pc();

  __ restore_bcp();
  __ restore_locals();
  __ empty_expression_stack();
  __ load_earlyret_value(state); // the 32-bit build returns the value in rdx, so don't reuse it

  const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread));
  __ movptr(rcx, Address(thread, JavaThread::jvmti_thread_state_offset()));
  Address cond_addr(rcx, JvmtiThreadState::earlyret_state_offset());

  // Clear the earlyret state
  __ movl(cond_addr, JvmtiThreadState::earlyret_inactive);

  __ remove_activation(state, rsi,
                       false, /* throw_monitor_exception */
                       false, /* install_monitor_exception */
                       true); /* notify_jvmdi */
  __ jmp(rsi);

  return entry;
} // end of ForceEarlyReturn support


//-----------------------------------------------------------------------------
// Helper for vtos entry point generation

void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
                                                         address& bep,
                                                         address& cep,
                                                         address& sep,
                                                         address& aep,
                                                         address& iep,
                                                         address& lep,
                                                         address& fep,
                                                         address& dep,
                                                         address& qep,
                                                         address& vep) {
  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
  // Each typed entry point pushes its tos value onto the expression
  // stack and then joins the common vtos entry point (vep) at L.
  Label L;
  aep = __ pc(); __ push_ptr();   __ jmp(L);
  qep = __ pc(); __ push_ptr();   __ jmp(L);
#ifndef _LP64
  fep = __ pc(); __ push(ftos);   __ jmp(L);
  dep = __ pc(); __ push(dtos);   __ jmp(L);
#else
  fep = __ pc(); __ push_f(xmm0); __ jmp(L);
  dep = __ pc(); __ push_d(xmm0); __ jmp(L);
#endif // _LP64
  lep = __ pc(); __ push_l();     __ jmp(L);
  bep = cep = sep =
  iep = __ pc(); __ push_i();
  vep = __ pc();
  __ bind(L);
  generate_and_dispatch(t);
}

//-----------------------------------------------------------------------------

// Non-product code
#ifndef PRODUCT

address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
  address entry = __ pc();

#ifndef _LP64
  // prepare expression stack
  __ pop(rcx);      // pop return address so expression stack is 'pure'
  __ push(state);   // save tosca

  // pass tosca registers as arguments & call tracer
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode), rcx, rax, rdx);
  __ mov(rcx, rax); // make sure return address is not destroyed by pop(state)
  __ pop(state);    // restore tosca

  // return
  __ jmp(rcx);
#else
  __ push(state);
  __ push(c_rarg0);
  __ push(c_rarg1);
  __ push(c_rarg2);
  __ push(c_rarg3);
  __ mov(c_rarg2, rax);  // Pass itos
#ifdef _WIN64
  __ movflt(xmm3, xmm0); // Pass ftos
#endif
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode),
             c_rarg1, c_rarg2, c_rarg3);
  __ pop(c_rarg3);
  __ pop(c_rarg2);
  __ pop(c_rarg1);
  __ pop(c_rarg0);
  __ pop(state);
  __ ret(0);             // return from result handler
#endif // _LP64

  return entry;
}

void TemplateInterpreterGenerator::count_bytecode() {
  __ incrementl(ExternalAddress((address) &BytecodeCounter::_counter_value));
}

void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
  __ incrementl(ExternalAddress((address) &BytecodeHistogram::_counters[t->bytecode()]));
}

void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
  // _index caches the previous bytecode; shift it down and or in this
  // template's bytecode to form the index of the (previous, current) pair.
  __ mov32(rbx, ExternalAddress((address) &BytecodePairHistogram::_index));
  __ shrl(rbx, BytecodePairHistogram::log2_number_of_codes);
  __ orl(rbx,
         ((int) t->bytecode()) <<
         BytecodePairHistogram::log2_number_of_codes);
  __ mov32(ExternalAddress((address) &BytecodePairHistogram::_index), rbx);
  __ lea(rscratch1, ExternalAddress((address) BytecodePairHistogram::_counters));
  __ incrementl(Address(rscratch1, rbx, Address::times_4));
}


void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
  // Call a little run-time stub to avoid blow-up for each bytecode.
  // The run-time stub saves the right registers, depending on
  // the tosca in-state for the given template.

  assert(Interpreter::trace_code(t->tos_in()) != NULL,
         "entry must have been generated");
#ifndef _LP64
  __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
#else
  __ mov(r12, rsp);    // remember sp (can only use r12 if not using call_VM)
  __ andptr(rsp, -16); // align stack as required by ABI
  __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
  __ mov(rsp, r12);    // restore sp
  __ reinit_heapbase();
#endif // _LP64
}


void TemplateInterpreterGenerator::stop_interpreter_at() {
  Label L;
  __ cmp32(ExternalAddress((address) &BytecodeCounter::_counter_value),
           StopInterpreterAt);
  __ jcc(Assembler::notEqual, L);
  __ int3();
  __ bind(L);
}
#endif // !PRODUCT