/*
 * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateInterpreterGenerator.hpp"
#include "interpreter/templateTable.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "oops/valueKlass.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

#define __ Disassembler::hook<InterpreterMacroAssembler>(__FILE__, __LINE__, _masm)->

// Size of interpreter code.  Increase if too small.  The interpreter will
// fail with a guarantee ("not enough space for interpreter generation")
// if too small.
// Run with +PrintInterpreter to get the VM to print out the size.
// Max size with JVMTI
#ifdef AMD64
int TemplateInterpreter::InterpreterCodeSize = JVMCI_ONLY(280) NOT_JVMCI(268) * 1024;
#else
int TemplateInterpreter::InterpreterCodeSize = 224 * 1024;
#endif // AMD64

// Global Register Names
static const Register rbcp    = LP64_ONLY(r13) NOT_LP64(rsi);
static const Register rlocals = LP64_ONLY(r14) NOT_LP64(rdi);

const int method_offset = frame::interpreter_frame_method_offset * wordSize;
const int bcp_offset    = frame::interpreter_frame_bcp_offset    * wordSize;
const int locals_offset = frame::interpreter_frame_locals_offset * wordSize;


//-----------------------------------------------------------------------------

address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();

#ifdef ASSERT
  {
    Label L;
    __ lea(rax, Address(rbp,
                        frame::interpreter_frame_monitor_block_top_offset *
                        wordSize));
    __ cmpptr(rax, rsp); // rax = maximal rsp for current rbp (stack
                         // grows negative)
    __ jcc(Assembler::aboveEqual, L); // check if frame is complete
    __ stop ("interpreter frame not set up");
    __ bind(L);
  }
#endif // ASSERT
  // Restore bcp under the assumption that the current frame is still
  // interpreted
  __ restore_bcp();

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // throw exception
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::throw_StackOverflowError));
  return entry;
}

address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler() {
  address entry = __ pc();
  // The expression stack must be empty before entering the VM if an
  // exception happened.
  __ empty_expression_stack();

  // Setup parameters.
  // ??? convention: expect aberrant index in register ebx/rbx.
  // Pass array to create more detailed exceptions.
  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::
                              throw_ArrayIndexOutOfBoundsException),
             rarg, rbx);
  return entry;
}

address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();

  // object is at TOS
  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  __ pop(rarg);

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();

  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::
                              throw_ClassCastException),
             rarg);
  return entry;
}

address TemplateInterpreterGenerator::generate_exception_handler_common(
        const char* name, const char* message, bool pass_oop) {
  assert(!pass_oop || message == NULL, "either oop or message but not both");
  address entry = __ pc();

  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  Register rarg2 = NOT_LP64(rbx) LP64_ONLY(c_rarg2);

  if (pass_oop) {
    // object is at TOS
    __ pop(rarg2);
  }
  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // setup parameters
  __ lea(rarg, ExternalAddress((address)name));
  if (pass_oop) {
    __ call_VM(rax, CAST_FROM_FN_PTR(address,
                                     InterpreterRuntime::
                                     create_klass_exception),
               rarg, rarg2);
  } else {
    __ lea(rarg2, ExternalAddress((address)message));
    __ call_VM(rax,
               CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception),
               rarg, rarg2);
  }
  // throw exception
  __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
  return entry;
}

address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
  address entry = __ pc();

#ifndef _LP64
#ifdef COMPILER2
  // The FPU stack is clean if UseSSE >= 2 but must be cleaned in other cases
  if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) {
    for (int i = 1; i < 8; i++) {
      __ ffree(i);
    }
  } else if (UseSSE < 2) {
    __ empty_FPU_stack();
  }
#endif // COMPILER2
  if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) {
    __ MacroAssembler::verify_FPU(1, "generate_return_entry_for compiled");
  } else {
    __ MacroAssembler::verify_FPU(0, "generate_return_entry_for compiled");
  }

  if (state == ftos) {
    __ MacroAssembler::verify_FPU(UseSSE >= 1 ? 0 : 1, "generate_return_entry_for in interpreter");
  } else if (state == dtos) {
    __ MacroAssembler::verify_FPU(UseSSE >= 2 ? 0 : 1, "generate_return_entry_for in interpreter");
  }
#endif // _LP64

  // Restore stack bottom in case i2c adjusted stack
  __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  // and NULL it as marker that esp is now tos until next java call
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);

  if (/*state == qtos*/ false && ValueTypeReturnedAsFields) {
#ifndef _LP64
    __ super_call_VM_leaf(StubRoutines::store_value_type_fields_to_buf());
#else
    // A value type is being returned. If fields are in registers we
    // need to allocate a value type instance and initialize it with
    // the value of the fields.
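    // Note (assumption, illustrative): the callee tags rax with 1 when the
    // fields are returned in registers; rax then holds the ValueKlass* with
    // its low bit set (cleared below via andptr(rbx, -2)). An untagged rax
    // is already a pointer to a buffered value, so no allocation is needed.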
    Label skip, slow_case;
    // We only need to allocate a new buffered value if the callee did
    // not already return one
    __ testptr(rax, 1);
    __ jcc(Assembler::zero, skip);

    // Try to allocate a new buffered value (from the heap)
    if (UseTLAB) {
      __ mov(rbx, rax);
      __ andptr(rbx, -2);

      __ movl(r14, Address(rbx, Klass::layout_helper_offset()));

      __ movptr(r13, Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())));
      __ lea(r14, Address(r13, r14, Address::times_1));
      __ cmpptr(r14, Address(r15_thread, in_bytes(JavaThread::tlab_end_offset())));
      __ jcc(Assembler::above, slow_case);
      __ movptr(Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())), r14);

      if (UseBiasedLocking) {
        __ movptr(rax, Address(rbx, Klass::prototype_header_offset()));
        __ movptr(Address(r13, oopDesc::mark_offset_in_bytes()), rax);
      } else {
        __ movptr(Address(r13, oopDesc::mark_offset_in_bytes()),
                  (intptr_t)markOopDesc::prototype());
      }
      __ xorl(rax, rax); // use zero reg to clear memory (shorter code)
      __ store_klass_gap(r13, rax);  // zero klass gap for compressed oops
      __ mov(rax, rbx);
      __ store_klass(r13, rbx);  // klass

      // We have our new buffered value, initialize its fields with a
      // value class specific handler
      __ movptr(rbx, Address(rax, ValueKlass::pack_handler_offset()));
      __ mov(rax, r13);
      __ call(rbx);
      __ jmp(skip);
    }

    __ bind(slow_case);
    // We failed to allocate a new value, fall back to a runtime
    // call. Some oop field may be live in some registers but we can't
    // tell. That runtime call will take care of preserving them
    // across a GC if there's one.
    __ super_call_VM_leaf(StubRoutines::store_value_type_fields_to_buf());
    __ bind(skip);
#endif
  }

  __ restore_bcp();
  __ restore_locals();

  if (state == atos) {
    Register mdp = rbx;
    Register tmp = rcx;
    __ profile_return_type(mdp, rax, tmp);
  }

  const Register cache = rbx;
  const Register index = rcx;
  __ get_cache_and_index_at_bcp(cache, index, 1, index_size);

  const Register flags = cache;
  __ movl(flags, Address(cache, index, Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
  __ andl(flags, ConstantPoolCacheEntry::parameter_size_mask);
  __ lea(rsp, Address(rsp, flags, Interpreter::stackElementScale()));

  const Register java_thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
  if (JvmtiExport::can_pop_frame()) {
    NOT_LP64(__ get_thread(java_thread));
    __ check_and_handle_popframe(java_thread);
  }
  if (JvmtiExport::can_force_early_return()) {
    NOT_LP64(__ get_thread(java_thread));
    __ check_and_handle_earlyret(java_thread);
  }

  __ dispatch_next(state, step);

  return entry;
}


address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step, address continuation) {
  address entry = __ pc();

#ifndef _LP64
  if (state == ftos) {
    __ MacroAssembler::verify_FPU(UseSSE >= 1 ? 0 : 1, "generate_deopt_entry_for in interpreter");
  } else if (state == dtos) {
    __ MacroAssembler::verify_FPU(UseSSE >= 2 ? 0 : 1, "generate_deopt_entry_for in interpreter");
  }
#endif // _LP64

  // NULL last_sp until next java call
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
  __ restore_bcp();
  __ restore_locals();
  const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread));
#if INCLUDE_JVMCI
  // Check if we need to take lock at entry of synchronized method.  This can
  // only occur on method entry so emit it only for vtos with step 0.
  if ((EnableJVMCI || UseAOT) && state == vtos && step == 0) {
    Label L;
    __ cmpb(Address(thread, JavaThread::pending_monitorenter_offset()), 0);
    __ jcc(Assembler::zero, L);
    // Clear flag.
    __ movb(Address(thread, JavaThread::pending_monitorenter_offset()), 0);
    // Satisfy calling convention for lock_method().
    __ get_method(rbx);
    // Take lock.
    lock_method();
    __ bind(L);
  } else {
#ifdef ASSERT
    if (EnableJVMCI) {
      Label L;
      __ cmpb(Address(r15_thread, JavaThread::pending_monitorenter_offset()), 0);
      __ jcc(Assembler::zero, L);
      __ stop("unexpected pending monitor in deopt entry");
      __ bind(L);
    }
#endif
  }
#endif
  // handle exceptions
  {
    Label L;
    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
    __ jcc(Assembler::zero, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }
  if (continuation == NULL) {
    __ dispatch_next(state, step);
  } else {
    __ jump_to_entry(continuation);
  }
  return entry;
}

address TemplateInterpreterGenerator::generate_result_handler_for(
        BasicType type) {
  address entry = __ pc();
  switch (type) {
  case T_BOOLEAN: __ c2bool(rax);            break;
#ifndef _LP64
  case T_CHAR   : __ andptr(rax, 0xFFFF);    break;
#else
  case T_CHAR   : __ movzwl(rax, rax);       break;
#endif // _LP64
  case T_BYTE   : __ sign_extend_byte(rax);  break;
  case T_SHORT  : __ sign_extend_short(rax); break;
  case T_INT    : /* nothing to do */        break;
  case T_LONG   : /* nothing to do */        break;
  case T_VOID   : /* nothing to do */        break;
#ifndef _LP64
  case T_DOUBLE :
  case T_FLOAT  :
    { const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp();
      __ pop(t);                            // remove return address first
      // Must return a result for interpreter or compiler. In SSE
      // mode, results are returned in xmm0 and the FPU stack must
      // be empty.
      if (type == T_FLOAT && UseSSE >= 1) {
        // Load ST0
        __ fld_d(Address(rsp, 0));
        // Store as float and empty fpu stack
        __ fstp_s(Address(rsp, 0));
        // and reload
        __ movflt(xmm0, Address(rsp, 0));
      } else if (type == T_DOUBLE && UseSSE >= 2) {
        __ movdbl(xmm0, Address(rsp, 0));
      } else {
        // restore ST0
        __ fld_d(Address(rsp, 0));
      }
      // and pop the temp
      __ addptr(rsp, 2 * wordSize);
      __ push(t);                           // restore return address
    }
    break;
#else
  case T_FLOAT  : /* nothing to do */        break;
  case T_DOUBLE : /* nothing to do */        break;
#endif // _LP64

  case T_VALUETYPE: // fall through (value types are handled with oops)
  case T_OBJECT :
    // retrieve result from frame
    __ movptr(rax, Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize));
    // and verify it
    __ verify_oop(rax);
    break;
  default       : ShouldNotReachHere();
  }
  __ ret(0);                                 // return from result handler
  return entry;
}

address TemplateInterpreterGenerator::generate_safept_entry_for(
        TosState state,
        address runtime_entry) {
  address entry = __ pc();
  __ push(state);
  __ call_VM(noreg, runtime_entry);
  __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
  return entry;
}



// Helpers for commoning out cases in the various types of method entries.
//


// increment invocation count & check for overflow
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test
//
// rbx: method
// rcx: invocation counter
//
void TemplateInterpreterGenerator::generate_counter_incr(
        Label* overflow,
        Label* profile_method,
        Label* profile_method_continue) {
  Label done;
  // Note: In tiered we increment either counters in Method* or in MDO depending if we're profiling or not.
  if (TieredCompilation) {
    int increment = InvocationCounter::count_increment;
    Label no_mdo;
    if (ProfileInterpreter) {
      // Are we profiling?
      __ movptr(rax, Address(rbx, Method::method_data_offset()));
      __ testptr(rax, rax);
      __ jccb(Assembler::zero, no_mdo);
      // Increment counter in the MDO
      const Address mdo_invocation_counter(rax, in_bytes(MethodData::invocation_counter_offset()) +
                                                in_bytes(InvocationCounter::counter_offset()));
      const Address mask(rax, in_bytes(MethodData::invoke_mask_offset()));
      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rcx, false, Assembler::zero, overflow);
      __ jmp(done);
    }
    __ bind(no_mdo);
    // Increment counter in MethodCounters
    const Address invocation_counter(rax,
                                     MethodCounters::invocation_counter_offset() +
                                     InvocationCounter::counter_offset());
    __ get_method_counters(rbx, rax, done);
    const Address mask(rax, in_bytes(MethodCounters::invoke_mask_offset()));
    __ increment_mask_and_jump(invocation_counter, increment, mask, rcx,
                               false, Assembler::zero, overflow);
    __ bind(done);
  } else { // not TieredCompilation
    const Address backedge_counter(rax,
                                   MethodCounters::backedge_counter_offset() +
                                   InvocationCounter::counter_offset());
    const Address invocation_counter(rax,
                                     MethodCounters::invocation_counter_offset() +
                                     InvocationCounter::counter_offset());

    __ get_method_counters(rbx, rax, done);

    if (ProfileInterpreter) {
      __ incrementl(Address(rax,
                            MethodCounters::interpreter_invocation_counter_offset()));
    }
    // Update standard invocation counters
    __ movl(rcx, invocation_counter);
    __ incrementl(rcx, InvocationCounter::count_increment);
    __ movl(invocation_counter, rcx); // save invocation count

    __ movl(rax, backedge_counter);   // load backedge counter
    __ andl(rax, InvocationCounter::count_mask_value); // mask out the status bits

    __ addl(rcx, rax);                // add both counters

    // profile_method is non-null only for interpreted methods, so
    // profile_method != NULL == !native_call

    if (ProfileInterpreter && profile_method != NULL) {
      // Test to see if we should create a method data oop
      __ movptr(rax, Address(rbx, Method::method_counters_offset()));
      __ cmp32(rcx, Address(rax, in_bytes(MethodCounters::interpreter_profile_limit_offset())));
      __ jcc(Assembler::less, *profile_method_continue);

      // if no method data exists, go to profile_method
      __ test_method_data_pointer(rax, *profile_method);
    }

    __ movptr(rax, Address(rbx, Method::method_counters_offset()));
    __ cmp32(rcx, Address(rax, in_bytes(MethodCounters::interpreter_invocation_limit_offset())));
    __ jcc(Assembler::aboveEqual, *overflow);
    __ bind(done);
  }
}

void TemplateInterpreterGenerator::generate_counter_overflow(Label& do_continue) {

  // Asm interpreter on entry
  // r14/rdi - locals
  // r13/rsi - bcp
  // rbx - method
  // rdx - cpool --- DOES NOT APPEAR TO BE TRUE
  // rbp - interpreter frame

  // On return (i.e. jump to entry_point) [ back to invocation of interpreter ]
  // Everything as it was on entry
  // rdx is not restored. Doesn't appear to really be set.

  // InterpreterRuntime::frequency_counter_overflow takes two
  // arguments, the first (thread) is passed by call_VM, the second
  // indicates if the counter overflow occurs at a backwards branch
  // (NULL bcp).  We pass zero for it.  The call returns the address
  // of the verified entry point for the method or NULL if the
  // compilation did not complete (either went background or bailed
  // out).
  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  __ movl(rarg, 0);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::frequency_counter_overflow),
             rarg);

  __ movptr(rbx, Address(rbp, method_offset));   // restore Method*
  // Preserve invariant that r13/r14 contain bcp/locals of sender frame
  // and jump to the interpreted entry.
  __ jmp(do_continue, relocInfo::none);
}

// See if we've got enough room on the stack for locals plus overhead below
// JavaThread::stack_overflow_limit(). If not, throw a StackOverflowError
// without going through the signal handler, i.e., reserved and yellow zones
// will not be made usable. The shadow zone must suffice to handle the
// overflow.
// The expression stack grows down incrementally, so the normal guard
// page mechanism will work for that.
//
// NOTE: Since the additional locals are also always pushed (this was not
// obvious in generate_fixed_frame), the guard should work for them too.
//
// Args:
//   rdx: number of additional locals this frame needs (what we must check)
//   rbx: Method*
//
// Kills:
//   rax
void TemplateInterpreterGenerator::generate_stack_overflow_check(void) {

  // monitor entry size: see picture of stack in frame_x86.hpp
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

  // total overhead size: entry_size + (saved rbp through expr stack
  // bottom).  be sure to change this if you add/subtract anything
  // to/from the overhead area
  const int overhead_size =
    -(frame::interpreter_frame_initial_sp_offset * wordSize) + entry_size;

  const int page_size = os::vm_page_size();

  Label after_frame_check;

  // see if the frame is greater than one page in size. If so,
  // then we need to verify there is enough stack space remaining
  // for the additional locals.
  __ cmpl(rdx, (page_size - overhead_size) / Interpreter::stackElementSize);
  __ jcc(Assembler::belowEqual, after_frame_check);

  // compute rsp as if this were going to be the last frame on
  // the stack before the red zone

  Label after_frame_check_pop;
  const Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread);
#ifndef _LP64
  __ push(thread);
  __ get_thread(thread);
#endif

  const Address stack_limit(thread, JavaThread::stack_overflow_limit_offset());

  // locals + overhead, in bytes
  __ mov(rax, rdx);
  __ shlptr(rax, Interpreter::logStackElementSize); // Convert parameter count to bytes.
  __ addptr(rax, overhead_size);

#ifdef ASSERT
  Label limit_okay;
  // Verify that thread stack overflow limit is non-zero.
  __ cmpptr(stack_limit, (int32_t)NULL_WORD);
  __ jcc(Assembler::notEqual, limit_okay);
  __ stop("stack overflow limit is zero");
  __ bind(limit_okay);
#endif

  // Add locals/frame size to stack limit.
  __ addptr(rax, stack_limit);

  // Check against the current stack bottom.
  __ cmpptr(rsp, rax);

  __ jcc(Assembler::above, after_frame_check_pop);
  NOT_LP64(__ pop(rsi));  // get saved bcp

  // Restore sender's sp as SP. This is necessary if the sender's
  // frame is an extended compiled frame (see gen_c2i_adapter())
  // and safer anyway in case of JSR292 adaptations.

  __ pop(rax); // return address must be moved if SP is changed
  __ mov(rsp, rbcp);
  __ push(rax);

  // Note: the restored frame is not necessarily interpreted.
  // Use the shared runtime version of the StackOverflowError.
  assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
  __ jump(ExternalAddress(StubRoutines::throw_StackOverflowError_entry()));
  // all done with frame size check
  __ bind(after_frame_check_pop);
  NOT_LP64(__ pop(rsi));

  // all done with frame size check
  __ bind(after_frame_check);
}

// Allocate monitor and lock method (asm interpreter)
//
// Args:
//      rbx: Method*
//      r14/rdi: locals
//
// Kills:
//      rax
//      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ...(param regs)
//      rscratch1, rscratch2 (scratch regs)
void TemplateInterpreterGenerator::lock_method() {
  // synchronize method
  const Address access_flags(rbx, Method::access_flags_offset());
  const Address monitor_block_top(
        rbp,
        frame::interpreter_frame_monitor_block_top_offset * wordSize);
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

#ifdef ASSERT
  {
    Label L;
    __ movl(rax, access_flags);
    __ testl(rax, JVM_ACC_SYNCHRONIZED);
    __ jcc(Assembler::notZero, L);
    __ stop("method doesn't need synchronization");
    __ bind(L);
  }
#endif // ASSERT

  // get synchronization object
  {
    Label done;
    __ movl(rax, access_flags);
    __ testl(rax, JVM_ACC_STATIC);
    // get receiver (assume this is frequent case)
    __ movptr(rax, Address(rlocals, Interpreter::local_offset_in_bytes(0)));
    __ jcc(Assembler::zero, done);
    __ load_mirror(rax, rbx);

#ifdef ASSERT
    {
      Label L;
      __ testptr(rax, rax);
      __ jcc(Assembler::notZero, L);
      __ stop("synchronization object is NULL");
      __ bind(L);
    }
#endif // ASSERT

    __ bind(done);
    __ resolve(IS_NOT_NULL, rax);
  }

  // add space for monitor & lock
  __ subptr(rsp, entry_size);        // add space for a monitor entry
  __ movptr(monitor_block_top, rsp); // set new monitor block top
  // store object
  __ movptr(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax);
  const Register lockreg = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
  __ movptr(lockreg, rsp); // object address
  __ lock_object(lockreg);
}

// Generate a fixed interpreter frame. This is identical setup for
// interpreted methods and for native methods hence the shared code.
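// The fixed frame set up by the pushes below holds (from high to low
// addresses): return address, saved rbp, sender sp, last_sp (null),
// Method*, mirror, mdp, constant pool cache, value type allocation
// pointer, locals pointer, bcp, and a word pointing to the expression
// stack bottom.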
//
// Args:
//      rax: return address
//      rbx: Method*
//      r14/rdi: pointer to locals
//      r13/rsi: sender sp
//      rdx: cp cache
void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  // initialize fixed part of activation frame
  __ push(rax);            // save return address
  __ enter();              // save old & set new rbp
  __ push(rbcp);           // set sender sp
  __ push((int)NULL_WORD); // leave last_sp as null
  __ movptr(rbcp, Address(rbx, Method::const_offset()));    // get ConstMethod*
  __ lea(rbcp, Address(rbcp, ConstMethod::codes_offset())); // get codebase
  __ push(rbx);            // save Method*
  // Get mirror and store it in the frame as GC root for this Method*
  __ load_mirror(rdx, rbx);
  __ push(rdx);
  if (ProfileInterpreter) {
    Label method_data_continue;
    __ movptr(rdx, Address(rbx, in_bytes(Method::method_data_offset())));
    __ testptr(rdx, rdx);
    __ jcc(Assembler::zero, method_data_continue);
    __ addptr(rdx, in_bytes(MethodData::data_offset()));
    __ bind(method_data_continue);
    __ push(rdx);          // set the mdp (method data pointer)
  } else {
    __ push(0);
  }

  __ movptr(rdx, Address(rbx, Method::const_offset()));
  __ movptr(rdx, Address(rdx, ConstMethod::constants_offset()));
  __ movptr(rdx, Address(rdx, ConstantPool::cache_offset_in_bytes()));
  __ push(rdx);     // set constant pool cache
  const Register thread1 = NOT_LP64(rdx) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread1));
  __ movptr(rdx, Address(thread1, JavaThread::vt_alloc_ptr_offset()));
  __ push(rdx);     // value type allocation pointer when activation is created
  __ push(rlocals); // set locals pointer
  if (native_call) {
    __ push(0);     // no bcp
  } else {
    __ push(rbcp);  // set bcp
  }
  __ push(0); // reserve word for pointer to expression stack bottom
  __ movptr(Address(rsp, 0), rsp); // set expression stack bottom
}

// End of helpers

// Method entry for java.lang.ref.Reference.get.
address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
  // Code: _aload_0, _getfield, _areturn
  // parameter size = 1
  //
  // The code that gets generated by this routine is split into 2 parts:
  //    1. The "intrinsified" code performing an ON_WEAK_OOP_REF load,
  //    2. The slow path - which is an expansion of the regular method entry.
  //
  // Notes:
  // * An intrinsic is always executed, where an ON_WEAK_OOP_REF load is performed.
  // * We may jump to the slow path iff the receiver is null. If the
  //   Reference object is null then we no longer perform an ON_WEAK_OOP_REF load.
  //   Thus we can use the regular method entry code to generate the NPE.
  //
  // rbx: Method*

  // r13: senderSP must preserve for slow path, set SP to it on fast path

  address entry = __ pc();

  const int referent_offset = java_lang_ref_Reference::referent_offset;
  guarantee(referent_offset > 0, "referent offset not initialized");

  Label slow_path;
  // rbx: method

  // Check if local 0 != NULL
  // If the receiver is null then it is OK to jump to the slow path.
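  // Note: the return address is still on top of the stack at this point,
  // so local 0 (the Reference receiver) sits one word above rsp.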
  __ movptr(rax, Address(rsp, wordSize));

  __ testptr(rax, rax);
  __ jcc(Assembler::zero, slow_path);

  // rax: local 0
  // rbx: method (but can be used as scratch now)
  // rdx: scratch
  // rdi: scratch

  // Preserve the sender sp in case the load barrier
  // calls the runtime
  NOT_LP64(__ push(rsi));

  // Load the value of the referent field.
  const Address field_address(rax, referent_offset);
  __ load_heap_oop(rax, field_address, /*tmp1*/ rbx, /*tmp_thread*/ rdx, ON_WEAK_OOP_REF);

  // _areturn
  const Register sender_sp = NOT_LP64(rsi) LP64_ONLY(r13);
  NOT_LP64(__ pop(rsi));      // get sender sp
  __ pop(rdi);                // get return address
  __ mov(rsp, sender_sp);     // set sp to sender sp
  __ jmp(rdi);
  __ ret(0);

  // generate a vanilla interpreter entry as the slow path
  __ bind(slow_path);
  __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
  return entry;
}

void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
  // Quick & dirty stack overflow checking: bang the stack & handle trap.
  // Note that we do the banging after the frame is setup, since the exception
  // handling code expects to find a valid interpreter frame on the stack.
  // Doing the banging earlier fails if the caller frame is not an interpreter
  // frame.
  // (Also, the exception throwing code expects to unlock any synchronized
  // method receiver, so do the banging after locking the receiver.)

  // Bang each page in the shadow zone. We can't assume it's been done for
  // an interpreter frame with greater than a page of locals, so each page
  // needs to be checked.  Only true for non-native.
  if (UseStackBanging) {
    const int page_size = os::vm_page_size();
    const int n_shadow_pages = ((int)JavaThread::stack_shadow_zone_size()) / page_size;
    const int start_page = native_call ? n_shadow_pages : 1;
    for (int pages = start_page; pages <= n_shadow_pages; pages++) {
      __ bang_stack_with_offset(pages*page_size);
    }
  }
}

// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the
// native method than the typical interpreter frame setup.
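// In particular, two extra zero-initialized words are pushed below the fixed
// frame: a slot for the result handler address and an oop temp slot (used
// for a static method's mirror and for an unpacked oop result; see below).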
address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;

  // rbx: Method*
  // rbcp: sender sp

  address entry_point = __ pc();

  const Address constMethod       (rbx, Method::const_offset());
  const Address access_flags      (rbx, Method::access_flags_offset());
  const Address size_of_parameters(rcx, ConstMethod::
                                        size_of_parameters_offset());


  // get parameter size (always needed)
  __ movptr(rcx, constMethod);
  __ load_unsigned_short(rcx, size_of_parameters);

  // native calls don't need the stack size check since they have no
  // expression stack and the arguments are already on the stack and
  // we only add a handful of words to the stack

  // rbx: Method*
  // rcx: size of parameters
  // rbcp: sender sp
  __ pop(rax);                                       // get return address

  // for natives the size of locals is zero

  // compute beginning of parameters
  __ lea(rlocals, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));

  // add 2 zero-initialized slots for native calls
  // initialize result_handler slot
  __ push((int) NULL_WORD);
  // slot for oop temp
  // (static native method holder mirror/jni oop result)
  __ push((int) NULL_WORD);

  // initialize fixed part of activation frame
  generate_fixed_frame(true);

  // make sure method is native & not abstract
#ifdef ASSERT
  __ movl(rax, access_flags);
  {
    Label L;
    __ testl(rax, JVM_ACC_NATIVE);
    __ jcc(Assembler::notZero, L);
    __ stop("tried to execute non-native method as native");
    __ bind(L);
  }
  {
    Label L;
    __ testl(rax, JVM_ACC_ABSTRACT);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // Since at this point in the method invocation the exception handler
  // would try to exit the monitor of synchronized methods which hasn't
  // been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation will
  // check this flag.

  const Register thread1 = NOT_LP64(rax) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread1));
  const Address do_not_unlock_if_synchronized(thread1,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ movbool(do_not_unlock_if_synchronized, true);

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  bang_stack_shadow_pages(true);

  // reset the _do_not_unlock_if_synchronized flag
  NOT_LP64(__ get_thread(thread1));
  __ movbool(do_not_unlock_if_synchronized, false);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so the method is not locked if the counter overflows.
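  // lock_method() below expects rbx (Method*) and rlocals to be set up; it
  // allocates the monitor slot and locks the receiver, or the class mirror
  // for static methods.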
  if (synchronized) {
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    {
      Label L;
      __ movl(rax, access_flags);
      __ testl(rax, JVM_ACC_SYNCHRONIZED);
      __ jcc(Assembler::zero, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  {
    Label L;
    const Address monitor_block_top(rbp,
                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ movptr(rax, monitor_block_top);
    __ cmpptr(rax, rsp);
    __ jcc(Assembler::equal, L);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif

  // jvmti support
  __ notify_method_entry();

  // work registers
  const Register method = rbx;
  const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);
  const Register t      = NOT_LP64(rcx) LP64_ONLY(r11);

  // allocate space for parameters
  __ get_method(method);
  __ movptr(t, Address(method, Method::const_offset()));
  __ load_unsigned_short(t, Address(t, ConstMethod::size_of_parameters_offset()));

#ifndef _LP64
  __ shlptr(t, Interpreter::logStackElementSize); // Convert parameter count to bytes.
  __ addptr(t, 2*wordSize); // allocate two more slots for JNIEnv and possible mirror
  __ subptr(rsp, t);
  __ andptr(rsp, -(StackAlignmentInBytes)); // gcc needs 16 byte aligned stacks to do XMM intrinsics
#else
  __ shll(t, Interpreter::logStackElementSize);

  __ subptr(rsp, t);
  __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
  __ andptr(rsp, -16); // must be 16 byte boundary (see amd64 ABI)
#endif // _LP64

  // get signature handler
  {
    Label L;
    __ movptr(t, Address(method, Method::signature_handler_offset()));
    __ testptr(t, t);
    __ jcc(Assembler::notZero, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               method);
    __ get_method(method);
    __ movptr(t, Address(method, Method::signature_handler_offset()));
    __ bind(L);
  }

  // call signature handler
  assert(InterpreterRuntime::SignatureHandlerGenerator::from() == rlocals,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::to() == rsp,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == NOT_LP64(t) LP64_ONLY(rscratch1),
         "adjust this code");

  // The generated handlers do not touch RBX (the method oop).
  // However, large signatures cannot be cached and are generated
  // each time here.  The slow-path generator can do a GC on return,
  // so we must reload it after the call.
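  // t holds the signature handler entry point; per the asserts above it
  // copies the Java arguments from rlocals into the outgoing C argument
  // area at rsp.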
  __ call(t);
  __ get_method(method);        // slow path can do a GC, reload RBX


  // result handler is in rax
  // set result handler
  __ movptr(Address(rbp,
                    (frame::interpreter_frame_result_handler_offset) * wordSize),
            rax);

  // pass mirror handle if static call
  {
    Label L;
    __ movl(t, Address(method, Method::access_flags_offset()));
    __ testl(t, JVM_ACC_STATIC);
    __ jcc(Assembler::zero, L);
    // get mirror
    __ load_mirror(t, method, rax);
    // copy mirror into activation frame
    __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize),
              t);
    // pass handle to mirror
#ifndef _LP64
    __ lea(t, Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
    __ movptr(Address(rsp, wordSize), t);
#else
    __ lea(c_rarg1,
           Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
#endif // _LP64
    __ bind(L);
  }

  // get native function entry point
  {
    Label L;
    __ movptr(rax, Address(method, Method::native_function_offset()));
    ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
    __ cmpptr(rax, unsatisfied.addr());
    __ jcc(Assembler::notEqual, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               method);
    __ get_method(method);
    __ movptr(rax, Address(method, Method::native_function_offset()));
    __ bind(L);
  }

  // pass JNIEnv
#ifndef _LP64
  __ get_thread(thread);
  __ lea(t, Address(thread, JavaThread::jni_environment_offset()));
  __ movptr(Address(rsp, 0), t);

  // set_last_Java_frame_before_call
  // It is enough that the pc()
  // points into the right code segment. It does not have to be the correct return pc.
  __ set_last_Java_frame(thread, noreg, rbp, __ pc());
#else
  __ lea(c_rarg0, Address(r15_thread, JavaThread::jni_environment_offset()));

  // It is enough that the pc() points into the right code
  // segment. It does not have to be the correct return pc.
  __ set_last_Java_frame(rsp, rbp, (address) __ pc());
#endif // _LP64

  // change thread state
#ifdef ASSERT
  {
    Label L;
    __ movl(t, Address(thread, JavaThread::thread_state_offset()));
    __ cmpl(t, _thread_in_Java);
    __ jcc(Assembler::equal, L);
    __ stop("Wrong thread state in native stub");
    __ bind(L);
  }
#endif

  // Change state to native

  __ movl(Address(thread, JavaThread::thread_state_offset()),
          _thread_in_native);

  // Call the native method.
  __ call(rax);
  // 32: result potentially in rdx:rax or ST0
  // 64: result potentially in rax or xmm0

  // Verify or restore cpu control state after JNI call
  __ restore_cpu_control_state_after_jni();

  // NOTE: The order of these pushes is known to frame::interpreter_frame_result
  // in order to extract the result of a method call. If the order of these
  // pushes changes or anything else is added to the stack, then the code in
  // interpreter_frame_result must also change.

#ifndef _LP64
  // save potential result in ST(0) & rdx:rax
  // (if result handler is the T_FLOAT or T_DOUBLE handler, result must be in ST0 -
  // the check is necessary to avoid potential Intel FPU overflow problems by saving/restoring 'empty' FPU registers)
  // It is safe to do this push because state is _thread_in_native and return address will be found
  // via _last_native_pc and not via _last_java_sp

  // NOTE: the order of these pushes is known to frame::interpreter_frame_result.
  // If the order changes or anything else is added to the stack the code in
  // interpreter_frame_result will have to be changed.

  { Label L;
    Label push_double;
    ExternalAddress float_handler(AbstractInterpreter::result_handler(T_FLOAT));
    ExternalAddress double_handler(AbstractInterpreter::result_handler(T_DOUBLE));
    __ cmpptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset + 1)*wordSize),
              float_handler.addr());
    __ jcc(Assembler::equal, push_double);
    __ cmpptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset + 1)*wordSize),
              double_handler.addr());
    __ jcc(Assembler::notEqual, L);
    __ bind(push_double);
    __ push_d(); // FP values are returned using the FPU, so push FPU contents (even if UseSSE > 0).
    __ bind(L);
  }
#else
  __ push(dtos);
#endif // _LP64

  __ push(ltos);

  // change thread state
  NOT_LP64(__ get_thread(thread));
  __ movl(Address(thread, JavaThread::thread_state_offset()),
          _thread_in_native_trans);

  if (os::is_MP()) {
    if (UseMembar) {
      // Force this write out before the read below
      __ membar(Assembler::Membar_mask_bits(
           Assembler::LoadLoad | Assembler::LoadStore |
           Assembler::StoreLoad | Assembler::StoreStore));
    } else {
      // Write serialization page so VM thread can do a pseudo remote membar.
      // We use the current thread pointer to calculate a thread specific
      // offset to write to within the page. This minimizes bus traffic
      // due to cache line collision.
      __ serialize_memory(thread, rcx);
    }
  }

#ifndef _LP64
  if (AlwaysRestoreFPU) {
    //  Make sure the control word is correct.
    __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
  }
#endif // _LP64

  // check for safepoint operation in progress and/or pending suspend requests
  {
    Label Continue;
    Label slow_path;

#ifndef _LP64
    __ safepoint_poll(slow_path, thread, noreg);
#else
    __ safepoint_poll(slow_path, r15_thread, rscratch1);
#endif

    __ cmpl(Address(thread, JavaThread::suspend_flags_offset()), 0);
    __ jcc(Assembler::equal, Continue);
    __ bind(slow_path);

    // Don't use call_VM as it will see a possible pending exception
    // and forward it and never return here preventing us from
    // clearing _last_native_pc down below. Also can't use
    // call_VM_leaf either as it will check to see if r13 & r14 are
    // preserved and correspond to the bcp/locals pointers. So we do a
    // runtime call by hand.
1197 // 1198 #ifndef _LP64 1199 __ push(thread); 1200 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, 1201 JavaThread::check_special_condition_for_native_trans))); 1202 __ increment(rsp, wordSize); 1203 __ get_thread(thread); 1204 #else 1205 __ mov(c_rarg0, r15_thread); 1206 __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM) 1207 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows 1208 __ andptr(rsp, -16); // align stack as required by ABI 1209 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans))); 1210 __ mov(rsp, r12); // restore sp 1211 __ reinit_heapbase(); 1212 #endif // _LP64 1213 __ bind(Continue); 1214 } 1215 1216 // change thread state 1217 __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_Java); 1218 1219 // reset_last_Java_frame 1220 __ reset_last_Java_frame(thread, true); 1221 1222 if (CheckJNICalls) { 1223 // clear_pending_jni_exception_check 1224 __ movptr(Address(thread, JavaThread::pending_jni_exception_check_fn_offset()), NULL_WORD); 1225 } 1226 1227 // reset handle block 1228 __ movptr(t, Address(thread, JavaThread::active_handles_offset())); 1229 __ movl(Address(t, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD); 1230 1231 // If result is an oop unbox and store it in frame where gc will see it 1232 // and result handler will pick it up 1233 1234 { 1235 Label no_oop, not_weak, store_result; 1236 __ lea(t, ExternalAddress(AbstractInterpreter::result_handler(T_OBJECT))); 1237 __ cmpptr(t, Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize)); 1238 __ jcc(Assembler::notEqual, no_oop); 1239 // retrieve result 1240 __ pop(ltos); 1241 // Unbox oop result, e.g. JNIHandles::resolve value. 1242 __ resolve_jobject(rax /* value */, 1243 thread /* thread */, 1244 t /* tmp */); 1245 __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize), rax); 1246 // keep stack depth as expected by pushing oop which will eventually be discarded 1247 __ push(ltos); 1248 __ bind(no_oop); 1249 } 1250 1251 1252 { 1253 Label no_reguard; 1254 __ cmpl(Address(thread, JavaThread::stack_guard_state_offset()), 1255 JavaThread::stack_guard_yellow_reserved_disabled); 1256 __ jcc(Assembler::notEqual, no_reguard); 1257 1258 __ pusha(); // XXX only save smashed registers 1259 #ifndef _LP64 1260 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages))); 1261 __ popa(); 1262 #else 1263 __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM) 1264 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows 1265 __ andptr(rsp, -16); // align stack as required by ABI 1266 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages))); 1267 __ mov(rsp, r12); // restore sp 1268 __ popa(); // XXX only restore smashed registers 1269 __ reinit_heapbase(); 1270 #endif // _LP64 1271 1272 __ bind(no_reguard); 1273 } 1274 1275 1276 // The method register is junk from after the thread_in_native transition 1277 // until here. Also can't call_VM until the bcp has been 1278 // restored. Need bcp for throwing exception below so get it now. 1279 __ get_method(method); 1280 1281 // restore to have legal interpreter frame, i.e., bci == 0 <=> code_base() 1282 __ movptr(rbcp, Address(method, Method::const_offset())); // get ConstMethod* 1283 __ lea(rbcp, Address(rbcp, ConstMethod::codes_offset())); // get codebase 1284 1285 // handle exceptions (exception handling will handle unlocking!) 
1286 { 1287 Label L; 1288 __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD); 1289 __ jcc(Assembler::zero, L); 1290 // Note: At some point we may want to unify this with the code 1291 // used in call_VM_base(); i.e., we should use the 1292 // StubRoutines::forward_exception code. For now this doesn't work 1293 // here because the rsp is not correctly set at this point. 1294 __ MacroAssembler::call_VM(noreg, 1295 CAST_FROM_FN_PTR(address, 1296 InterpreterRuntime::throw_pending_exception)); 1297 __ should_not_reach_here(); 1298 __ bind(L); 1299 } 1300 1301 // do unlocking if necessary 1302 { 1303 Label L; 1304 __ movl(t, Address(method, Method::access_flags_offset())); 1305 __ testl(t, JVM_ACC_SYNCHRONIZED); 1306 __ jcc(Assembler::zero, L); 1307 // the code below should be shared with interpreter macro 1308 // assembler implementation 1309 { 1310 Label unlock; 1311 // BasicObjectLock will be first in list, since this is a 1312 // synchronized method. However, need to check that the object 1313 // has not been unlocked by an explicit monitorexit bytecode. 1314 const Address monitor(rbp, 1315 (intptr_t)(frame::interpreter_frame_initial_sp_offset * 1316 wordSize - (int)sizeof(BasicObjectLock))); 1317 1318 const Register regmon = NOT_LP64(rdx) LP64_ONLY(c_rarg1); 1319 1320 // monitor expect in c_rarg1 for slow unlock path 1321 __ lea(regmon, monitor); // address of first monitor 1322 1323 __ movptr(t, Address(regmon, BasicObjectLock::obj_offset_in_bytes())); 1324 __ testptr(t, t); 1325 __ jcc(Assembler::notZero, unlock); 1326 1327 // Entry already unlocked, need to throw exception 1328 __ MacroAssembler::call_VM(noreg, 1329 CAST_FROM_FN_PTR(address, 1330 InterpreterRuntime::throw_illegal_monitor_state_exception)); 1331 __ should_not_reach_here(); 1332 1333 __ bind(unlock); 1334 __ unlock_object(regmon); 1335 } 1336 __ bind(L); 1337 } 1338 1339 // jvmti support 1340 // Note: This must happen _after_ handling/throwing any exceptions since 1341 // the exception handler code notifies the runtime of method exits 1342 // too. If this happens before, method entry/exit notifications are 1343 // not properly paired (was bug - gri 11/22/99). 1344 __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI); 1345 1346 // restore potential result in edx:eax, call result handler to 1347 // restore potential result in ST0 & handle result 1348 1349 __ pop(ltos); 1350 LP64_ONLY( __ pop(dtos)); 1351 1352 __ movptr(t, Address(rbp, 1353 (frame::interpreter_frame_result_handler_offset) * wordSize)); 1354 __ call(t); 1355 1356 // remove activation 1357 __ movptr(t, Address(rbp, 1358 frame::interpreter_frame_sender_sp_offset * 1359 wordSize)); // get sender sp 1360 __ leave(); // remove frame anchor 1361 __ pop(rdi); // get return address 1362 __ mov(rsp, t); // set sp to sender sp 1363 __ jmp(rdi); 1364 1365 if (inc_counter) { 1366 // Handle overflow of counter and compile method 1367 __ bind(invocation_counter_overflow); 1368 generate_counter_overflow(continue_after_compile); 1369 } 1370 1371 return entry_point; 1372 } 1373 1374 // Abstract method entry 1375 // Attempt to execute abstract method. 
Throw exception 1376 address TemplateInterpreterGenerator::generate_abstract_entry(void) { 1377 1378 address entry_point = __ pc(); 1379 1380 // abstract method entry 1381 1382 // pop return address, reset last_sp to NULL 1383 __ empty_expression_stack(); 1384 __ restore_bcp(); // rsi must be correct for exception handler (was destroyed) 1385 __ restore_locals(); // make sure locals pointer is correct as well (was destroyed) 1386 1387 // throw exception 1388 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodErrorWithMethod), rbx); 1389 // the call_VM checks for exception, so we should never return here. 1390 __ should_not_reach_here(); 1391 1392 return entry_point; 1393 } 1394 1395 // 1396 // Generic interpreted method entry to (asm) interpreter 1397 // 1398 address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) { 1399 // determine code generation flags 1400 bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods; 1401 1402 // ebx: Method* 1403 // rbcp: sender sp 1404 address entry_point = __ pc(); 1405 1406 const Address constMethod(rbx, Method::const_offset()); 1407 const Address access_flags(rbx, Method::access_flags_offset()); 1408 const Address size_of_parameters(rdx, 1409 ConstMethod::size_of_parameters_offset()); 1410 const Address size_of_locals(rdx, ConstMethod::size_of_locals_offset()); 1411 1412 1413 // get parameter size (always needed) 1414 __ movptr(rdx, constMethod); 1415 __ load_unsigned_short(rcx, size_of_parameters); 1416 1417 // rbx: Method* 1418 // rcx: size of parameters 1419 // rbcp: sender_sp (could differ from sp+wordSize if we were called via c2i ) 1420 1421 __ load_unsigned_short(rdx, size_of_locals); // get size of locals in words 1422 __ subl(rdx, rcx); // rdx = no. of additional locals 1423 1424 // YYY 1425 // __ incrementl(rdx); 1426 // __ andl(rdx, -2); 1427 1428 // see if we've got enough room on the stack for locals plus overhead. 1429 generate_stack_overflow_check(); 1430 1431 // get return address 1432 __ pop(rax); 1433 1434 // compute beginning of parameters 1435 __ lea(rlocals, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize)); 1436 1437 // rdx - # of additional locals 1438 // allocate space for locals 1439 // explicitly initialize locals 1440 { 1441 Label exit, loop; 1442 __ testl(rdx, rdx); 1443 __ jcc(Assembler::lessEqual, exit); // do nothing if rdx <= 0 1444 __ bind(loop); 1445 __ push((int) NULL_WORD); // initialize local variables 1446 __ decrementl(rdx); // until everything initialized 1447 __ jcc(Assembler::greater, loop); 1448 __ bind(exit); 1449 } 1450 1451 // initialize fixed part of activation frame 1452 generate_fixed_frame(false); 1453 1454 // make sure method is not native & not abstract 1455 #ifdef ASSERT 1456 __ movl(rax, access_flags); 1457 { 1458 Label L; 1459 __ testl(rax, JVM_ACC_NATIVE); 1460 __ jcc(Assembler::zero, L); 1461 __ stop("tried to execute native method as non-native"); 1462 __ bind(L); 1463 } 1464 { 1465 Label L; 1466 __ testl(rax, JVM_ACC_ABSTRACT); 1467 __ jcc(Assembler::zero, L); 1468 __ stop("tried to execute abstract method in interpreter"); 1469 __ bind(L); 1470 } 1471 #endif 1472 1473 // Since at this point in the method invocation the exception 1474 // handler would try to exit the monitor of synchronized methods 1475 // which hasn't been entered yet, we set the thread local variable 1476 // _do_not_unlock_if_synchronized to true. The remove_activation 1477 // will check this flag. 
1478 1479 const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread); 1480 NOT_LP64(__ get_thread(thread)); 1481 const Address do_not_unlock_if_synchronized(thread, 1482 in_bytes(JavaThread::do_not_unlock_if_synchronized_offset())); 1483 __ movbool(do_not_unlock_if_synchronized, true); 1484 1485 __ profile_parameters_type(rax, rcx, rdx); 1486 // increment invocation count & check for overflow 1487 Label invocation_counter_overflow; 1488 Label profile_method; 1489 Label profile_method_continue; 1490 if (inc_counter) { 1491 generate_counter_incr(&invocation_counter_overflow, 1492 &profile_method, 1493 &profile_method_continue); 1494 if (ProfileInterpreter) { 1495 __ bind(profile_method_continue); 1496 } 1497 } 1498 1499 Label continue_after_compile; 1500 __ bind(continue_after_compile); 1501 1502 // check for synchronized interpreted methods 1503 bang_stack_shadow_pages(false); 1504 1505 // reset the _do_not_unlock_if_synchronized flag 1506 NOT_LP64(__ get_thread(thread)); 1507 __ movbool(do_not_unlock_if_synchronized, false); 1508 1509 // check for synchronized methods 1510 // Must happen AFTER invocation_counter check and stack overflow check, 1511 // so method is not locked if overflows. 1512 if (synchronized) { 1513 // Allocate monitor and lock method 1514 lock_method(); 1515 } else { 1516 // no synchronization necessary 1517 #ifdef ASSERT 1518 { 1519 Label L; 1520 __ movl(rax, access_flags); 1521 __ testl(rax, JVM_ACC_SYNCHRONIZED); 1522 __ jcc(Assembler::zero, L); 1523 __ stop("method needs synchronization"); 1524 __ bind(L); 1525 } 1526 #endif 1527 } 1528 1529 // start execution 1530 #ifdef ASSERT 1531 { 1532 Label L; 1533 const Address monitor_block_top (rbp, 1534 frame::interpreter_frame_monitor_block_top_offset * wordSize); 1535 __ movptr(rax, monitor_block_top); 1536 __ cmpptr(rax, rsp); 1537 __ jcc(Assembler::equal, L); 1538 __ stop("broken stack frame setup in interpreter"); 1539 __ bind(L); 1540 } 1541 #endif 1542 1543 // jvmti support 1544 __ notify_method_entry(); 1545 1546 __ dispatch_next(vtos); 1547 1548 // invocation counter overflow 1549 if (inc_counter) { 1550 if (ProfileInterpreter) { 1551 // We have decided to profile this method in the interpreter 1552 __ bind(profile_method); 1553 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method)); 1554 __ set_method_data_pointer_for_bcp(); 1555 __ get_method(rbx); 1556 __ jmp(profile_method_continue); 1557 } 1558 // Handle overflow of counter and compile method 1559 __ bind(invocation_counter_overflow); 1560 generate_counter_overflow(continue_after_compile); 1561 } 1562 1563 return entry_point; 1564 } 1565 1566 //----------------------------------------------------------------------------- 1567 // Exceptions 1568 1569 void TemplateInterpreterGenerator::generate_throw_exception() { 1570 // Entry point in previous activation (i.e., if the caller was 1571 // interpreted) 1572 Interpreter::_rethrow_exception_entry = __ pc(); 1573 // Restore sp to interpreter_frame_last_sp even though we are going 1574 // to empty the expression stack for the exception processing. 1575 __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD); 1576 // rax: exception 1577 // rdx: return address/pc that threw exception 1578 __ restore_bcp(); // r13/rsi points to call/send 1579 __ restore_locals(); 1580 LP64_ONLY(__ reinit_heapbase()); // restore r12 as heapbase. 
1581 // Entry point for exceptions thrown within interpreter code 1582 Interpreter::_throw_exception_entry = __ pc(); 1583 // expression stack is undefined here 1584 // rax: exception 1585 // r13/rsi: exception bcp 1586 __ verify_oop(rax); 1587 Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1); 1588 LP64_ONLY(__ mov(c_rarg1, rax)); 1589 1590 // expression stack must be empty before entering the VM in case of 1591 // an exception 1592 __ empty_expression_stack(); 1593 // find exception handler address and preserve exception oop 1594 __ call_VM(rdx, 1595 CAST_FROM_FN_PTR(address, 1596 InterpreterRuntime::exception_handler_for_exception), 1597 rarg); 1598 // rax: exception handler entry point 1599 // rdx: preserved exception oop 1600 // r13/rsi: bcp for exception handler 1601 __ push_ptr(rdx); // push exception which is now the only value on the stack 1602 __ jmp(rax); // jump to exception handler (may be _remove_activation_entry!) 1603 1604 // If the exception is not handled in the current frame the frame is 1605 // removed and the exception is rethrown (i.e. exception 1606 // continuation is _rethrow_exception). 1607 // 1608 // Note: At this point the bci is still the bxi for the instruction 1609 // which caused the exception and the expression stack is 1610 // empty. Thus, for any VM calls at this point, GC will find a legal 1611 // oop map (with empty expression stack). 1612 1613 // In current activation 1614 // tos: exception 1615 // esi: exception bcp 1616 1617 // 1618 // JVMTI PopFrame support 1619 // 1620 1621 Interpreter::_remove_activation_preserving_args_entry = __ pc(); 1622 __ empty_expression_stack(); 1623 // Set the popframe_processing bit in pending_popframe_condition 1624 // indicating that we are currently handling popframe, so that 1625 // call_VMs that may happen later do not trigger new popframe 1626 // handling cycles. 1627 const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread); 1628 NOT_LP64(__ get_thread(thread)); 1629 __ movl(rdx, Address(thread, JavaThread::popframe_condition_offset())); 1630 __ orl(rdx, JavaThread::popframe_processing_bit); 1631 __ movl(Address(thread, JavaThread::popframe_condition_offset()), rdx); 1632 1633 { 1634 // Check to see whether we are returning to a deoptimized frame. 1635 // (The PopFrame call ensures that the caller of the popped frame is 1636 // either interpreted or compiled and deoptimizes it if compiled.) 1637 // In this case, we can't call dispatch_next() after the frame is 1638 // popped, but instead must save the incoming arguments and restore 1639 // them after deoptimization has occurred. 1640 // 1641 // Note that we don't compare the return PC against the 1642 // deoptimization blob's unpack entry because of the presence of 1643 // adapter frames in C2. 
    Label caller_not_deoptimized;
    Register rarg = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
    __ movptr(rarg, Address(rbp, frame::return_addr_offset * wordSize));
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                               InterpreterRuntime::interpreter_contains), rarg);
    __ testl(rax, rax);
    __ jcc(Assembler::notZero, caller_not_deoptimized);

    // Compute size of arguments for saving when returning to
    // deoptimized caller
    __ get_method(rax);
    __ movptr(rax, Address(rax, Method::const_offset()));
    __ load_unsigned_short(rax, Address(rax, in_bytes(ConstMethod::
                                                size_of_parameters_offset())));
    __ shll(rax, Interpreter::logStackElementSize);
    __ restore_locals();
    __ subptr(rlocals, rax);
    __ addptr(rlocals, wordSize);
    // Save these arguments
    NOT_LP64(__ get_thread(thread));
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                                           Deoptimization::
                                           popframe_preserve_args),
                          thread, rax, rlocals);

    __ remove_activation(vtos, rdx,
                         /* throw_monitor_exception */ false,
                         /* install_monitor_exception */ false,
                         /* notify_jvmdi */ false);

    // Inform deoptimization that it is responsible for restoring
    // these arguments
    NOT_LP64(__ get_thread(thread));
    __ movl(Address(thread, JavaThread::popframe_condition_offset()),
            JavaThread::popframe_force_deopt_reexecution_bit);

    // Continue in deoptimization handler
    __ jmp(rdx);

    __ bind(caller_not_deoptimized);
  }

  __ remove_activation(vtos, rdx, /* rdx result (retaddr) is not used */
                       /* throw_monitor_exception */ false,
                       /* install_monitor_exception */ false,
                       /* notify_jvmdi */ false);

  // Finish with popframe handling
  // A previous I2C followed by a deoptimization might have moved the
  // outgoing arguments further up the stack. PopFrame expects the
  // mutations to those outgoing arguments to be preserved and other
  // constraints basically require this frame to look exactly as
  // though it had previously invoked an interpreted activation with
  // no space between the top of the expression stack (current
  // last_sp) and the top of stack. Rather than force deopt to
  // maintain this kind of invariant all the time we call a small
  // fixup routine to move the mutated arguments onto the top of our
  // expression stack if necessary.
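  // The fixup call below passes the current top of stack (rsp) and the
  // saved last_sp; InterpreterRuntime::popframe_move_outgoing_args copies
  // any outgoing arguments back down so they sit directly on top of the
  // expression stack again.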
#ifndef _LP64
  __ mov(rax, rsp);
  __ movptr(rbx, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ get_thread(thread);
  // PC must point into interpreter here
  __ set_last_Java_frame(thread, noreg, rbp, __ pc());
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), thread, rax, rbx);
  __ get_thread(thread);
#else
  __ mov(c_rarg1, rsp);
  __ movptr(c_rarg2, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  // PC must point into interpreter here
  __ set_last_Java_frame(noreg, rbp, __ pc());
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), r15_thread, c_rarg1, c_rarg2);
#endif
  __ reset_last_Java_frame(thread, true);

  // Restore the last_sp and null it out
  __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);

  __ restore_bcp();
  __ restore_locals();
  // The method data pointer was incremented already during
  // call profiling. We have to restore the mdp for the current bcp.
  if (ProfileInterpreter) {
    __ set_method_data_pointer_for_bcp();
  }

  // Clear the popframe condition flag
  NOT_LP64(__ get_thread(thread));
  __ movl(Address(thread, JavaThread::popframe_condition_offset()),
          JavaThread::popframe_inactive);

#if INCLUDE_JVMTI
  {
    Label L_done;
    const Register local0 = rlocals;

    __ cmpb(Address(rbcp, 0), Bytecodes::_invokestatic);
    __ jcc(Assembler::notEqual, L_done);

    // The member name argument must be restored if _invokestatic is
    // re-executed after a PopFrame call. Detect such a case in the
    // InterpreterRuntime function and return the member name
    // argument, or NULL.
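    //
    // The runtime call below is passed the current value of local slot 0,
    // the Method*, and the bcp; if the member name must be restored the
    // call returns that oop (otherwise NULL), and a nonzero result is
    // stored back into local slot 0 before the bytecode is re-executed.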

    __ get_method(rdx);
    __ movptr(rax, Address(local0, 0));
    __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), rax, rdx, rbcp);

    __ testptr(rax, rax);
    __ jcc(Assembler::zero, L_done);

    __ movptr(Address(local0, 0), rax);
    __ bind(L_done);
  }
#endif // INCLUDE_JVMTI

  __ dispatch_next(vtos);
  // end of PopFrame support

  Interpreter::_remove_activation_entry = __ pc();

  // preserve exception over this code sequence
  __ pop_ptr(rax);
  NOT_LP64(__ get_thread(thread));
  __ movptr(Address(thread, JavaThread::vm_result_offset()), rax);
  // remove the activation (without doing throws on illegalMonitorExceptions)
  __ remove_activation(vtos, rdx, false, true, false);
  // restore exception
  NOT_LP64(__ get_thread(thread));
  __ get_vm_result(rax, thread);

  // In between activations - previous activation type unknown yet
  // compute continuation point - the continuation point expects the
  // following registers set up:
  //
  // rax: exception
  // rdx: return address/pc that threw exception
  // rsp: expression stack of caller
  // rbp: ebp of caller
  __ push(rax);  // save exception
  __ push(rdx);  // save return address
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                          SharedRuntime::exception_handler_for_return_address),
                        thread, rdx);
  __ mov(rbx, rax);  // save exception handler
  __ pop(rdx);       // restore return address
  __ pop(rax);       // restore exception
  // Note that an "issuing PC" is actually the next PC after the call
  __ jmp(rbx);       // jump to exception handler of caller
}


//
// JVMTI ForceEarlyReturn support
//
address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
  address entry = __ pc();

  __ restore_bcp();
  __ restore_locals();
  __ empty_expression_stack();
  __ load_earlyret_value(state);  // 32-bit returns value in rdx, so don't reuse

  const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread));
  __ movptr(rcx, Address(thread, JavaThread::jvmti_thread_state_offset()));
  Address cond_addr(rcx, JvmtiThreadState::earlyret_state_offset());

  // Clear the earlyret state
  __ movl(cond_addr, JvmtiThreadState::earlyret_inactive);

  __ remove_activation(state, rsi,
                       false, /* throw_monitor_exception */
                       false, /* install_monitor_exception */
                       true); /* notify_jvmdi */
  __ jmp(rsi);

  return entry;
} // end of ForceEarlyReturn support


//-----------------------------------------------------------------------------
// Helper for vtos entry point generation

void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
                                                         address& bep,
                                                         address& cep,
                                                         address& sep,
                                                         address& aep,
                                                         address& iep,
                                                         address& lep,
                                                         address& fep,
                                                         address& dep,
                                                         address& vep) {
  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
  Label L;
  aep = __ pc();  // atos entry point
  __ push_ptr();
  __ jmp(L);
#ifndef _LP64
  fep = __ pc();  // ftos entry point
  __ push(ftos);
  __ jmp(L);
  dep = __ pc();  // dtos entry point
  __ push(dtos);
  __ jmp(L);
#else
  fep = __ pc();  // ftos entry point
  __ push_f(xmm0);
  __ jmp(L);
  dep = __ pc();  // dtos entry point
  __ push_d(xmm0);
  __ jmp(L);
#endif // _LP64
  lep = __ pc();  // ltos entry point
  __ push_l();
  __ jmp(L);
  bep = cep = sep = iep = __ pc();  // [bcsi]tos entry point
  __ push_i();
  vep = __ pc();  // vtos entry point
  __ bind(L);
  generate_and_dispatch(t);
}

//-----------------------------------------------------------------------------

// Non-product code
#ifndef PRODUCT

address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
  address entry = __ pc();

#ifndef _LP64
  // prepare expression stack
  __ pop(rcx);     // pop return address so expression stack is 'pure'
  __ push(state);  // save tosca

  // pass tosca registers as arguments & call tracer
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode), rcx, rax, rdx);
  __ mov(rcx, rax);  // make sure return address is not destroyed by pop(state)
  __ pop(state);     // restore tosca

  // return
  __ jmp(rcx);
#else
  __ push(state);
  __ push(c_rarg0);
  __ push(c_rarg1);
  __ push(c_rarg2);
  __ push(c_rarg3);
  __ mov(c_rarg2, rax);   // Pass itos
#ifdef _WIN64
  __ movflt(xmm3, xmm0);  // Pass ftos
#endif
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode),
             c_rarg1, c_rarg2, c_rarg3);
  __ pop(c_rarg3);
  __ pop(c_rarg2);
  __ pop(c_rarg1);
  __ pop(c_rarg0);
  __ pop(state);
  __ ret(0);  // return from result handler
#endif // _LP64

  return entry;
}

void TemplateInterpreterGenerator::count_bytecode() {
  __ incrementl(ExternalAddress((address) &BytecodeCounter::_counter_value));
}

void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
  __ incrementl(ExternalAddress((address) &BytecodeHistogram::_counters[t->bytecode()]));
}

void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
  __ mov32(rbx, ExternalAddress((address) &BytecodePairHistogram::_index));
  __ shrl(rbx, BytecodePairHistogram::log2_number_of_codes);
  __ orl(rbx,
         ((int) t->bytecode()) << BytecodePairHistogram::log2_number_of_codes);
  __ mov32(ExternalAddress((address) &BytecodePairHistogram::_index), rbx);
  __ lea(rscratch1, ExternalAddress((address) BytecodePairHistogram::_counters));
  __ incrementl(Address(rscratch1, rbx, Address::times_4));
}


void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
  // Call a little run-time stub to avoid blow-up for each bytecode.
  // The run-time stub saves the right registers, depending on
  // the tosca in-state for the given template.

  assert(Interpreter::trace_code(t->tos_in()) != NULL,
         "entry must have been generated");
#ifndef _LP64
  __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
#else
  __ mov(r12, rsp);     // remember sp (can only use r12 if not using call_VM)
  __ andptr(rsp, -16);  // align stack as required by ABI
  __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
  __ mov(rsp, r12);     // restore sp
  __ reinit_heapbase();
#endif // _LP64
}


void TemplateInterpreterGenerator::stop_interpreter_at() {
  Label L;
  __ cmp32(ExternalAddress((address) &BytecodeCounter::_counter_value),
           StopInterpreterAt);
  __ jcc(Assembler::notEqual, L);
  __ int3();
  __ bind(L);
}
#endif // !PRODUCT
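
// Usage note: the hooks above are emitted by generate_and_dispatch() in
// non-product builds and are typically driven by develop flags such as
// -XX:+CountBytecodes, -XX:+PrintBytecodeHistogram,
// -XX:+PrintBytecodePairHistogram, -XX:+TraceBytecodes, and
// -XX:StopInterpreterAt=<count> (flag availability depends on the build),
// e.g.:
//
//   $ java -XX:+TraceBytecodes -XX:StopInterpreterAt=100000 -version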