/*
 * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateInterpreterGenerator.hpp"
#include "interpreter/templateTable.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "oops/valueKlass.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

#define __ Disassembler::hook<InterpreterMacroAssembler>(__FILE__, __LINE__, _masm)->

// Size of interpreter code. Increase if too small. The interpreter will
// fail with a guarantee ("not enough space for interpreter generation")
// if it is too small.
// Run with -XX:+PrintInterpreter to get the VM to print out the size.
// Max size with JVMTI
#ifdef AMD64
int TemplateInterpreter::InterpreterCodeSize = JVMCI_ONLY(280) NOT_JVMCI(268) * 1024;
#else
int TemplateInterpreter::InterpreterCodeSize = 224 * 1024;
#endif // AMD64

// Global Register Names
static const Register rbcp    = LP64_ONLY(r13) NOT_LP64(rsi);
static const Register rlocals = LP64_ONLY(r14) NOT_LP64(rdi);

const int method_offset = frame::interpreter_frame_method_offset * wordSize;
const int bcp_offset    = frame::interpreter_frame_bcp_offset    * wordSize;
const int locals_offset = frame::interpreter_frame_locals_offset * wordSize;


//-----------------------------------------------------------------------------

address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();

#ifdef ASSERT
  {
    Label L;
    __ lea(rax, Address(rbp,
                        frame::interpreter_frame_monitor_block_top_offset *
                        wordSize));
    __ cmpptr(rax, rsp); // rax = maximal rsp for current rbp (stack
                         // grows downward)
    __ jcc(Assembler::aboveEqual, L); // check if frame is complete
    __ stop("interpreter frame not set up");
    __ bind(L);
  }
#endif // ASSERT
  // Restore bcp under the assumption that the current frame is still
  // interpreted
  __ restore_bcp();

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // throw exception
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::throw_StackOverflowError));
  return entry;
}

address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler() {
  address entry = __ pc();
  // The expression stack must be empty before entering the VM if an
  // exception happened.
  __ empty_expression_stack();

  // Setup parameters.
  // Convention: the aberrant index is expected in register ebx/rbx.
  // Pass the array as well so the runtime can create a more detailed exception.
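  // (On 32-bit the argument is passed in rax; on 64-bit it goes in c_rarg1,
  //  since call_VM passes the current thread in c_rarg0.)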
  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::
                              throw_ArrayIndexOutOfBoundsException),
             rarg, rbx);
  return entry;
}

address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();

  // object is at TOS
  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  __ pop(rarg);

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();

  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::
                              throw_ClassCastException),
             rarg);
  return entry;
}

address TemplateInterpreterGenerator::generate_exception_handler_common(
        const char* name, const char* message, bool pass_oop) {
  assert(!pass_oop || message == NULL, "either oop or message but not both");
  address entry = __ pc();

  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  Register rarg2 = NOT_LP64(rbx) LP64_ONLY(c_rarg2);

  if (pass_oop) {
    // object is at TOS
    __ pop(rarg2);
  }
  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // setup parameters
  __ lea(rarg, ExternalAddress((address)name));
  if (pass_oop) {
    __ call_VM(rax, CAST_FROM_FN_PTR(address,
                                     InterpreterRuntime::
                                     create_klass_exception),
               rarg, rarg2);
  } else {
    __ lea(rarg2, ExternalAddress((address)message));
    __ call_VM(rax,
               CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception),
               rarg, rarg2);
  }
  // throw exception
  __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
  return entry;
}

address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
  address entry = __ pc();

#ifndef _LP64
#ifdef COMPILER2
  // The FPU stack is clean if UseSSE >= 2 but must be cleaned in other cases
  if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) {
    for (int i = 1; i < 8; i++) {
      __ ffree(i);
    }
  } else if (UseSSE < 2) {
    __ empty_FPU_stack();
  }
#endif // COMPILER2
  if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) {
    __ MacroAssembler::verify_FPU(1, "generate_return_entry_for compiled");
  } else {
    __ MacroAssembler::verify_FPU(0, "generate_return_entry_for compiled");
  }

  if (state == ftos) {
    __ MacroAssembler::verify_FPU(UseSSE >= 1 ? 0 : 1, "generate_return_entry_for in interpreter");
  } else if (state == dtos) {
    __ MacroAssembler::verify_FPU(UseSSE >= 2 ? 0 : 1, "generate_return_entry_for in interpreter");
  }
#endif // _LP64

  // Restore stack bottom in case i2c adjusted stack
  __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  // and NULL it as marker that esp is now tos until next java call
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);

  if (state == atos && ValueTypeReturnedAsFields) {
#ifndef _LP64
    __ super_call_VM_leaf(StubRoutines::store_value_type_fields_to_buf());
#else
    // A value type might be returned. If fields are in registers we
    // need to allocate a value type instance and initialize it with
    // the value of the fields.
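    // Convention implied by the test and mask below: when fields are
    // returned in registers, rax holds the ValueKlass* with its low bit
    // set (cleared by the andptr(rbx, -2) below); when the low bit is
    // clear, rax is already an ordinary buffered oop and nothing needs
    // to be done.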
    Label skip, slow_case;
    // We only need a new buffered value if a new one is not returned
    __ testptr(rax, 1);
    __ jcc(Assembler::zero, skip);

    // Try to allocate a new buffered value (from the heap)
    if (UseTLAB) {
      __ mov(rbx, rax);
      __ andptr(rbx, -2);

      __ movl(r14, Address(rbx, Klass::layout_helper_offset()));

      __ movptr(r13, Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())));
      __ lea(r14, Address(r13, r14, Address::times_1));
      __ cmpptr(r14, Address(r15_thread, in_bytes(JavaThread::tlab_end_offset())));
      __ jcc(Assembler::above, slow_case);
      __ movptr(Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())), r14);
      __ movptr(Address(r13, oopDesc::mark_offset_in_bytes()), (intptr_t)markOopDesc::always_locked_prototype());

      __ xorl(rax, rax); // use zero reg to clear memory (shorter code)
      __ store_klass_gap(r13, rax); // zero klass gap for compressed oops
      __ mov(rax, rbx);
      __ store_klass(r13, rbx); // klass

      // We have our new buffered value, initialize its fields with a
      // value class specific handler
      __ movptr(rbx, Address(rax, InstanceKlass::adr_valueklass_fixed_block_offset()));
      __ movptr(rbx, Address(rbx, ValueKlass::pack_handler_offset()));
      __ mov(rax, r13);
      __ call(rbx);
      __ jmp(skip);
    }

    __ bind(slow_case);
    // We failed to allocate a new value, fall back to a runtime
    // call. Some oop field may be live in some registers but we can't
    // tell. That runtime call will take care of preserving them
    // across a GC if there's one.
    __ super_call_VM_leaf(StubRoutines::store_value_type_fields_to_buf());
    __ bind(skip);
#endif
  }

  __ restore_bcp();
  __ restore_locals();

  if (state == atos) {
    Register mdp = rbx;
    Register tmp = rcx;
    __ profile_return_type(mdp, rax, tmp);
  }

  const Register cache = rbx;
  const Register index = rcx;
  __ get_cache_and_index_at_bcp(cache, index, 1, index_size);

  const Register flags = cache;
  __ movl(flags, Address(cache, index, Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
  __ andl(flags, ConstantPoolCacheEntry::parameter_size_mask);
  __ lea(rsp, Address(rsp, flags, Interpreter::stackElementScale()));

  const Register java_thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
  if (JvmtiExport::can_pop_frame()) {
    NOT_LP64(__ get_thread(java_thread));
    __ check_and_handle_popframe(java_thread);
  }
  if (JvmtiExport::can_force_early_return()) {
    NOT_LP64(__ get_thread(java_thread));
    __ check_and_handle_earlyret(java_thread);
  }

  __ dispatch_next(state, step);

  return entry;
}


address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step, address continuation) {
  address entry = __ pc();

#ifndef _LP64
  if (state == ftos) {
    __ MacroAssembler::verify_FPU(UseSSE >= 1 ? 0 : 1, "generate_deopt_entry_for in interpreter");
  } else if (state == dtos) {
    __ MacroAssembler::verify_FPU(UseSSE >= 2 ? 0 : 1, "generate_deopt_entry_for in interpreter");
  }
#endif // _LP64

  // NULL last_sp until next java call
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
  __ restore_bcp();
  __ restore_locals();
  const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread));
#if INCLUDE_JVMCI
  // Check if we need to take lock at entry of synchronized method. This can
  // only occur on method entry so emit it only for vtos with step 0.
  if ((EnableJVMCI || UseAOT) && state == vtos && step == 0) {
    Label L;
    __ cmpb(Address(thread, JavaThread::pending_monitorenter_offset()), 0);
    __ jcc(Assembler::zero, L);
    // Clear flag.
    __ movb(Address(thread, JavaThread::pending_monitorenter_offset()), 0);
    // Satisfy calling convention for lock_method().
    __ get_method(rbx);
    // Take lock.
    lock_method();
    __ bind(L);
  } else {
#ifdef ASSERT
    if (EnableJVMCI) {
      Label L;
      __ cmpb(Address(r15_thread, JavaThread::pending_monitorenter_offset()), 0);
      __ jcc(Assembler::zero, L);
      __ stop("unexpected pending monitor in deopt entry");
      __ bind(L);
    }
#endif
  }
#endif
  // handle exceptions
  {
    Label L;
    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
    __ jcc(Assembler::zero, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }
  if (continuation == NULL) {
    __ dispatch_next(state, step);
  } else {
    __ jump_to_entry(continuation);
  }
  return entry;
}

address TemplateInterpreterGenerator::generate_result_handler_for(
        BasicType type) {
  address entry = __ pc();
  switch (type) {
  case T_BOOLEAN: __ c2bool(rax);            break;
#ifndef _LP64
  case T_CHAR   : __ andptr(rax, 0xFFFF);    break;
#else
  case T_CHAR   : __ movzwl(rax, rax);       break;
#endif // _LP64
  case T_BYTE   : __ sign_extend_byte(rax);  break;
  case T_SHORT  : __ sign_extend_short(rax); break;
  case T_INT    : /* nothing to do */        break;
  case T_LONG   : /* nothing to do */        break;
  case T_VOID   : /* nothing to do */        break;
#ifndef _LP64
  case T_DOUBLE :
  case T_FLOAT  :
    { const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp();
      __ pop(t);  // remove return address first
      // Must return a result for interpreter or compiler. In SSE
      // mode, results are returned in xmm0 and the FPU stack must
      // be empty.
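      // (The double-sized value itself was passed on the stack, just below
      //  the return address popped above, hence the 2*wordSize adjustment
      //  after the loads below.)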
      if (type == T_FLOAT && UseSSE >= 1) {
        // Load ST0
        __ fld_d(Address(rsp, 0));
        // Store as float and empty fpu stack
        __ fstp_s(Address(rsp, 0));
        // and reload
        __ movflt(xmm0, Address(rsp, 0));
      } else if (type == T_DOUBLE && UseSSE >= 2) {
        __ movdbl(xmm0, Address(rsp, 0));
      } else {
        // restore ST0
        __ fld_d(Address(rsp, 0));
      }
      // and pop the temp
      __ addptr(rsp, 2 * wordSize);
      __ push(t);  // restore return address
    }
    break;
#else
  case T_FLOAT  : /* nothing to do */        break;
  case T_DOUBLE : /* nothing to do */        break;
#endif // _LP64

  case T_VALUETYPE: // fall through (value types are handled with oops)
  case T_OBJECT :
    // retrieve result from frame
    __ movptr(rax, Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize));
    // and verify it
    __ verify_oop(rax);
    break;
  default       : ShouldNotReachHere();
  }
  __ ret(0); // return from result handler
  return entry;
}

address TemplateInterpreterGenerator::generate_safept_entry_for(
        TosState state,
        address runtime_entry) {
  address entry = __ pc();
  __ push(state);
  __ call_VM(noreg, runtime_entry);
  __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
  return entry;
}



// Helpers for commoning out cases in the various types of method entries.
//


// increment invocation count & check for overflow
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test
//
// rbx: method
// rcx: invocation counter
//
void TemplateInterpreterGenerator::generate_counter_incr(
        Label* overflow,
        Label* profile_method,
        Label* profile_method_continue) {
  Label done;
  // Note: In tiered we increment either counters in Method* or in MDO,
  // depending on whether we're profiling or not.
  if (TieredCompilation) {
    int increment = InvocationCounter::count_increment;
    Label no_mdo;
    if (ProfileInterpreter) {
      // Are we profiling?
      __ movptr(rax, Address(rbx, Method::method_data_offset()));
      __ testptr(rax, rax);
      __ jccb(Assembler::zero, no_mdo);
      // Increment counter in the MDO
      const Address mdo_invocation_counter(rax, in_bytes(MethodData::invocation_counter_offset()) +
                                                in_bytes(InvocationCounter::counter_offset()));
      const Address mask(rax, in_bytes(MethodData::invoke_mask_offset()));
      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rcx, false, Assembler::zero, overflow);
      __ jmp(done);
    }
    __ bind(no_mdo);
    // Increment counter in MethodCounters
    const Address invocation_counter(rax,
                                     MethodCounters::invocation_counter_offset() +
                                     InvocationCounter::counter_offset());
    __ get_method_counters(rbx, rax, done);
    const Address mask(rax, in_bytes(MethodCounters::invoke_mask_offset()));
    __ increment_mask_and_jump(invocation_counter, increment, mask, rcx,
                               false, Assembler::zero, overflow);
    __ bind(done);
  } else { // not TieredCompilation
    const Address backedge_counter(rax,
                                   MethodCounters::backedge_counter_offset() +
                                   InvocationCounter::counter_offset());
    const Address invocation_counter(rax,
                                     MethodCounters::invocation_counter_offset() +
                                     InvocationCounter::counter_offset());

    __ get_method_counters(rbx, rax, done);

    if (ProfileInterpreter) {
      __ incrementl(Address(rax,
                            MethodCounters::interpreter_invocation_counter_offset()));
    }
    // Update standard invocation counters
    __ movl(rcx, invocation_counter);
    __ incrementl(rcx, InvocationCounter::count_increment);
    __ movl(invocation_counter, rcx); // save invocation count

    __ movl(rax, backedge_counter);   // load backedge counter
    __ andl(rax, InvocationCounter::count_mask_value); // mask out the status bits

    __ addl(rcx, rax);                // add both counters

    // profile_method is non-null only for interpreted methods, so
    // profile_method != NULL == !native_call

    if (ProfileInterpreter && profile_method != NULL) {
      // Test to see if we should create a method data oop
      __ movptr(rax, Address(rbx, Method::method_counters_offset()));
      __ cmp32(rcx, Address(rax, in_bytes(MethodCounters::interpreter_profile_limit_offset())));
      __ jcc(Assembler::less, *profile_method_continue);

      // if no method data exists, go to profile_method
      __ test_method_data_pointer(rax, *profile_method);
    }

    __ movptr(rax, Address(rbx, Method::method_counters_offset()));
    __ cmp32(rcx, Address(rax, in_bytes(MethodCounters::interpreter_invocation_limit_offset())));
    __ jcc(Assembler::aboveEqual, *overflow);
    __ bind(done);
  }
}

void TemplateInterpreterGenerator::generate_counter_overflow(Label& do_continue) {

  // Asm interpreter on entry
  // r14/rdi - locals
  // r13/rsi - bcp
  // rbx     - method
  // rdx     - cpool --- DOES NOT APPEAR TO BE TRUE
  // rbp     - interpreter frame

  // On return (i.e. jump to entry_point) [ back to invocation of interpreter ]
  // Everything as it was on entry
  // rdx is not restored. Doesn't appear to really be set.

  // InterpreterRuntime::frequency_counter_overflow takes two
  // arguments, the first (thread) is passed by call_VM, the second
  // indicates if the counter overflow occurs at a backwards branch
  // (NULL bcp). We pass zero for it. The call returns the address
  // of the verified entry point for the method or NULL if the
  // compilation did not complete (either went background or bailed
  // out).
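  // Note that the returned entry point is not used here: after the call the
  // code below restores the Method* and jumps back into the interpreter;
  // any newly compiled code is picked up on a later invocation.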
  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  __ movl(rarg, 0);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::frequency_counter_overflow),
             rarg);

  __ movptr(rbx, Address(rbp, method_offset)); // restore Method*
  // Preserve invariant that r13/r14 contain bcp/locals of sender frame
  // and jump to the interpreted entry.
  __ jmp(do_continue, relocInfo::none);
}

// See if we've got enough room on the stack for locals plus overhead below
// JavaThread::stack_overflow_limit(). If not, throw a StackOverflowError
// without going through the signal handler, i.e., reserved and yellow zones
// will not be made usable. The shadow zone must suffice to handle the
// overflow.
// The expression stack grows down incrementally, so the normal guard
// page mechanism will work for that.
//
// NOTE: The additional locals are always pushed as well (this wasn't
// obvious in generate_fixed_frame), so the guard should work for them
// too.
//
// Args:
//   rdx: number of additional locals this frame needs (what we must check)
//   rbx: Method*
//
// Kills:
//   rax
void TemplateInterpreterGenerator::generate_stack_overflow_check(void) {

  // monitor entry size: see picture of stack in frame_x86.hpp
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

  // total overhead size: entry_size + (saved rbp through expr stack
  // bottom). be sure to change this if you add/subtract anything
  // to/from the overhead area
  const int overhead_size =
    -(frame::interpreter_frame_initial_sp_offset * wordSize) + entry_size;

  const int page_size = os::vm_page_size();

  Label after_frame_check;

  // see if the frame is greater than one page in size. If so,
  // then we need to verify there is enough stack space remaining
  // for the additional locals.
  __ cmpl(rdx, (page_size - overhead_size) / Interpreter::stackElementSize);
  __ jcc(Assembler::belowEqual, after_frame_check);

  // compute rsp as if this were going to be the last frame on
  // the stack before the red zone

  Label after_frame_check_pop;
  const Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread);
#ifndef _LP64
  __ push(thread);
  __ get_thread(thread);
#endif

  const Address stack_limit(thread, JavaThread::stack_overflow_limit_offset());

  // locals + overhead, in bytes
  __ mov(rax, rdx);
  __ shlptr(rax, Interpreter::logStackElementSize); // Convert parameter count to bytes.
  __ addptr(rax, overhead_size);

#ifdef ASSERT
  Label limit_okay;
  // Verify that thread stack overflow limit is non-zero.
  __ cmpptr(stack_limit, (int32_t)NULL_WORD);
  __ jcc(Assembler::notEqual, limit_okay);
  __ stop("stack overflow limit is zero");
  __ bind(limit_okay);
#endif

  // Add locals/frame size to stack limit.
  __ addptr(rax, stack_limit);

  // Check against the current stack bottom.
  __ cmpptr(rsp, rax);

  __ jcc(Assembler::above, after_frame_check_pop);
  NOT_LP64(__ pop(rsi)); // get saved bcp

  // Restore sender's sp as SP. This is necessary if the sender's
  // frame is an extended compiled frame (see gen_c2i_adapter())
  // and safer anyway in case of JSR292 adaptations.

  __ pop(rax); // return address must be moved if SP is changed
  __ mov(rsp, rbcp);
  __ push(rax);

  // Note: the restored frame is not necessarily interpreted.
  // Use the shared runtime version of the StackOverflowError.
  assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
  __ jump(ExternalAddress(StubRoutines::throw_StackOverflowError_entry()));
  // all done with frame size check
  __ bind(after_frame_check_pop);
  NOT_LP64(__ pop(rsi));

  // all done with frame size check
  __ bind(after_frame_check);
}

// Allocate monitor and lock method (asm interpreter)
//
// Args:
//   rbx: Method*
//   r14/rdi: locals
//
// Kills:
//   rax
//   c_rarg0, c_rarg1, c_rarg2, c_rarg3, ...(param regs)
//   rscratch1, rscratch2 (scratch regs)
void TemplateInterpreterGenerator::lock_method() {
  // synchronize method
  const Address access_flags(rbx, Method::access_flags_offset());
  const Address monitor_block_top(
        rbp,
        frame::interpreter_frame_monitor_block_top_offset * wordSize);
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

#ifdef ASSERT
  {
    Label L;
    __ movl(rax, access_flags);
    __ testl(rax, JVM_ACC_SYNCHRONIZED);
    __ jcc(Assembler::notZero, L);
    __ stop("method doesn't need synchronization");
    __ bind(L);
  }
#endif // ASSERT

  // get synchronization object
  {
    Label done;
    __ movl(rax, access_flags);
    __ testl(rax, JVM_ACC_STATIC);
    // get receiver (assume this is frequent case)
    __ movptr(rax, Address(rlocals, Interpreter::local_offset_in_bytes(0)));
    __ jcc(Assembler::zero, done);
    __ load_mirror(rax, rbx);

#ifdef ASSERT
    {
      Label L;
      __ testptr(rax, rax);
      __ jcc(Assembler::notZero, L);
      __ stop("synchronization object is NULL");
      __ bind(L);
    }
#endif // ASSERT

    __ bind(done);
    __ resolve(IS_NOT_NULL, rax);
  }

  // add space for monitor & lock
  __ subptr(rsp, entry_size);        // add space for a monitor entry
  __ movptr(monitor_block_top, rsp); // set new monitor block top
  // store object
  __ movptr(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax);
  const Register lockreg = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
  __ movptr(lockreg, rsp); // object address
  __ lock_object(lockreg);
}

// Generate a fixed interpreter frame. The setup is identical for
// interpreted methods and for native methods, hence the shared code.
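//
// Roughly, the completed fixed frame holds, from higher to lower addresses
// (matching the pushes below): the return address, the saved rbp, the
// sender sp, last_sp (initially NULL), the Method*, its mirror, the mdp
// (or 0), the constant pool cache, the locals pointer, the bcp (or 0 for
// native methods), and finally a pointer to the expression stack bottom.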
//
// Args:
//   rax: return address
//   rbx: Method*
//   r14/rdi: pointer to locals
//   r13/rsi: sender sp
//   rdx: cp cache
void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  // initialize fixed part of activation frame
  __ push(rax);            // save return address
  __ enter();              // save old & set new rbp
  __ push(rbcp);           // set sender sp
  __ push((int)NULL_WORD); // leave last_sp as null
  __ movptr(rbcp, Address(rbx, Method::const_offset()));    // get ConstMethod*
  __ lea(rbcp, Address(rbcp, ConstMethod::codes_offset())); // get codebase
  __ push(rbx); // save Method*
  // Get mirror and store it in the frame as GC root for this Method*
  __ load_mirror(rdx, rbx);
  __ push(rdx);
  if (ProfileInterpreter) {
    Label method_data_continue;
    __ movptr(rdx, Address(rbx, in_bytes(Method::method_data_offset())));
    __ testptr(rdx, rdx);
    __ jcc(Assembler::zero, method_data_continue);
    __ addptr(rdx, in_bytes(MethodData::data_offset()));
    __ bind(method_data_continue);
    __ push(rdx); // set the mdp (method data pointer)
  } else {
    __ push(0);
  }

  __ movptr(rdx, Address(rbx, Method::const_offset()));
  __ movptr(rdx, Address(rdx, ConstMethod::constants_offset()));
  __ movptr(rdx, Address(rdx, ConstantPool::cache_offset_in_bytes()));
  __ push(rdx);     // set constant pool cache
  __ push(rlocals); // set locals pointer
  if (native_call) {
    __ push(0); // no bcp
  } else {
    __ push(rbcp); // set bcp
  }
  __ push(0); // reserve word for pointer to expression stack bottom
  __ movptr(Address(rsp, 0), rsp); // set expression stack bottom
}

// End of helpers

// Method entry for java.lang.ref.Reference.get.
address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
  // Code: _aload_0, _getfield, _areturn
  // parameter size = 1
  //
  // The code that gets generated by this routine is split into 2 parts:
  //    1. The "intrinsified" code performing an ON_WEAK_OOP_REF load,
  //    2. The slow path - which is an expansion of the regular method entry.
  //
  // Notes:
  //   * An intrinsic is always executed, where an ON_WEAK_OOP_REF load is performed.
  //   * We may jump to the slow path iff the receiver is null. If the
  //     Reference object is null then we no longer perform an ON_WEAK_OOP_REF load;
  //     thus we can use the regular method entry code to generate the NPE.
  //
  // rbx: Method*

  // r13: senderSP must be preserved for slow path, set SP to it on fast path

  address entry = __ pc();

  const int referent_offset = java_lang_ref_Reference::referent_offset;
  guarantee(referent_offset > 0, "referent offset not initialized");

  Label slow_path;
  // rbx: method

  // Check if local 0 != NULL
  // If the receiver is null then it is OK to jump to the slow path.
  __ movptr(rax, Address(rsp, wordSize));

  __ testptr(rax, rax);
  __ jcc(Assembler::zero, slow_path);

  // rax: local 0
  // rbx: method (but can be used as scratch now)
  // rdx: scratch
  // rdi: scratch

  // Preserve the sender sp in case the load barrier
  // calls the runtime
  NOT_LP64(__ push(rsi));

  // Load the value of the referent field.
  const Address field_address(rax, referent_offset);
  __ load_heap_oop(rax, field_address, /*tmp1*/ rbx, /*tmp_thread*/ rdx, ON_WEAK_OOP_REF);

  // _areturn
  const Register sender_sp = NOT_LP64(rsi) LP64_ONLY(r13);
  NOT_LP64(__ pop(rsi));  // get sender sp
  __ pop(rdi);            // get return address
  __ mov(rsp, sender_sp); // set sp to sender sp
  __ jmp(rdi);
  __ ret(0);

  // generate a vanilla interpreter entry as the slow path
  __ bind(slow_path);
  __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
  return entry;
}

void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
  // Quick & dirty stack overflow checking: bang the stack & handle trap.
  // Note that we do the banging after the frame is set up, since the exception
  // handling code expects to find a valid interpreter frame on the stack.
  // Doing the banging earlier fails if the caller frame is not an interpreter
  // frame.
  // (Also, the exception throwing code expects to unlock any synchronized
  // method receiver, so do the banging after locking the receiver.)

  // Bang each page in the shadow zone. We can't assume it's been done for
  // an interpreter frame with greater than a page of locals, so each page
  // needs to be checked. Only true for non-native.
  if (UseStackBanging) {
    const int page_size = os::vm_page_size();
    const int n_shadow_pages = ((int)JavaThread::stack_shadow_zone_size()) / page_size;
    const int start_page = native_call ? n_shadow_pages : 1;
    for (int pages = start_page; pages <= n_shadow_pages; pages++) {
      __ bang_stack_with_offset(pages*page_size);
    }
  }
}

// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the
// native method than the typical interpreter frame setup.
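//
// Compared with generate_normal_entry below: there is no per-local stack
// size check (natives have no expression stack and no extra locals), and
// two extra frame slots are reserved, one for the result handler and an
// oop temp slot used for a static method's mirror handle or a returned
// JNI oop.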
address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;

  // rbx: Method*
  // rbcp: sender sp

  address entry_point = __ pc();

  const Address constMethod       (rbx, Method::const_offset());
  const Address access_flags      (rbx, Method::access_flags_offset());
  const Address size_of_parameters(rcx, ConstMethod::
                                        size_of_parameters_offset());


  // get parameter size (always needed)
  __ movptr(rcx, constMethod);
  __ load_unsigned_short(rcx, size_of_parameters);

  // native calls don't need the stack size check since they have no
  // expression stack and the arguments are already on the stack and
  // we only add a handful of words to the stack

  // rbx: Method*
  // rcx: size of parameters
  // rbcp: sender sp
  __ pop(rax); // get return address

  // for natives the size of locals is zero

  // compute beginning of parameters
  __ lea(rlocals, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));

  // add 2 zero-initialized slots for native calls
  // initialize result_handler slot
  __ push((int) NULL_WORD);
  // slot for oop temp
  // (static native method holder mirror/jni oop result)
  __ push((int) NULL_WORD);

  // initialize fixed part of activation frame
  generate_fixed_frame(true);

  // make sure method is native & not abstract
#ifdef ASSERT
  __ movl(rax, access_flags);
  {
    Label L;
    __ testl(rax, JVM_ACC_NATIVE);
    __ jcc(Assembler::notZero, L);
    __ stop("tried to execute non-native method as native");
    __ bind(L);
  }
  {
    Label L;
    __ testl(rax, JVM_ACC_ABSTRACT);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // Since at this point in the method invocation the exception handler
  // would try to exit the monitor of synchronized methods which hasn't
  // been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation will
  // check this flag.

  const Register thread1 = NOT_LP64(rax) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread1));
  const Address do_not_unlock_if_synchronized(thread1,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ movbool(do_not_unlock_if_synchronized, true);

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  bang_stack_shadow_pages(true);

  // reset the _do_not_unlock_if_synchronized flag
  NOT_LP64(__ get_thread(thread1));
  __ movbool(do_not_unlock_if_synchronized, false);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so the method is not locked if the counter overflows.
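  // lock_method() allocates the BasicObjectLock slot on the stack and locks
  // the receiver, or the class mirror for a static native method.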
  if (synchronized) {
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    {
      Label L;
      __ movl(rax, access_flags);
      __ testl(rax, JVM_ACC_SYNCHRONIZED);
      __ jcc(Assembler::zero, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  {
    Label L;
    const Address monitor_block_top(rbp,
                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ movptr(rax, monitor_block_top);
    __ cmpptr(rax, rsp);
    __ jcc(Assembler::equal, L);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif

  // jvmti support
  __ notify_method_entry();

  // work registers
  const Register method = rbx;
  const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);
  const Register t      = NOT_LP64(rcx) LP64_ONLY(r11);

  // allocate space for parameters
  __ get_method(method);
  __ movptr(t, Address(method, Method::const_offset()));
  __ load_unsigned_short(t, Address(t, ConstMethod::size_of_parameters_offset()));

#ifndef _LP64
  __ shlptr(t, Interpreter::logStackElementSize); // Convert parameter count to bytes.
  __ addptr(t, 2*wordSize); // allocate two more slots for JNIEnv and possible mirror
  __ subptr(rsp, t);
  __ andptr(rsp, -(StackAlignmentInBytes)); // gcc needs 16 byte aligned stacks to do XMM intrinsics
#else
  __ shll(t, Interpreter::logStackElementSize);

  __ subptr(rsp, t);
  __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
  __ andptr(rsp, -16); // must be 16 byte boundary (see amd64 ABI)
#endif // _LP64

  // get signature handler
  {
    Label L;
    __ movptr(t, Address(method, Method::signature_handler_offset()));
    __ testptr(t, t);
    __ jcc(Assembler::notZero, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               method);
    __ get_method(method);
    __ movptr(t, Address(method, Method::signature_handler_offset()));
    __ bind(L);
  }

  // call signature handler
  assert(InterpreterRuntime::SignatureHandlerGenerator::from() == rlocals,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::to() == rsp,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == NOT_LP64(t) LP64_ONLY(rscratch1),
         "adjust this code");

  // The generated handlers do not touch RBX (the method oop).
  // However, large signatures cannot be cached and are generated
  // each time here. The slow-path generator can do a GC on return,
  // so we must reload it after the call.
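  // t now holds the signature handler entry point. As the asserts above
  // spell out, the handler copies the Java arguments from the locals area
  // (rlocals) into the outgoing argument area at rsp (and, on 64-bit, into
  // argument registers), and leaves the address of the result handler in rax.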
  __ call(t);
  __ get_method(method); // slow path can do a GC, reload RBX


  // result handler is in rax
  // set result handler
  __ movptr(Address(rbp,
                    (frame::interpreter_frame_result_handler_offset) * wordSize),
            rax);

  // pass mirror handle if static call
  {
    Label L;
    __ movl(t, Address(method, Method::access_flags_offset()));
    __ testl(t, JVM_ACC_STATIC);
    __ jcc(Assembler::zero, L);
    // get mirror
    __ load_mirror(t, method, rax);
    // copy mirror into activation frame
    __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize),
              t);
    // pass handle to mirror
#ifndef _LP64
    __ lea(t, Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
    __ movptr(Address(rsp, wordSize), t);
#else
    __ lea(c_rarg1,
           Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
#endif // _LP64
    __ bind(L);
  }

  // get native function entry point
  {
    Label L;
    __ movptr(rax, Address(method, Method::native_function_offset()));
    ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
    __ cmpptr(rax, unsatisfied.addr());
    __ jcc(Assembler::notEqual, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               method);
    __ get_method(method);
    __ movptr(rax, Address(method, Method::native_function_offset()));
    __ bind(L);
  }

  // pass JNIEnv
#ifndef _LP64
  __ get_thread(thread);
  __ lea(t, Address(thread, JavaThread::jni_environment_offset()));
  __ movptr(Address(rsp, 0), t);

  // set_last_Java_frame_before_call
  // It is enough that the pc()
  // points into the right code segment. It does not have to be the correct return pc.
  __ set_last_Java_frame(thread, noreg, rbp, __ pc());
#else
  __ lea(c_rarg0, Address(r15_thread, JavaThread::jni_environment_offset()));

  // It is enough that the pc() points into the right code
  // segment. It does not have to be the correct return pc.
  __ set_last_Java_frame(rsp, rbp, (address) __ pc());
#endif // _LP64

  // change thread state
#ifdef ASSERT
  {
    Label L;
    __ movl(t, Address(thread, JavaThread::thread_state_offset()));
    __ cmpl(t, _thread_in_Java);
    __ jcc(Assembler::equal, L);
    __ stop("Wrong thread state in native stub");
    __ bind(L);
  }
#endif

  // Change state to native

  __ movl(Address(thread, JavaThread::thread_state_offset()),
          _thread_in_native);

  // Call the native method.
  __ call(rax);
  // 32: result potentially in rdx:rax or ST0
  // 64: result potentially in rax or xmm0

  // Verify or restore cpu control state after JNI call
  __ restore_cpu_control_state_after_jni();

  // NOTE: The order of these pushes is known to frame::interpreter_frame_result
  // in order to extract the result of a method call. If the order of these
  // pushes changes or anything else is added to the stack then the code in
  // interpreter_frame_result must also change.

#ifndef _LP64
  // save potential result in ST(0) & rdx:rax
  // (if result handler is the T_FLOAT or T_DOUBLE handler, result must be in ST0 -
  // the check is necessary to avoid potential Intel FPU overflow problems by saving/restoring 'empty' FPU registers)
  // It is safe to do this push because state is _thread_in_native and return address will be found
  // via _last_native_pc and not via _last_java_sp

  // NOTE: the order of these push(es) is known to frame::interpreter_frame_result.
  // If the order changes or anything else is added to the stack the code in
  // interpreter_frame_result will have to be changed.

  { Label L;
    Label push_double;
    ExternalAddress float_handler(AbstractInterpreter::result_handler(T_FLOAT));
    ExternalAddress double_handler(AbstractInterpreter::result_handler(T_DOUBLE));
    __ cmpptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset + 1)*wordSize),
              float_handler.addr());
    __ jcc(Assembler::equal, push_double);
    __ cmpptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset + 1)*wordSize),
              double_handler.addr());
    __ jcc(Assembler::notEqual, L);
    __ bind(push_double);
    __ push_d(); // FP values are returned using the FPU, so push FPU contents (even if UseSSE > 0).
    __ bind(L);
  }
#else
  __ push(dtos);
#endif // _LP64

  __ push(ltos);

  // change thread state
  NOT_LP64(__ get_thread(thread));
  __ movl(Address(thread, JavaThread::thread_state_offset()),
          _thread_in_native_trans);

  if (os::is_MP()) {
    if (UseMembar) {
      // Force this write out before the read below
      __ membar(Assembler::Membar_mask_bits(
                  Assembler::LoadLoad | Assembler::LoadStore |
                  Assembler::StoreLoad | Assembler::StoreStore));
    } else {
      // Write serialization page so VM thread can do a pseudo remote membar.
      // We use the current thread pointer to calculate a thread specific
      // offset to write to within the page. This minimizes bus traffic
      // due to cache line collision.
      __ serialize_memory(thread, rcx);
    }
  }

#ifndef _LP64
  if (AlwaysRestoreFPU) {
    // Make sure the control word is correct.
    __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
  }
#endif // _LP64

  // check for safepoint operation in progress and/or pending suspend requests
  {
    Label Continue;
    Label slow_path;

#ifndef _LP64
    __ safepoint_poll(slow_path, thread, noreg);
#else
    __ safepoint_poll(slow_path, r15_thread, rscratch1);
#endif

    __ cmpl(Address(thread, JavaThread::suspend_flags_offset()), 0);
    __ jcc(Assembler::equal, Continue);
    __ bind(slow_path);

    // Don't use call_VM as it will see a possible pending exception
    // and forward it and never return here preventing us from
    // clearing _last_native_pc down below. Also can't use
    // call_VM_leaf either as it will check to see if r13 & r14 are
    // preserved and correspond to the bcp/locals pointers. So we do a
    // runtime call by hand.
1188 // 1189 #ifndef _LP64 1190 __ push(thread); 1191 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, 1192 JavaThread::check_special_condition_for_native_trans))); 1193 __ increment(rsp, wordSize); 1194 __ get_thread(thread); 1195 #else 1196 __ mov(c_rarg0, r15_thread); 1197 __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM) 1198 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows 1199 __ andptr(rsp, -16); // align stack as required by ABI 1200 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans))); 1201 __ mov(rsp, r12); // restore sp 1202 __ reinit_heapbase(); 1203 #endif // _LP64 1204 __ bind(Continue); 1205 } 1206 1207 // change thread state 1208 __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_Java); 1209 1210 // reset_last_Java_frame 1211 __ reset_last_Java_frame(thread, true); 1212 1213 if (CheckJNICalls) { 1214 // clear_pending_jni_exception_check 1215 __ movptr(Address(thread, JavaThread::pending_jni_exception_check_fn_offset()), NULL_WORD); 1216 } 1217 1218 // reset handle block 1219 __ movptr(t, Address(thread, JavaThread::active_handles_offset())); 1220 __ movl(Address(t, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD); 1221 1222 // If result is an oop unbox and store it in frame where gc will see it 1223 // and result handler will pick it up 1224 1225 { 1226 Label no_oop, not_weak, store_result; 1227 __ lea(t, ExternalAddress(AbstractInterpreter::result_handler(T_OBJECT))); 1228 __ cmpptr(t, Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize)); 1229 __ jcc(Assembler::notEqual, no_oop); 1230 // retrieve result 1231 __ pop(ltos); 1232 // Unbox oop result, e.g. JNIHandles::resolve value. 1233 __ resolve_jobject(rax /* value */, 1234 thread /* thread */, 1235 t /* tmp */); 1236 __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize), rax); 1237 // keep stack depth as expected by pushing oop which will eventually be discarded 1238 __ push(ltos); 1239 __ bind(no_oop); 1240 } 1241 1242 1243 { 1244 Label no_reguard; 1245 __ cmpl(Address(thread, JavaThread::stack_guard_state_offset()), 1246 JavaThread::stack_guard_yellow_reserved_disabled); 1247 __ jcc(Assembler::notEqual, no_reguard); 1248 1249 __ pusha(); // XXX only save smashed registers 1250 #ifndef _LP64 1251 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages))); 1252 __ popa(); 1253 #else 1254 __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM) 1255 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows 1256 __ andptr(rsp, -16); // align stack as required by ABI 1257 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages))); 1258 __ mov(rsp, r12); // restore sp 1259 __ popa(); // XXX only restore smashed registers 1260 __ reinit_heapbase(); 1261 #endif // _LP64 1262 1263 __ bind(no_reguard); 1264 } 1265 1266 1267 // The method register is junk from after the thread_in_native transition 1268 // until here. Also can't call_VM until the bcp has been 1269 // restored. Need bcp for throwing exception below so get it now. 1270 __ get_method(method); 1271 1272 // restore to have legal interpreter frame, i.e., bci == 0 <=> code_base() 1273 __ movptr(rbcp, Address(method, Method::const_offset())); // get ConstMethod* 1274 __ lea(rbcp, Address(rbcp, ConstMethod::codes_offset())); // get codebase 1275 1276 // handle exceptions (exception handling will handle unlocking!) 
  {
    Label L;
    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
    __ jcc(Assembler::zero, L);
    // Note: At some point we may want to unify this with the code
    // used in call_VM_base(); i.e., we should use the
    // StubRoutines::forward_exception code. For now this doesn't work
    // here because the rsp is not correctly set at this point.
    __ MacroAssembler::call_VM(noreg,
                               CAST_FROM_FN_PTR(address,
                                                InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  // do unlocking if necessary
  {
    Label L;
    __ movl(t, Address(method, Method::access_flags_offset()));
    __ testl(t, JVM_ACC_SYNCHRONIZED);
    __ jcc(Assembler::zero, L);
    // the code below should be shared with interpreter macro
    // assembler implementation
    {
      Label unlock;
      // BasicObjectLock will be first in list, since this is a
      // synchronized method. However, need to check that the object
      // has not been unlocked by an explicit monitorexit bytecode.
      const Address monitor(rbp,
                            (intptr_t)(frame::interpreter_frame_initial_sp_offset *
                                       wordSize - (int)sizeof(BasicObjectLock)));

      const Register regmon = NOT_LP64(rdx) LP64_ONLY(c_rarg1);

      // monitor expected in c_rarg1 for slow unlock path
      __ lea(regmon, monitor); // address of first monitor

      __ movptr(t, Address(regmon, BasicObjectLock::obj_offset_in_bytes()));
      __ testptr(t, t);
      __ jcc(Assembler::notZero, unlock);

      // Entry already unlocked, need to throw exception
      __ MacroAssembler::call_VM(noreg,
                                 CAST_FROM_FN_PTR(address,
                                                  InterpreterRuntime::throw_illegal_monitor_state_exception));
      __ should_not_reach_here();

      __ bind(unlock);
      __ unlock_object(regmon);
    }
    __ bind(L);
  }

  // jvmti support
  // Note: This must happen _after_ handling/throwing any exceptions since
  //       the exception handler code notifies the runtime of method exits
  //       too. If this happens before, method entry/exit notifications are
  //       not properly paired (was bug - gri 11/22/99).
  __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);

  // restore potential result in edx:eax, call result handler to
  // restore potential result in ST0 & handle result

  __ pop(ltos);
  LP64_ONLY( __ pop(dtos));

  __ movptr(t, Address(rbp,
                       (frame::interpreter_frame_result_handler_offset) * wordSize));
  __ call(t);

  // remove activation
  __ movptr(t, Address(rbp,
                       frame::interpreter_frame_sender_sp_offset *
                       wordSize)); // get sender sp
  __ leave();  // remove frame anchor
  __ pop(rdi); // get return address
  __ mov(rsp, t); // set sp to sender sp
  __ jmp(rdi);

  if (inc_counter) {
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(continue_after_compile);
  }

  return entry_point;
}

// Abstract method entry
// Attempt to execute abstract method. Throw exception.
address TemplateInterpreterGenerator::generate_abstract_entry(void) {

  address entry_point = __ pc();

  // abstract method entry

  // pop return address, reset last_sp to NULL
  __ empty_expression_stack();
  __ restore_bcp();    // rsi must be correct for exception handler   (was destroyed)
  __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)

  // throw exception
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodErrorWithMethod), rbx);
  // the call_VM checks for exception, so we should never return here.
  __ should_not_reach_here();

  return entry_point;
}

//
// Generic interpreted method entry to (asm) interpreter
//
address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;

  // ebx: Method*
  // rbcp: sender sp
  address entry_point = __ pc();

  const Address constMethod(rbx, Method::const_offset());
  const Address access_flags(rbx, Method::access_flags_offset());
  const Address size_of_parameters(rdx,
                                   ConstMethod::size_of_parameters_offset());
  const Address size_of_locals(rdx, ConstMethod::size_of_locals_offset());


  // get parameter size (always needed)
  __ movptr(rdx, constMethod);
  __ load_unsigned_short(rcx, size_of_parameters);

  // rbx: Method*
  // rcx: size of parameters
  // rbcp: sender_sp (could differ from sp+wordSize if we were called via c2i)

  __ load_unsigned_short(rdx, size_of_locals); // get size of locals in words
  __ subl(rdx, rcx); // rdx = no. of additional locals

  // YYY
  //   __ incrementl(rdx);
  //   __ andl(rdx, -2);

  // see if we've got enough room on the stack for locals plus overhead.
  generate_stack_overflow_check();

  // get return address
  __ pop(rax);

  // compute beginning of parameters
  __ lea(rlocals, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));

  // rdx - # of additional locals
  // allocate space for locals
  // explicitly initialize locals
  {
    Label exit, loop;
    __ testl(rdx, rdx);
    __ jcc(Assembler::lessEqual, exit); // do nothing if rdx <= 0
    __ bind(loop);
    __ push((int) NULL_WORD); // initialize local variables
    __ decrementl(rdx);       // until everything initialized
    __ jcc(Assembler::greater, loop);
    __ bind(exit);
  }

  // initialize fixed part of activation frame
  generate_fixed_frame(false);

  // make sure method is not native & not abstract
#ifdef ASSERT
  __ movl(rax, access_flags);
  {
    Label L;
    __ testl(rax, JVM_ACC_NATIVE);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute native method as non-native");
    __ bind(L);
  }
  {
    Label L;
    __ testl(rax, JVM_ACC_ABSTRACT);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // Since at this point in the method invocation the exception
  // handler would try to exit the monitor of synchronized methods
  // which hasn't been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation
  // will check this flag.

  const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread));
  const Address do_not_unlock_if_synchronized(thread,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ movbool(do_not_unlock_if_synchronized, true);

  __ profile_parameters_type(rax, rcx, rdx);
  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  Label profile_method;
  Label profile_method_continue;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow,
                          &profile_method,
                          &profile_method_continue);
    if (ProfileInterpreter) {
      __ bind(profile_method_continue);
    }
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  // bang the stack shadow pages
  bang_stack_shadow_pages(false);

  // reset the _do_not_unlock_if_synchronized flag
  NOT_LP64(__ get_thread(thread));
  __ movbool(do_not_unlock_if_synchronized, false);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so the method is not locked if the counter overflows.
  if (synchronized) {
    // Allocate monitor and lock method
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    {
      Label L;
      __ movl(rax, access_flags);
      __ testl(rax, JVM_ACC_SYNCHRONIZED);
      __ jcc(Assembler::zero, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  {
    Label L;
    const Address monitor_block_top (rbp,
                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ movptr(rax, monitor_block_top);
    __ cmpptr(rax, rsp);
    __ jcc(Assembler::equal, L);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif

  // jvmti support
  __ notify_method_entry();

  __ dispatch_next(vtos);

  // invocation counter overflow
  if (inc_counter) {
    if (ProfileInterpreter) {
      // We have decided to profile this method in the interpreter
      __ bind(profile_method);
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
      __ set_method_data_pointer_for_bcp();
      __ get_method(rbx);
      __ jmp(profile_method_continue);
    }
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(continue_after_compile);
  }

  return entry_point;
}

//-----------------------------------------------------------------------------
// Exceptions

void TemplateInterpreterGenerator::generate_throw_exception() {
  // Entry point in previous activation (i.e., if the caller was
  // interpreted)
  Interpreter::_rethrow_exception_entry = __ pc();
  // Restore sp to interpreter_frame_last_sp even though we are going
  // to empty the expression stack for the exception processing.
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
  // rax: exception
  // rdx: return address/pc that threw exception
  __ restore_bcp();    // r13/rsi points to call/send
  __ restore_locals();
  LP64_ONLY(__ reinit_heapbase()); // restore r12 as heapbase.
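  // Execution falls through into the common throw path below; the rethrow
  // entry only has to re-establish the interpreter state (bcp, locals,
  // heapbase) first.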
  // Entry point for exceptions thrown within interpreter code
  Interpreter::_throw_exception_entry = __ pc();
  // expression stack is undefined here
  // rax: exception
  // r13/rsi: exception bcp
  __ verify_oop(rax);
  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  LP64_ONLY(__ mov(c_rarg1, rax));

  // expression stack must be empty before entering the VM in case of
  // an exception
  __ empty_expression_stack();
  // find exception handler address and preserve exception oop
  __ call_VM(rdx,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::exception_handler_for_exception),
             rarg);
  // rax: exception handler entry point
  // rdx: preserved exception oop
  // r13/rsi: bcp for exception handler
  __ push_ptr(rdx); // push exception which is now the only value on the stack
  __ jmp(rax);      // jump to exception handler (may be _remove_activation_entry!)

  // If the exception is not handled in the current frame the frame is
  // removed and the exception is rethrown (i.e. exception
  // continuation is _rethrow_exception).
  //
  // Note: At this point the bci is still the bci for the instruction
  // which caused the exception and the expression stack is
  // empty. Thus, for any VM calls at this point, GC will find a legal
  // oop map (with empty expression stack).

  // In current activation
  // tos: exception
  // esi: exception bcp

  //
  // JVMTI PopFrame support
  //

  Interpreter::_remove_activation_preserving_args_entry = __ pc();
  __ empty_expression_stack();
  // Set the popframe_processing bit in pending_popframe_condition
  // indicating that we are currently handling popframe, so that
  // call_VMs that may happen later do not trigger new popframe
  // handling cycles.
  const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread));
  __ movl(rdx, Address(thread, JavaThread::popframe_condition_offset()));
  __ orl(rdx, JavaThread::popframe_processing_bit);
  __ movl(Address(thread, JavaThread::popframe_condition_offset()), rdx);

  {
    // Check to see whether we are returning to a deoptimized frame.
    // (The PopFrame call ensures that the caller of the popped frame is
    // either interpreted or compiled and deoptimizes it if compiled.)
    // In this case, we can't call dispatch_next() after the frame is
    // popped, but instead must save the incoming arguments and restore
    // them after deoptimization has occurred.
    //
    // Note that we don't compare the return PC against the
    // deoptimization blob's unpack entry because of the presence of
    // adapter frames in C2.
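    // interpreter_contains(pc) reports whether the return address lies in
    // interpreter code. If it does not, the caller is compiled code that
    // the PopFrame call has already deoptimized, so the incoming arguments
    // must be preserved explicitly before removing the activation.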
    Label caller_not_deoptimized;
    Register rarg = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
    __ movptr(rarg, Address(rbp, frame::return_addr_offset * wordSize));
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                               InterpreterRuntime::interpreter_contains), rarg);
    __ testl(rax, rax);
    __ jcc(Assembler::notZero, caller_not_deoptimized);

    // Compute size of arguments for saving when returning to
    // deoptimized caller
    __ get_method(rax);
    __ movptr(rax, Address(rax, Method::const_offset()));
    __ load_unsigned_short(rax, Address(rax, in_bytes(ConstMethod::
                                                size_of_parameters_offset())));
    __ shll(rax, Interpreter::logStackElementSize);
    __ restore_locals();
    __ subptr(rlocals, rax);
    __ addptr(rlocals, wordSize);
    // Save these arguments
    NOT_LP64(__ get_thread(thread));
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                                           Deoptimization::
                                           popframe_preserve_args),
                          thread, rax, rlocals);

    __ remove_activation(vtos, rdx,
                         /* throw_monitor_exception */ false,
                         /* install_monitor_exception */ false,
                         /* notify_jvmdi */ false);

    // Inform deoptimization that it is responsible for restoring
    // these arguments
    NOT_LP64(__ get_thread(thread));
    __ movl(Address(thread, JavaThread::popframe_condition_offset()),
            JavaThread::popframe_force_deopt_reexecution_bit);

    // Continue in deoptimization handler
    __ jmp(rdx);

    __ bind(caller_not_deoptimized);
  }

  __ remove_activation(vtos, rdx, /* rdx result (retaddr) is not used */
                       /* throw_monitor_exception */ false,
                       /* install_monitor_exception */ false,
                       /* notify_jvmdi */ false);

  // Finish with popframe handling
  // A previous I2C followed by a deoptimization might have moved the
  // outgoing arguments further up the stack. PopFrame expects the
  // mutations to those outgoing arguments to be preserved and other
  // constraints basically require this frame to look exactly as
  // though it had previously invoked an interpreted activation with
  // no space between the top of the expression stack (current
  // last_sp) and the top of stack. Rather than force deopt to
  // maintain this kind of invariant all the time we call a small
  // fixup routine to move the mutated arguments onto the top of our
  // expression stack if necessary.
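  // Illustrative sketch of the invariant being restored (comment only;
  // slot positions are schematic):
  //
  //        before fixup                       after fixup
  //   [ caller's frame     ]            [ caller's frame     ]
  //   [ arg0 | arg1 | ...  ]  <- args   [ ...                ]
  //   [ gap left by deopt  ]            [ arg0 | arg1 | ...  ]  <- last_sp
  //
  // popframe_move_outgoing_args copies the (possibly mutated) outgoing
  // arguments down so that they abut last_sp, exactly as if an
  // interpreted caller had pushed them.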
#ifndef _LP64
  __ mov(rax, rsp);
  __ movptr(rbx, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ get_thread(thread);
  // PC must point into interpreter here
  __ set_last_Java_frame(thread, noreg, rbp, __ pc());
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), thread, rax, rbx);
  __ get_thread(thread);
#else
  __ mov(c_rarg1, rsp);
  __ movptr(c_rarg2, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  // PC must point into interpreter here
  __ set_last_Java_frame(noreg, rbp, __ pc());
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), r15_thread, c_rarg1, c_rarg2);
#endif
  __ reset_last_Java_frame(thread, true);

  // Restore the last_sp and null it out
  __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);

  __ restore_bcp();
  __ restore_locals();
  // The method data pointer was incremented already during
  // call profiling. We have to restore the mdp for the current bcp.
  if (ProfileInterpreter) {
    __ set_method_data_pointer_for_bcp();
  }

  // Clear the popframe condition flag
  NOT_LP64(__ get_thread(thread));
  __ movl(Address(thread, JavaThread::popframe_condition_offset()),
          JavaThread::popframe_inactive);

#if INCLUDE_JVMTI
  {
    Label L_done;
    const Register local0 = rlocals;

    __ cmpb(Address(rbcp, 0), Bytecodes::_invokestatic);
    __ jcc(Assembler::notEqual, L_done);

    // The member name argument must be restored if _invokestatic is
    // re-executed after a PopFrame call. Detect such a case in the
    // InterpreterRuntime function and return the member name
    // argument, or NULL.
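    // Background (hedged): the case in question is an invokestatic of a
    // MethodHandle linker intrinsic such as MethodHandle.linkToStatic,
    // whose MemberName argument is installed by the JVM rather than
    // produced by user bytecode, so re-executing the invokestatic cannot
    // recreate it. member_name_arg_or_null returns that MemberName (or
    // NULL for an ordinary invokestatic) so it can be written back into
    // the local slot it was read from.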

    __ get_method(rdx);
    __ movptr(rax, Address(local0, 0));
    __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), rax, rdx, rbcp);

    __ testptr(rax, rax);
    __ jcc(Assembler::zero, L_done);

    __ movptr(Address(local0, 0), rax);
    __ bind(L_done);
  }
#endif // INCLUDE_JVMTI

  __ dispatch_next(vtos);
  // end of PopFrame support

  Interpreter::_remove_activation_entry = __ pc();

  // preserve exception over this code sequence
  __ pop_ptr(rax);
  NOT_LP64(__ get_thread(thread));
  __ movptr(Address(thread, JavaThread::vm_result_offset()), rax);
  // remove the activation (without doing throws on illegalMonitorExceptions)
  __ remove_activation(vtos, rdx, false, true, false);
  // restore exception
  NOT_LP64(__ get_thread(thread));
  __ get_vm_result(rax, thread);

  // In between activations - previous activation type unknown yet
  // compute continuation point - the continuation point expects the
  // following registers set up:
  //
  // rax: exception
  // rdx: return address/pc that threw exception
  // rsp: expression stack of caller
  // rbp: ebp of caller
  __ push(rax);                                  // save exception
  __ push(rdx);                                  // save return address
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                          SharedRuntime::exception_handler_for_return_address),
                        thread, rdx);
  __ mov(rbx, rax);                              // save exception handler
  __ pop(rdx);                                   // restore return address
  __ pop(rax);                                   // restore exception
  // Note that an "issuing PC" is actually the next PC after the call
  __ jmp(rbx);                                   // jump to exception
                                                 // handler of caller
}


//
// JVMTI ForceEarlyReturn support
//
address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
  address entry = __ pc();

  __ restore_bcp();
  __ restore_locals();
  __ empty_expression_stack();
  __ load_earlyret_value(state);  // 32 bits returns value in rdx, so don't reuse

  const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread));
  __ movptr(rcx, Address(thread, JavaThread::jvmti_thread_state_offset()));
  Address cond_addr(rcx, JvmtiThreadState::earlyret_state_offset());

  // Clear the earlyret state
  __ movl(cond_addr, JvmtiThreadState::earlyret_inactive);

  __ remove_activation(state, rsi,
                       false, /* throw_monitor_exception */
                       false, /* install_monitor_exception */
                       true); /* notify_jvmdi */
  __ jmp(rsi);

  return entry;
} // end of ForceEarlyReturn support


//-----------------------------------------------------------------------------
// Helper for vtos entry point generation

void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
                                                         address& bep,
                                                         address& cep,
                                                         address& sep,
                                                         address& aep,
                                                         address& iep,
                                                         address& lep,
                                                         address& fep,
                                                         address& dep,
                                                         address& vep) {
  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
  Label L;
  aep = __ pc();  // atos entry point
  __ push_ptr();
  __ jmp(L);
#ifndef _LP64
  fep = __ pc();  // ftos entry point
  __ push(ftos);
  __ jmp(L);
  dep = __ pc();  // dtos entry point
  __ push(dtos);
  __ jmp(L);
#else
  fep = __ pc();  // ftos entry point
  __ push_f(xmm0);
  __ jmp(L);
  dep = __ pc();  // dtos entry point
  __ push_d(xmm0);
  __ jmp(L);
#endif // _LP64
  lep = __ pc();  // ltos entry point
  __ push_l();
  __ jmp(L);
  bep = cep = sep = iep = __ pc();  // [bcsi]tos entry point
  __ push_i();
  vep = __ pc();  // vtos entry point
  __ bind(L);
  generate_and_dispatch(t);
}

//-----------------------------------------------------------------------------

// Non-product code
#ifndef PRODUCT

address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
  address entry = __ pc();

#ifndef _LP64
  // prepare expression stack
  __ pop(rcx);       // pop return address so expression stack is 'pure'
  __ push(state);    // save tosca

  // pass tosca registers as arguments & call tracer
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode), rcx, rax, rdx);
  __ mov(rcx, rax);  // make sure return address is not destroyed by pop(state)
  __ pop(state);     // restore tosca

  // return
  __ jmp(rcx);
#else
  __ push(state);
  __ push(c_rarg0);
  __ push(c_rarg1);
  __ push(c_rarg2);
  __ push(c_rarg3);
  __ mov(c_rarg2, rax);   // Pass itos
#ifdef _WIN64
  __ movflt(xmm3, xmm0);  // Pass ftos
#endif
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode),
             c_rarg1, c_rarg2, c_rarg3);
  __ pop(c_rarg3);
  __ pop(c_rarg2);
  __ pop(c_rarg1);
  __ pop(c_rarg0);
  __ pop(state);
  __ ret(0);              // return from result handler
#endif // _LP64

  return entry;
}

void TemplateInterpreterGenerator::count_bytecode() {
  __ incrementl(ExternalAddress((address) &BytecodeCounter::_counter_value));
}

void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
  __ incrementl(ExternalAddress((address) &BytecodeHistogram::_counters[t->bytecode()]));
}

void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
  __ mov32(rbx, ExternalAddress((address) &BytecodePairHistogram::_index));
  __ shrl(rbx, BytecodePairHistogram::log2_number_of_codes);
  __ orl(rbx,
         ((int) t->bytecode()) <<
         BytecodePairHistogram::log2_number_of_codes);
  __ mov32(ExternalAddress((address) &BytecodePairHistogram::_index), rbx);
  __ lea(rscratch1, ExternalAddress((address) BytecodePairHistogram::_counters));
  __ incrementl(Address(rscratch1, rbx, Address::times_4));
}


void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
  // Call a little run-time stub to avoid blow-up for each bytecode.
  // The run-time stub saves the right registers, depending on
  // the tosca in-state for the given template.

  assert(Interpreter::trace_code(t->tos_in()) != NULL,
         "entry must have been generated");
#ifndef _LP64
  __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
#else
  __ mov(r12, rsp);    // remember sp (can only use r12 if not using call_VM)
  __ andptr(rsp, -16); // align stack as required by ABI
  __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
  __ mov(rsp, r12);    // restore sp
  __ reinit_heapbase();
#endif // _LP64
}


void TemplateInterpreterGenerator::stop_interpreter_at() {
  Label L;
  __ cmp32(ExternalAddress((address) &BytecodeCounter::_counter_value),
           StopInterpreterAt);
  __ jcc(Assembler::notEqual, L);
  __ int3();
  __ bind(L);
}
#endif // !PRODUCT
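// Usage note (hedged, non-product builds only): the helpers above are
// emitted by generate_and_dispatch() under the corresponding develop
// flags, e.g.
//
//   -XX:+CountBytecodes              (count_bytecode)
//   -XX:+PrintBytecodeHistogram      (histogram_bytecode)
//   -XX:+PrintBytecodePairHistogram  (histogram_bytecode_pair)
//   -XX:+TraceBytecodes              (trace_bytecode)
//   -XX:StopInterpreterAt=<n>        (stop_interpreter_at)
//
// For example, a debug build run as
//
//   java -XX:+TraceBytecodes -XX:StopInterpreterAt=100000 ...
//
// traces every bytecode executed and hits the int3 breakpoint once the
// global bytecode counter reaches 100000.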