/*
 * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterGenerator.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateTable.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

#define __ _masm->

#ifndef CC_INTERP

const int method_offset = frame::interpreter_frame_method_offset * wordSize;
const int bci_offset    = frame::interpreter_frame_bcx_offset    * wordSize;
const int locals_offset = frame::interpreter_frame_locals_offset * wordSize;

//-----------------------------------------------------------------------------

address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();

#ifdef ASSERT
  {
    Label L;
    __ lea(rax, Address(rbp,
                        frame::interpreter_frame_monitor_block_top_offset *
                        wordSize));
    __ cmpptr(rax, rsp); // rax = maximal rsp for current rbp (stack
                         // grows negative)
    __ jcc(Assembler::aboveEqual, L); // check if frame is complete
    __ stop("interpreter frame not set up");
    __ bind(L);
  }
#endif // ASSERT
  // Restore bcp under the assumption that the current frame is still
  // interpreted
  __ restore_bcp();

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // throw exception
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::throw_StackOverflowError));
  return entry;
}
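
// Note: handler entries like the one above are not called; their
// addresses are recorded by the template interpreter framework (see
// templateInterpreter.cpp) and generated template code jumps to them
// directly, which is why each handler ends by dispatching, jumping, or
// calling into the VM rather than returning.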

address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(
        const char* name) {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // setup parameters
  // ??? convention: expect aberrant index in register rbx
  __ lea(c_rarg1, ExternalAddress((address)name));
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::
                              throw_ArrayIndexOutOfBoundsException),
             c_rarg1, rbx);
  return entry;
}

address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();

  // object is at TOS
  __ pop(c_rarg1);

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();

  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::
                              throw_ClassCastException),
             c_rarg1);
  return entry;
}

address TemplateInterpreterGenerator::generate_exception_handler_common(
        const char* name, const char* message, bool pass_oop) {
  assert(!pass_oop || message == NULL, "either oop or message but not both");
  address entry = __ pc();
  if (pass_oop) {
    // object is at TOS
    __ pop(c_rarg2);
  }
  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // setup parameters
  __ lea(c_rarg1, ExternalAddress((address)name));
  if (pass_oop) {
    __ call_VM(rax, CAST_FROM_FN_PTR(address,
                                     InterpreterRuntime::
                                     create_klass_exception),
               c_rarg1, c_rarg2);
  } else {
    // kind of lame: ExternalAddress can't take NULL because
    // external_word_Relocation will assert.
    if (message != NULL) {
      __ lea(c_rarg2, ExternalAddress((address)message));
    } else {
      __ movptr(c_rarg2, NULL_WORD);
    }
    __ call_VM(rax,
               CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception),
               c_rarg1, c_rarg2);
  }
  // throw exception
  __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
  return entry;
}


address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
  address entry = __ pc();
  // NULL last_sp until next java call
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
  __ dispatch_next(state);
  return entry;
}


address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step) {
  address entry = __ pc();

  // Restore stack bottom in case i2c adjusted stack
  __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  // and NULL it as marker that esp is now tos until next java call
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);

  __ restore_bcp();
  __ restore_locals();

  if (state == atos) {
    Register mdp = rbx;
    Register tmp = rcx;
    __ profile_return_type(mdp, rax, tmp);
  }

  Label L_got_cache, L_giant_index;
  if (EnableInvokeDynamic) {
    __ cmpb(Address(r13, 0), Bytecodes::_invokedynamic);
    __ jcc(Assembler::equal, L_giant_index);
  }
  __ get_cache_and_index_at_bcp(rbx, rcx, 1, sizeof(u2));
  __ bind(L_got_cache);
  __ movl(rbx, Address(rbx, rcx,
                       Address::times_ptr,
                       in_bytes(ConstantPoolCache::base_offset()) +
                       3 * wordSize));
  __ andl(rbx, 0xFF);
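  // rbx now holds the low byte of the cache entry's flags word; in
  // this layout that byte is the callee's parameter size in stack
  // elements, so the lea below pops the arguments off the caller's
  // expression stack.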
  __ lea(rsp, Address(rsp, rbx, Address::times_8));
  __ dispatch_next(state, step);

  // out of the main line of code...
  if (EnableInvokeDynamic) {
    __ bind(L_giant_index);
    __ get_cache_and_index_at_bcp(rbx, rcx, 1, sizeof(u4));
    __ jmp(L_got_cache);
  }

  return entry;
}


address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state,
                                                               int step) {
  address entry = __ pc();
  // NULL last_sp until next java call
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
  __ restore_bcp();
  __ restore_locals();
  // handle exceptions
  {
    Label L;
    __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
    __ jcc(Assembler::zero, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }
  __ dispatch_next(state, step);
  return entry;
}

int AbstractInterpreter::BasicType_as_index(BasicType type) {
  int i = 0;
  switch (type) {
    case T_BOOLEAN: i = 0; break;
    case T_CHAR   : i = 1; break;
    case T_BYTE   : i = 2; break;
    case T_SHORT  : i = 3; break;
    case T_INT    : i = 4; break;
    case T_LONG   : i = 5; break;
    case T_VOID   : i = 6; break;
    case T_FLOAT  : i = 7; break;
    case T_DOUBLE : i = 8; break;
    case T_OBJECT : i = 9; break;
    case T_ARRAY  : i = 9; break;
    default       : ShouldNotReachHere();
  }
  assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers,
         "index out of bounds");
  return i;
}


address TemplateInterpreterGenerator::generate_result_handler_for(
        BasicType type) {
  address entry = __ pc();
  switch (type) {
  case T_BOOLEAN: __ c2bool(rax);            break;
  case T_CHAR   : __ movzwl(rax, rax);       break;
  case T_BYTE   : __ sign_extend_byte(rax);  break;
  case T_SHORT  : __ sign_extend_short(rax); break;
  case T_INT    : /* nothing to do */        break;
  case T_LONG   : /* nothing to do */        break;
  case T_VOID   : /* nothing to do */        break;
  case T_FLOAT  : /* nothing to do */        break;
  case T_DOUBLE : /* nothing to do */        break;
  case T_OBJECT :
    // retrieve result from frame
    __ movptr(rax, Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize));
    // and verify it
    __ verify_oop(rax);
    break;
  default       : ShouldNotReachHere();
  }
  __ ret(0); // return from result handler
  return entry;
}

address TemplateInterpreterGenerator::generate_safept_entry_for(
        TosState state,
        address runtime_entry) {
  address entry = __ pc();
  __ push(state);
  __ call_VM(noreg, runtime_entry);
  __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
  return entry;
}



// Helpers for commoning out cases in the various type of method entries.
//


// increment invocation count & check for overflow
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test
//
// rbx: method
// rcx: invocation counter
//
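// A sketch of the tiered fast path below, with assumed example values:
// the count lives in the upper bits of the counter word, so each call
// adds count_increment == 1 << count_shift, and with, say,
// Tier0InvokeNotifyFreqLog == 7 the mask covers the low seven count
// bits; increment_mask_and_jump() then branches to 'overflow' whenever
// those bits wrap to zero, i.e. on every 128th invocation.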
void InterpreterGenerator::generate_counter_incr(
        Label* overflow,
        Label* profile_method,
        Label* profile_method_continue) {
  Label done;
  // Note: In tiered we increment either counters in Method* or in MDO
  // depending on whether we're profiling or not.
  if (TieredCompilation) {
    int increment = InvocationCounter::count_increment;
    int mask = ((1 << Tier0InvokeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
    Label no_mdo;
    if (ProfileInterpreter) {
      // Are we profiling?
      __ movptr(rax, Address(rbx, Method::method_data_offset()));
      __ testptr(rax, rax);
      __ jccb(Assembler::zero, no_mdo);
      // Increment counter in the MDO
      const Address mdo_invocation_counter(rax, in_bytes(MethodData::invocation_counter_offset()) +
                                                in_bytes(InvocationCounter::counter_offset()));
      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rcx, false, Assembler::zero, overflow);
      __ jmp(done);
    }
    __ bind(no_mdo);
    // Increment counter in MethodCounters
    const Address invocation_counter(rax,
                  MethodCounters::invocation_counter_offset() +
                  InvocationCounter::counter_offset());
    __ get_method_counters(rbx, rax, done);
    __ increment_mask_and_jump(invocation_counter, increment, mask, rcx,
                               false, Assembler::zero, overflow);
    __ bind(done);
  } else {
    const Address backedge_counter(rax,
                  MethodCounters::backedge_counter_offset() +
                  InvocationCounter::counter_offset());
    const Address invocation_counter(rax,
                  MethodCounters::invocation_counter_offset() +
                  InvocationCounter::counter_offset());

    __ get_method_counters(rbx, rax, done);

    if (ProfileInterpreter) {
      __ incrementl(Address(rax,
              MethodCounters::interpreter_invocation_counter_offset()));
    }
    // Update standard invocation counters
    __ movl(rcx, invocation_counter);
    __ incrementl(rcx, InvocationCounter::count_increment);
    __ movl(invocation_counter, rcx); // save invocation count

    __ movl(rax, backedge_counter);   // load backedge counter
    __ andl(rax, InvocationCounter::count_mask_value); // mask out the status bits

    __ addl(rcx, rax);                // add both counters

    // profile_method is non-null only for interpreted methods, so
    // profile_method != NULL == !native_call

    if (ProfileInterpreter && profile_method != NULL) {
      // Test to see if we should create a method data oop
      __ cmp32(rcx, ExternalAddress((address)&InvocationCounter::InterpreterProfileLimit));
      __ jcc(Assembler::less, *profile_method_continue);

      // if no method data exists, go to profile_method
      __ test_method_data_pointer(rax, *profile_method);
    }

    __ cmp32(rcx, ExternalAddress((address)&InvocationCounter::InterpreterInvocationLimit));
    __ jcc(Assembler::aboveEqual, *overflow);
    __ bind(done);
  }
}
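
// For reference, each increment_mask_and_jump(counter, increment, mask, ...)
// used above boils down to (sketch):
//
//   counter += increment;         // bump the shifted count
//   if ((counter & mask) == 0) {  // masked count bits wrapped around
//     goto overflow;              // let the compilation policy decide
//   }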

void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {

  // Asm interpreter on entry
  // r14 - locals
  // r13 - bcp
  // rbx - method
  // edx - cpool --- DOES NOT APPEAR TO BE TRUE
  // rbp - interpreter frame

  // On return (i.e. jump to entry_point) [ back to invocation of interpreter ]
  // Everything as it was on entry
  // rdx is not restored. Doesn't appear to really be set.

  // InterpreterRuntime::frequency_counter_overflow takes two
  // arguments, the first (thread) is passed by call_VM, the second
  // indicates if the counter overflow occurs at a backwards branch
  // (NULL bcp). We pass zero for it. The call returns the address
  // of the verified entry point for the method or NULL if the
  // compilation did not complete (either went background or bailed
  // out).
  __ movl(c_rarg1, 0);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::frequency_counter_overflow),
             c_rarg1);

  __ movptr(rbx, Address(rbp, method_offset)); // restore Method*
  // Preserve invariant that r13/r14 contain bcp/locals of sender frame
  // and jump to the interpreted entry.
  __ jmp(*do_continue, relocInfo::none);
}

// See if we've got enough room on the stack for locals plus overhead.
// The expression stack grows down incrementally, so the normal guard
// page mechanism will work for that.
//
// NOTE: Since the additional locals are also always pushed (this was
// not obvious in generate_method_entry), the guard should work for
// them too.
//
// Args:
//   rdx: number of additional locals this frame needs (what we must check)
//   rbx: Method*
//
// Kills:
//   rax
void InterpreterGenerator::generate_stack_overflow_check(void) {

  // monitor entry size: see picture of stack set
  // (generate_method_entry) and frame_amd64.hpp
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

  // total overhead size: entry_size + (saved rbp through expr stack
  // bottom). Be sure to change this if you add/subtract anything
  // to/from the overhead area
  const int overhead_size =
    -(frame::interpreter_frame_initial_sp_offset * wordSize) + entry_size;

  const int page_size = os::vm_page_size();

  Label after_frame_check;

  // see if the frame is greater than one page in size. If so,
  // then we need to verify there is enough stack space remaining
  // for the additional locals.
  __ cmpl(rdx, (page_size - overhead_size) / Interpreter::stackElementSize);
  __ jcc(Assembler::belowEqual, after_frame_check);

  // compute rsp as if this were going to be the last frame on
  // the stack before the red zone

  const Address stack_base(r15_thread, Thread::stack_base_offset());
  const Address stack_size(r15_thread, Thread::stack_size_offset());

  // locals + overhead, in bytes
  __ mov(rax, rdx);
  __ shlptr(rax, Interpreter::logStackElementSize); // convert slot count to bytes
  __ addptr(rax, overhead_size);

#ifdef ASSERT
  Label stack_base_okay, stack_size_okay;
  // verify that thread stack base is non-zero
  __ cmpptr(stack_base, (int32_t)NULL_WORD);
  __ jcc(Assembler::notEqual, stack_base_okay);
  __ stop("stack base is zero");
  __ bind(stack_base_okay);
  // verify that thread stack size is non-zero
  __ cmpptr(stack_size, 0);
  __ jcc(Assembler::notEqual, stack_size_okay);
  __ stop("stack size is zero");
  __ bind(stack_size_okay);
#endif

  // Add stack base to locals and subtract stack size
  __ addptr(rax, stack_base);
  __ subptr(rax, stack_size);

  // Use the maximum number of pages we might bang.
  const int max_pages = StackShadowPages > (StackRedPages+StackYellowPages) ? StackShadowPages :
                                                                              (StackRedPages+StackYellowPages);

  // add in the red and yellow zone sizes
  __ addptr(rax, max_pages * page_size);

  // check against the current stack bottom
  __ cmpptr(rsp, rax);
  __ jcc(Assembler::above, after_frame_check);

  // Restore sender's sp as SP. This is necessary if the sender's
  // frame is an extended compiled frame (see gen_c2i_adapter())
  // and safer anyway in case of JSR292 adaptations.

  __ pop(rax); // return address must be moved if SP is changed
  __ mov(rsp, r13);
  __ push(rax);

  // Note: the restored frame is not necessarily interpreted.
  // Use the shared runtime version of the StackOverflowError.
  assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
  __ jump(ExternalAddress(StubRoutines::throw_StackOverflowError_entry()));

  // all done with frame size check
  __ bind(after_frame_check);
}
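
// A rough feel for the fast path above (assuming 4K pages and
// wordSize == 8): overhead_size is on the order of a hundred bytes, so
// frames that need fewer than roughly (4096 - overhead_size) / 8,
// i.e. ~500, additional stack slots take the early exit and rely
// entirely on the ordinary guard pages.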

// Allocate monitor and lock method (asm interpreter)
//
// Args:
//   rbx: Method*
//   r14: locals
//
// Kills:
//   rax
//   c_rarg0, c_rarg1, c_rarg2, c_rarg3, ...(param regs)
//   rscratch1, rscratch2 (scratch regs)
void InterpreterGenerator::lock_method(void) {
  // synchronize method
  const Address access_flags(rbx, Method::access_flags_offset());
  const Address monitor_block_top(
        rbp,
        frame::interpreter_frame_monitor_block_top_offset * wordSize);
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

#ifdef ASSERT
  {
    Label L;
    __ movl(rax, access_flags);
    __ testl(rax, JVM_ACC_SYNCHRONIZED);
    __ jcc(Assembler::notZero, L);
    __ stop("method doesn't need synchronization");
    __ bind(L);
  }
#endif // ASSERT

  // get synchronization object
  {
    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
    Label done;
    __ movl(rax, access_flags);
    __ testl(rax, JVM_ACC_STATIC);
    // get receiver (assume this is frequent case)
    __ movptr(rax, Address(r14, Interpreter::local_offset_in_bytes(0)));
    __ jcc(Assembler::zero, done);
    __ movptr(rax, Address(rbx, Method::const_offset()));
    __ movptr(rax, Address(rax, ConstMethod::constants_offset()));
    __ movptr(rax, Address(rax,
                           ConstantPool::pool_holder_offset_in_bytes()));
    __ movptr(rax, Address(rax, mirror_offset));

#ifdef ASSERT
    {
      Label L;
      __ testptr(rax, rax);
      __ jcc(Assembler::notZero, L);
      __ stop("synchronization object is NULL");
      __ bind(L);
    }
#endif // ASSERT

    __ bind(done);
  }

  // add space for monitor & lock
  __ subptr(rsp, entry_size);        // add space for a monitor entry
  __ movptr(monitor_block_top, rsp); // set new monitor block top
  // store object
  __ movptr(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax);
  __ movptr(c_rarg1, rsp); // object address
  __ lock_object(c_rarg1);
}

// Generate a fixed interpreter frame. This is identical setup for
// interpreted methods and for native methods hence the shared code.
//
// Args:
//   rax: return address
//   rbx: Method*
//   r14: pointer to locals
//   r13: sender sp
//   rdx: cp cache
void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  // initialize fixed part of activation frame
  __ push(rax);            // save return address
  __ enter();              // save old & set new rbp
  __ push(r13);            // set sender sp
  __ push((int)NULL_WORD); // leave last_sp as null
  __ movptr(r13, Address(rbx, Method::const_offset()));   // get ConstMethod*
  __ lea(r13, Address(r13, ConstMethod::codes_offset())); // get codebase
  __ push(rbx);            // save Method*
  if (ProfileInterpreter) {
    Label method_data_continue;
    __ movptr(rdx, Address(rbx, in_bytes(Method::method_data_offset())));
    __ testptr(rdx, rdx);
    __ jcc(Assembler::zero, method_data_continue);
    __ addptr(rdx, in_bytes(MethodData::data_offset()));
    __ bind(method_data_continue);
    __ push(rdx); // set the mdp (method data pointer)
  } else {
    __ push(0);
  }

  __ movptr(rdx, Address(rbx, Method::const_offset()));
  __ movptr(rdx, Address(rdx, ConstMethod::constants_offset()));
  __ movptr(rdx, Address(rdx, ConstantPool::cache_offset_in_bytes()));
  __ push(rdx); // set constant pool cache
  __ push(r14); // set locals pointer
  if (native_call) {
    __ push(0); // no bcp
  } else {
    __ push(r13); // set bcp
  }
  __ push(0); // reserve word for pointer to expression stack bottom
  __ movptr(Address(rsp, 0), rsp); // set expression stack bottom
}

// End of helpers

// Various method entries
//------------------------------------------------------------------------------------------------------------------------
//
//

// Call an accessor method (assuming it is resolved, otherwise drop
// into vanilla (slow path) entry
address InterpreterGenerator::generate_accessor_entry(void) {
  // rbx: Method*

  // r13: senderSP must be preserved for slow path, set SP to it on fast path

  address entry_point = __ pc();
  Label xreturn_path;

  // do fastpath for resolved accessor methods
  if (UseFastAccessorMethods) {
    // Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites
    //       thereof; parameter size = 1
    // Note: We can only use this code if the getfield has been resolved
    //       and if we don't have a null-pointer exception => check for
    //       these conditions first and use slow path if necessary.
    Label slow_path;
    // If we need a safepoint check, generate full interpreter entry.
    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
             SafepointSynchronize::_not_synchronized);

    __ jcc(Assembler::notEqual, slow_path);
    // rbx: method
    __ movptr(rax, Address(rsp, wordSize));

    // check if local 0 != NULL and read field
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, slow_path);

    // read first instruction word and extract bytecode @ 1 and index @ 2
    __ movptr(rdx, Address(rbx, Method::const_offset()));
    __ movptr(rdi, Address(rdx, ConstMethod::constants_offset()));
    __ movl(rdx, Address(rdx, ConstMethod::codes_offset()));
    // Shift codes right to get the index on the right.
    // The bytecode fetched looks like <index><0xb4><0x2a>
    __ shrl(rdx, 2 * BitsPerByte);
    __ shll(rdx, exact_log2(in_words(ConstantPoolCacheEntry::size())));
    __ movptr(rdi, Address(rdi, ConstantPool::cache_offset_in_bytes()));

    // rax: local 0
    // rbx: method
    // rdx: constant pool cache index
    // rdi: constant pool cache

    // check if getfield has been resolved and read constant pool cache entry
    // check the validity of the cache entry by testing whether _indices field
    // contains Bytecode::_getfield in b1 byte.
    assert(in_words(ConstantPoolCacheEntry::size()) == 4,
           "adjust shift below");
    __ movl(rcx,
            Address(rdi,
                    rdx,
                    Address::times_8,
                    ConstantPoolCache::base_offset() +
                    ConstantPoolCacheEntry::indices_offset()));
    __ shrl(rcx, 2 * BitsPerByte);
    __ andl(rcx, 0xFF);
    __ cmpl(rcx, Bytecodes::_getfield);
    __ jcc(Assembler::notEqual, slow_path);

    // Note: constant pool entry is not valid before bytecode is resolved
    __ movptr(rcx,
              Address(rdi,
                      rdx,
                      Address::times_8,
                      ConstantPoolCache::base_offset() +
                      ConstantPoolCacheEntry::f2_offset()));
    // edx: flags
    __ movl(rdx,
            Address(rdi,
                    rdx,
                    Address::times_8,
                    ConstantPoolCache::base_offset() +
                    ConstantPoolCacheEntry::flags_offset()));

    Label notObj, notInt, notByte, notShort;
    const Address field_address(rax, rcx, Address::times_1);

    // Need to differentiate between igetfield, agetfield, bgetfield etc.
    // because they are different sizes.
    // Use the type from the constant pool cache
    __ shrl(rdx, ConstantPoolCacheEntry::tos_state_shift);
    // Make sure we don't need to mask edx after the above shift
    ConstantPoolCacheEntry::verify_tos_state_shift();

    __ cmpl(rdx, atos);
    __ jcc(Assembler::notEqual, notObj);
    // atos
    __ load_heap_oop(rax, field_address);
    __ jmp(xreturn_path);

    __ bind(notObj);
    __ cmpl(rdx, itos);
    __ jcc(Assembler::notEqual, notInt);
    // itos
    __ movl(rax, field_address);
    __ jmp(xreturn_path);

    __ bind(notInt);
    __ cmpl(rdx, btos);
    __ jcc(Assembler::notEqual, notByte);
    // btos
    __ load_signed_byte(rax, field_address);
    __ jmp(xreturn_path);

    __ bind(notByte);
    __ cmpl(rdx, stos);
    __ jcc(Assembler::notEqual, notShort);
    // stos
    __ load_signed_short(rax, field_address);
    __ jmp(xreturn_path);

    __ bind(notShort);
#ifdef ASSERT
    Label okay;
    __ cmpl(rdx, ctos);
    __ jcc(Assembler::equal, okay);
    __ stop("what type is this?");
    __ bind(okay);
#endif
    // ctos
    __ load_unsigned_short(rax, field_address);

    __ bind(xreturn_path);

    // _ireturn/_areturn
    __ pop(rdi);
    __ mov(rsp, r13);
    __ jmp(rdi);
    __ ret(0);

    // generate a vanilla interpreter entry as the slow path
    __ bind(slow_path);
    (void) generate_normal_entry(false);
  } else {
    (void) generate_normal_entry(false);
  }

  return entry_point;
}

// Method entry for java.lang.ref.Reference.get.
address InterpreterGenerator::generate_Reference_get_entry(void) {
#if INCLUDE_ALL_GCS
  // Code: _aload_0, _getfield, _areturn
  // parameter size = 1
  //
  // The code that gets generated by this routine is split into 2 parts:
  //    1. The "intrinsified" code for G1 (or any SATB based GC),
  //    2. The slow path - which is an expansion of the regular method entry.
  //
  // Notes:-
  // * In the G1 code we do not check whether we need to block for
  //   a safepoint. If G1 is enabled then we must execute the specialized
  //   code for Reference.get (except when the Reference object is null)
  //   so that we can log the value in the referent field with an SATB
  //   update buffer.
  //   If the code for the getfield template is modified so that the
  //   G1 pre-barrier code is executed when the current method is
  //   Reference.get() then going through the normal method entry
  //   will be fine.
  // * The G1 code can, however, check the receiver object (the instance
  //   of java.lang.Reference) and jump to the slow path if null. If the
  //   Reference object is null then we obviously cannot fetch the referent
  //   and so we don't need to call the G1 pre-barrier. Thus we can use the
  //   regular method entry code to generate the NPE.
  //
  // This code is based on generate_accessor_entry.
  //
  // rbx: Method*

  // r13: senderSP must be preserved for slow path, set SP to it on fast path

  address entry = __ pc();

  const int referent_offset = java_lang_ref_Reference::referent_offset;
  guarantee(referent_offset > 0, "referent offset not initialized");

  if (UseG1GC) {
    Label slow_path;
    // rbx: method

    // Check if local 0 != NULL
    // If the receiver is null then it is OK to jump to the slow path.
    __ movptr(rax, Address(rsp, wordSize));

    __ testptr(rax, rax);
    __ jcc(Assembler::zero, slow_path);

    // rax: local 0
    // rbx: method (but can be used as scratch now)
    // rdx: scratch
    // rdi: scratch

    // Generate the G1 pre-barrier code to log the value of
    // the referent field in an SATB buffer.

    // Load the value of the referent field.
    const Address field_address(rax, referent_offset);
    __ load_heap_oop(rax, field_address);

    // Generate the G1 pre-barrier code to log the value of
    // the referent field in an SATB buffer.
    __ g1_write_barrier_pre(noreg /* obj */,
                            rax /* pre_val */,
                            r15_thread /* thread */,
                            rbx /* tmp */,
                            true /* tosca_live */,
                            true /* expand_call */);

    // _areturn
    __ pop(rdi);      // get return address
    __ mov(rsp, r13); // set sp to sender sp
    __ jmp(rdi);
    __ ret(0);

    // generate a vanilla interpreter entry as the slow path
    __ bind(slow_path);
    (void) generate_normal_entry(false);

    return entry;
  }
#endif // INCLUDE_ALL_GCS

  // If G1 is not enabled then attempt to go through the accessor entry point
  // Reference.get is an accessor
  return generate_accessor_entry();
}

/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32.update(int crc, int b)
 */
address InterpreterGenerator::generate_CRC32_update_entry() {
  if (UseCRC32Intrinsics) {
    address entry = __ pc();

    // rbx: Method*
    // r13: senderSP must be preserved for slow path, set SP to it on fast path
    // c_rarg0: scratch (rdi on non-Win64, rcx on Win64)
    // c_rarg1: scratch (rsi on non-Win64, rdx on Win64)

    Label slow_path;
    // If we need a safepoint check, generate full interpreter entry.
    ExternalAddress state(SafepointSynchronize::address_of_state());
    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
             SafepointSynchronize::_not_synchronized);
    __ jcc(Assembler::notEqual, slow_path);

    // We don't generate local frame and don't align stack because
    // we call stub code and there is no safepoint on this path.

    // Load parameters
    const Register crc = rax;     // crc
    const Register val = c_rarg0; // source java byte value
    const Register tbl = c_rarg1; // scratch

    // Arguments are reversed on java expression stack
    __ movl(val, Address(rsp,   wordSize)); // byte value
    __ movl(crc, Address(rsp, 2*wordSize)); // Initial CRC

    __ lea(tbl, ExternalAddress(StubRoutines::crc_table_addr()));
    __ notl(crc); // ~crc
    __ update_byte_crc32(crc, val, tbl);
    __ notl(crc); // ~crc
    // result in rax

    // _areturn
    __ pop(rdi);      // get return address
    __ mov(rsp, r13); // set sp to sender sp
    __ jmp(rdi);

    // generate a vanilla native entry as the slow path
    __ bind(slow_path);

    (void) generate_native_entry(false);

    return entry;
  }
  return generate_native_entry(false);
}

/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
 *   int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
 */
address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
  if (UseCRC32Intrinsics) {
    address entry = __ pc();

    // rbx: Method*
    // r13: senderSP must be preserved for slow path, set SP to it on fast path

    Label slow_path;
    // If we need a safepoint check, generate full interpreter entry.
    ExternalAddress state(SafepointSynchronize::address_of_state());
    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
             SafepointSynchronize::_not_synchronized);
    __ jcc(Assembler::notEqual, slow_path);

    // We don't generate local frame and don't align stack because
    // we call stub code and there is no safepoint on this path.

    // Load parameters
    const Register crc = c_rarg0; // crc
    const Register buf = c_rarg1; // source java byte array address
    const Register len = c_rarg2; // length
    const Register off = len;     // offset (never overlaps with 'len')

    // Arguments are reversed on java expression stack
    // Calculate address of start element
    if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) {
      __ movptr(buf, Address(rsp, 3*wordSize));   // long buf
      __ movl2ptr(off, Address(rsp, 2*wordSize)); // offset
      __ addq(buf, off);                          // + offset
      __ movl(crc, Address(rsp, 5*wordSize));     // Initial CRC
    } else {
      __ movptr(buf, Address(rsp, 3*wordSize));   // byte[] array
      __ addptr(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size
      __ movl2ptr(off, Address(rsp, 2*wordSize)); // offset
      __ addq(buf, off);                          // + offset
      __ movl(crc, Address(rsp, 4*wordSize));     // Initial CRC
    }
    // Can now load 'len' since we're finished with 'off'
    __ movl(len, Address(rsp, wordSize)); // Length

    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32()), crc, buf, len);
    // result in rax

    // _areturn
    __ pop(rdi);      // get return address
    __ mov(rsp, r13); // set sp to sender sp
    __ jmp(rdi);

    // generate a vanilla native entry as the slow path
    __ bind(slow_path);

    (void) generate_native_entry(false);

    return entry;
  }
  return generate_native_entry(false);
}
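
// For reference, the two intrinsified forms above correspond to the
// Java-level signatures
//   int updateBytes(int crc, byte[] b, int off, int len)
//   int updateByteBuffer(int crc, long addr, int off, int len)
// with arguments pushed left to right, so the last argument 'len' sits
// on top of the caller's expression stack at rsp + wordSize (the word
// at rsp is the return address) and 'crc' sits deepest, at
// rsp + 4*wordSize or rsp + 5*wordSize (the long takes two slots).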

// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the
// native method than the typical interpreter frame setup.
address InterpreterGenerator::generate_native_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls;

  // rbx: Method*
  // r13: sender sp

  address entry_point = __ pc();

  const Address constMethod       (rbx, Method::const_offset());
  const Address access_flags      (rbx, Method::access_flags_offset());
  const Address size_of_parameters(rcx, ConstMethod::
                                        size_of_parameters_offset());


  // get parameter size (always needed)
  __ movptr(rcx, constMethod);
  __ load_unsigned_short(rcx, size_of_parameters);

  // native calls don't need the stack size check since they have no
  // expression stack and the arguments are already on the stack and
  // we only add a handful of words to the stack

  // rbx: Method*
  // rcx: size of parameters
  // r13: sender sp
  __ pop(rax); // get return address

  // for natives the size of locals is zero

  // compute beginning of parameters (r14)
  __ lea(r14, Address(rsp, rcx, Address::times_8, -wordSize));

  // add 2 zero-initialized slots for native calls
  // initialize result_handler slot
  __ push((int) NULL_WORD);
  // slot for oop temp
  // (static native method holder mirror/jni oop result)
  __ push((int) NULL_WORD);

  // initialize fixed part of activation frame
  generate_fixed_frame(true);

  // make sure method is native & not abstract
#ifdef ASSERT
  __ movl(rax, access_flags);
  {
    Label L;
    __ testl(rax, JVM_ACC_NATIVE);
    __ jcc(Assembler::notZero, L);
    __ stop("tried to execute non-native method as native");
    __ bind(L);
  }
  {
    Label L;
    __ testl(rax, JVM_ACC_ABSTRACT);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // Since at this point in the method invocation the exception handler
  // would try to exit the monitor of synchronized methods which hasn't
  // been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation will
  // check this flag.

  const Address do_not_unlock_if_synchronized(r15_thread,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ movbool(do_not_unlock_if_synchronized, true);

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  bang_stack_shadow_pages(true);

  // reset the _do_not_unlock_if_synchronized flag
  __ movbool(do_not_unlock_if_synchronized, false);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so method is not locked if overflows.
  if (synchronized) {
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    {
      Label L;
      __ movl(rax, access_flags);
      __ testl(rax, JVM_ACC_SYNCHRONIZED);
      __ jcc(Assembler::zero, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  {
    Label L;
    const Address monitor_block_top(rbp,
                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ movptr(rax, monitor_block_top);
    __ cmpptr(rax, rsp);
    __ jcc(Assembler::equal, L);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif

  // jvmti support
  __ notify_method_entry();

  // work registers
  const Register method = rbx;
  const Register t      = r11;

  // allocate space for parameters
  __ get_method(method);
  __ movptr(t, Address(method, Method::const_offset()));
  __ load_unsigned_short(t, Address(t, ConstMethod::size_of_parameters_offset()));
  __ shll(t, Interpreter::logStackElementSize);

  __ subptr(rsp, t);
  __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
  __ andptr(rsp, -16); // must be 16 byte boundary (see amd64 ABI)

  // get signature handler
  {
    Label L;
    __ movptr(t, Address(method, Method::signature_handler_offset()));
    __ testptr(t, t);
    __ jcc(Assembler::notZero, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               method);
    __ get_method(method);
    __ movptr(t, Address(method, Method::signature_handler_offset()));
    __ bind(L);
  }

  // call signature handler
  assert(InterpreterRuntime::SignatureHandlerGenerator::from() == r14,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::to() == rsp,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == rscratch1,
         "adjust this code");

  // The generated handlers do not touch RBX (the method oop).
  // However, large signatures cannot be cached and are generated
  // each time here. The slow-path generator can do a GC on return,
  // so we must reload it after the call.
  __ call(t);
  __ get_method(method); // slow path can do a GC, reload RBX


  // result handler is in rax
  // set result handler
  __ movptr(Address(rbp,
                    (frame::interpreter_frame_result_handler_offset) * wordSize),
            rax);

  // pass mirror handle if static call
  {
    Label L;
    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
    __ movl(t, Address(method, Method::access_flags_offset()));
    __ testl(t, JVM_ACC_STATIC);
    __ jcc(Assembler::zero, L);
    // get mirror
    __ movptr(t, Address(method, Method::const_offset()));
    __ movptr(t, Address(t, ConstMethod::constants_offset()));
    __ movptr(t, Address(t, ConstantPool::pool_holder_offset_in_bytes()));
    __ movptr(t, Address(t, mirror_offset));
    // copy mirror into activation frame
    __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize),
              t);
    // pass handle to mirror
    __ lea(c_rarg1,
           Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
    __ bind(L);
  }

  // get native function entry point
  {
    Label L;
    __ movptr(rax, Address(method, Method::native_function_offset()));
    ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
    __ movptr(rscratch2, unsatisfied.addr());
    __ cmpptr(rax, rscratch2);
    __ jcc(Assembler::notEqual, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               method);
    __ get_method(method);
    __ movptr(rax, Address(method, Method::native_function_offset()));
    __ bind(L);
  }

  // pass JNIEnv
  __ lea(c_rarg0, Address(r15_thread, JavaThread::jni_environment_offset()));

  // It is enough that the pc() points into the right code
  // segment. It does not have to be the correct return pc.
  __ set_last_Java_frame(rsp, rbp, (address) __ pc());

  // change thread state
#ifdef ASSERT
  {
    Label L;
    __ movl(t, Address(r15_thread, JavaThread::thread_state_offset()));
    __ cmpl(t, _thread_in_Java);
    __ jcc(Assembler::equal, L);
    __ stop("Wrong thread state in native stub");
    __ bind(L);
  }
#endif

  // Change state to native

  __ movl(Address(r15_thread, JavaThread::thread_state_offset()),
          _thread_in_native);

  // Call the native method.
  __ call(rax);
  // result potentially in rax or xmm0

  // Verify or restore cpu control state after JNI call
  __ restore_cpu_control_state_after_jni();

  // NOTE: The order of these pushes is known to frame::interpreter_frame_result
  // in order to extract the result of a method call. If the order of these
  // pushes change or anything else is added to the stack then the code in
  // interpreter_frame_result must also change.

  __ push(dtos);
  __ push(ltos);

  // change thread state
  __ movl(Address(r15_thread, JavaThread::thread_state_offset()),
          _thread_in_native_trans);

  if (os::is_MP()) {
    if (UseMembar) {
      // Force this write out before the read below
      __ membar(Assembler::Membar_mask_bits(
           Assembler::LoadLoad | Assembler::LoadStore |
           Assembler::StoreLoad | Assembler::StoreStore));
    } else {
      // Write serialization page so VM thread can do a pseudo remote membar.
      // We use the current thread pointer to calculate a thread specific
      // offset to write to within the page. This minimizes bus traffic
      // due to cache line collision.
      __ serialize_memory(r15_thread, rscratch2);
    }
  }
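
  // Transition protocol (sketch): the _thread_in_native_trans store
  // above must be visible to the VM thread before this thread reads
  // the safepoint state below; the membar (or the serialization page
  // write) supplies that store-load ordering, so an in-progress
  // safepoint cannot miss a thread that is reentering Java.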

  // check for safepoint operation in progress and/or pending suspend requests
  {
    Label Continue;
    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
             SafepointSynchronize::_not_synchronized);

    Label L;
    __ jcc(Assembler::notEqual, L);
    __ cmpl(Address(r15_thread, JavaThread::suspend_flags_offset()), 0);
    __ jcc(Assembler::equal, Continue);
    __ bind(L);

    // Don't use call_VM as it will see a possible pending exception
    // and forward it and never return here preventing us from
    // clearing _last_native_pc down below. Also can't use
    // call_VM_leaf either as it will check to see if r13 & r14 are
    // preserved and correspond to the bcp/locals pointers. So we do a
    // runtime call by hand.
    //
    __ mov(c_rarg0, r15_thread);
    __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
    __ andptr(rsp, -16); // align stack as required by ABI
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
    __ mov(rsp, r12); // restore sp
    __ reinit_heapbase();
    __ bind(Continue);
  }

  // change thread state
  __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_Java);

  // reset_last_Java_frame
  __ reset_last_Java_frame(true, true);

  // reset handle block
  __ movptr(t, Address(r15_thread, JavaThread::active_handles_offset()));
  __ movptr(Address(t, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);

  // If result is an oop unbox and store it in frame where gc will see it
  // and result handler will pick it up

  {
    Label no_oop, store_result;
    __ lea(t, ExternalAddress(AbstractInterpreter::result_handler(T_OBJECT)));
    __ cmpptr(t, Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize));
    __ jcc(Assembler::notEqual, no_oop);
    // retrieve result
    __ pop(ltos);
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, store_result);
    __ movptr(rax, Address(rax, 0));
    __ bind(store_result);
    __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize), rax);
    // keep stack depth as expected by pushing oop which will eventually be discarded
    __ push(ltos);
    __ bind(no_oop);
  }


  {
    Label no_reguard;
    __ cmpl(Address(r15_thread, JavaThread::stack_guard_state_offset()),
            JavaThread::stack_guard_yellow_disabled);
    __ jcc(Assembler::notEqual, no_reguard);

    __ pusha(); // XXX only save smashed registers
    __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
    __ andptr(rsp, -16); // align stack as required by ABI
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
    __ mov(rsp, r12); // restore sp
    __ popa(); // XXX only restore smashed registers
    __ reinit_heapbase();

    __ bind(no_reguard);
  }


  // The method register is junk from after the thread_in_native transition
  // until here. Also can't call_VM until the bcp has been
  // restored. Need bcp for throwing exception below so get it now.
  __ get_method(method);

  // restore r13 to have legal interpreter frame, i.e., bci == 0 <=>
  // r13 == code_base()
  __ movptr(r13, Address(method, Method::const_offset()));   // get ConstMethod*
  __ lea(r13, Address(r13, ConstMethod::codes_offset()));    // get codebase
  // handle exceptions (exception handling will handle unlocking!)
  {
    Label L;
    __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
    __ jcc(Assembler::zero, L);
    // Note: At some point we may want to unify this with the code
    // used in call_VM_base(); i.e., we should use the
    // StubRoutines::forward_exception code. For now this doesn't work
    // here because the rsp is not correctly set at this point.
    __ MacroAssembler::call_VM(noreg,
                               CAST_FROM_FN_PTR(address,
                               InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  // do unlocking if necessary
  {
    Label L;
    __ movl(t, Address(method, Method::access_flags_offset()));
    __ testl(t, JVM_ACC_SYNCHRONIZED);
    __ jcc(Assembler::zero, L);
    // the code below should be shared with interpreter macro
    // assembler implementation
    {
      Label unlock;
      // BasicObjectLock will be first in list, since this is a
      // synchronized method. However, need to check that the object
      // has not been unlocked by an explicit monitorexit bytecode.
      const Address monitor(rbp,
                            (intptr_t)(frame::interpreter_frame_initial_sp_offset *
                                       wordSize - sizeof(BasicObjectLock)));

      // monitor expected in c_rarg1 for slow unlock path
      __ lea(c_rarg1, monitor); // address of first monitor

      __ movptr(t, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
      __ testptr(t, t);
      __ jcc(Assembler::notZero, unlock);

      // Entry already unlocked, need to throw exception
      __ MacroAssembler::call_VM(noreg,
                                 CAST_FROM_FN_PTR(address,
                   InterpreterRuntime::throw_illegal_monitor_state_exception));
      __ should_not_reach_here();

      __ bind(unlock);
      __ unlock_object(c_rarg1);
    }
    __ bind(L);
  }

  // jvmti support
  // Note: This must happen _after_ handling/throwing any exceptions since
  //       the exception handler code notifies the runtime of method exits
  //       too. If this happens before, method entry/exit notifications are
  //       not properly paired (was bug - gri 11/22/99).
  __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);

  // restore potential result in edx:eax, call result handler to
  // restore potential result in ST0 & handle result

  __ pop(ltos);
  __ pop(dtos);

  __ movptr(t, Address(rbp,
                       (frame::interpreter_frame_result_handler_offset) * wordSize));
  __ call(t);

  // remove activation
  __ movptr(t, Address(rbp,
                       frame::interpreter_frame_sender_sp_offset *
                       wordSize)); // get sender sp
  __ leave();                      // remove frame anchor
  __ pop(rdi);                     // get return address
  __ mov(rsp, t);                  // set sp to sender sp
  __ jmp(rdi);

  if (inc_counter) {
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(&continue_after_compile);
  }

  return entry_point;
}

//
// Generic interpreted method entry to (asm) interpreter
//
address InterpreterGenerator::generate_normal_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls;

  // rbx: Method*
  // r13: sender sp
  address entry_point = __ pc();

  const Address constMethod(rbx, Method::const_offset());
  const Address access_flags(rbx, Method::access_flags_offset());
  const Address size_of_parameters(rdx,
                                   ConstMethod::size_of_parameters_offset());
  const Address size_of_locals(rdx, ConstMethod::size_of_locals_offset());


  // get parameter size (always needed)
  __ movptr(rdx, constMethod);
  __ load_unsigned_short(rcx, size_of_parameters);

  // rbx: Method*
  // rcx: size of parameters
  // r13: sender_sp (could differ from sp+wordSize if we were called via c2i )

  __ load_unsigned_short(rdx, size_of_locals); // get size of locals in words
  __ subl(rdx, rcx); // rdx = no. of additional locals

  // YYY
  //   __ incrementl(rdx);
  //   __ andl(rdx, -2);

  // see if we've got enough room on the stack for locals plus overhead.
  generate_stack_overflow_check();

  // get return address
  __ pop(rax);

  // compute beginning of parameters (r14)
  __ lea(r14, Address(rsp, rcx, Address::times_8, -wordSize));

  // rdx - # of additional locals
  // allocate space for locals
  // explicitly initialize locals
  {
    Label exit, loop;
    __ testl(rdx, rdx);
    __ jcc(Assembler::lessEqual, exit); // do nothing if rdx <= 0
    __ bind(loop);
    __ push((int) NULL_WORD); // initialize local variables
    __ decrementl(rdx);       // until everything initialized
    __ jcc(Assembler::greater, loop);
    __ bind(exit);
  }

  // initialize fixed part of activation frame
  generate_fixed_frame(false);

  // make sure method is not native & not abstract
#ifdef ASSERT
  __ movl(rax, access_flags);
  {
    Label L;
    __ testl(rax, JVM_ACC_NATIVE);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute native method as non-native");
    __ bind(L);
  }
  {
    Label L;
    __ testl(rax, JVM_ACC_ABSTRACT);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // Since at this point in the method invocation the exception
  // handler would try to exit the monitor of synchronized methods
  // which hasn't been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation
  // will check this flag.

  const Address do_not_unlock_if_synchronized(r15_thread,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ movbool(do_not_unlock_if_synchronized, true);

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  Label profile_method;
  Label profile_method_continue;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow,
                          &profile_method,
                          &profile_method_continue);
    if (ProfileInterpreter) {
      __ bind(profile_method_continue);
    }
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  // bang the stack shadow pages
  bang_stack_shadow_pages(false);

  // reset the _do_not_unlock_if_synchronized flag
  __ movbool(do_not_unlock_if_synchronized, false);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so method is not locked if overflows.
  if (synchronized) {
    // Allocate monitor and lock method
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    {
      Label L;
      __ movl(rax, access_flags);
      __ testl(rax, JVM_ACC_SYNCHRONIZED);
      __ jcc(Assembler::zero, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  {
    Label L;
    const Address monitor_block_top (rbp,
                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ movptr(rax, monitor_block_top);
    __ cmpptr(rax, rsp);
    __ jcc(Assembler::equal, L);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif

  // jvmti support
  __ notify_method_entry();

  __ dispatch_next(vtos);

  // invocation counter overflow
  if (inc_counter) {
    if (ProfileInterpreter) {
      // We have decided to profile this method in the interpreter
      __ bind(profile_method);
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
      __ set_method_data_pointer_for_bcp();
      __ get_method(rbx);
      __ jmp(profile_method_continue);
    }
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(&continue_after_compile);
  }

  return entry_point;
}

// Entry points
//
// Here we generate the various kinds of entries into the interpreter.
// The two main entry types are generic bytecode methods and native
// call methods. These both come in synchronized and non-synchronized
// versions but the frame layout they create is very similar. The
// other method entry types are really just special purpose entries
// that are really entry and interpretation all in one. These are for
// trivial methods like accessor, empty, or special math methods.
//
// When control flow reaches any of the entry types for the interpreter
// the following holds ->
//
// Arguments:
//
// rbx: Method*
//
// Stack layout immediately at entry
//
// [ return address     ] <--- rsp
// [ parameter n        ]
//   ...
// [ parameter 1        ]
// [ expression stack   ] (caller's java expression stack)

// Assuming that we don't go to one of the trivial specialized entries
// the stack will look like below when we are ready to execute the
// first bytecode (or call the native routine). The register usage
// will be as the template based interpreter expects (see
// interpreter_amd64.hpp).
//
// local variables follow incoming parameters immediately; i.e.
// the return address is moved to the end of the locals.
//
// [ monitor entry      ] <--- rsp
//   ...
// [ monitor entry      ]
// [ expr. stack bottom ]
// [ saved r13          ]
// [ current r14        ]
// [ Method*            ]
// [ saved ebp          ] <--- rbp
// [ return address     ]
// [ local variable m   ]
//   ...
// [ local variable 1   ]
// [ parameter n        ]
//   ...
// [ parameter 1        ] <--- r14

address AbstractInterpreterGenerator::generate_method_entry(
                                        AbstractInterpreter::MethodKind kind) {
  // determine code generation flags
  bool synchronized = false;
  address entry_point = NULL;
  InterpreterGenerator* ig_this = (InterpreterGenerator*)this;

  switch (kind) {
  case Interpreter::zerolocals             :                                                       break;
  case Interpreter::zerolocals_synchronized: synchronized = true;                                  break;
  case Interpreter::native                 : entry_point = ig_this->generate_native_entry(false);  break;
  case Interpreter::native_synchronized    : entry_point = ig_this->generate_native_entry(true);   break;
  case Interpreter::empty                  : entry_point = ig_this->generate_empty_entry();        break;
  case Interpreter::accessor               : entry_point = ig_this->generate_accessor_entry();     break;
  case Interpreter::abstract               : entry_point = ig_this->generate_abstract_entry();     break;

  case Interpreter::java_lang_math_sin     : // fall thru
  case Interpreter::java_lang_math_cos     : // fall thru
  case Interpreter::java_lang_math_tan     : // fall thru
  case Interpreter::java_lang_math_abs     : // fall thru
  case Interpreter::java_lang_math_log     : // fall thru
  case Interpreter::java_lang_math_log10   : // fall thru
  case Interpreter::java_lang_math_sqrt    : // fall thru
  case Interpreter::java_lang_math_pow     : // fall thru
  case Interpreter::java_lang_math_exp     : entry_point = ig_this->generate_math_entry(kind);     break;
  case Interpreter::java_lang_ref_reference_get
                                           : entry_point = ig_this->generate_Reference_get_entry(); break;
  case Interpreter::java_util_zip_CRC32_update
                                           : entry_point = ig_this->generate_CRC32_update_entry();  break;
  case Interpreter::java_util_zip_CRC32_updateBytes
                                           : // fall thru
  case Interpreter::java_util_zip_CRC32_updateByteBuffer
                                           : entry_point = ig_this->generate_CRC32_updateBytes_entry(kind); break;
  default:
    fatal(err_msg("unexpected method kind: %d", kind));
    break;
  }

  if (entry_point) {
    return entry_point;
  }

  return ig_this->generate_normal_entry(synchronized);
}

// These should never be compiled since the interpreter will prefer
// the compiled version to the intrinsic version.
bool AbstractInterpreter::can_be_compiled(methodHandle m) {
  switch (method_kind(m)) {
  case Interpreter::java_lang_math_sin     : // fall thru
  case Interpreter::java_lang_math_cos     : // fall thru
  case Interpreter::java_lang_math_tan     : // fall thru
  case Interpreter::java_lang_math_abs     : // fall thru
  case Interpreter::java_lang_math_log     : // fall thru
  case Interpreter::java_lang_math_log10   : // fall thru
  case Interpreter::java_lang_math_sqrt    : // fall thru
  case Interpreter::java_lang_math_pow     : // fall thru
  case Interpreter::java_lang_math_exp     :
    return false;
  default:
    return true;
  }
}

// How much stack a method activation needs in words.
int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
  const int entry_size = frame::interpreter_frame_monitor_size();

  // total overhead size: entry_size + (saved rbp through expr stack
  // bottom). Be sure to change this if you add/subtract anything
  // to/from the overhead area
  const int overhead_size =
    -(frame::interpreter_frame_initial_sp_offset) + entry_size;

  const int stub_code = frame::entry_frame_after_call_words;
  const int method_stack = (method->max_locals() + method->max_stack()) *
                           Interpreter::stackElementWords;
  return (overhead_size + method_stack + stub_code);
}
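
// Worked example (sketch): for a method with max_locals == 2,
// max_stack == 3 and no monitors, method_stack comes to
// (2 + 3) * Interpreter::stackElementWords, and the total adds the
// fixed overhead_size plus entry_frame_after_call_words -- all
// expressed in words, as the header comment above notes.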
// How much stack a method activation needs in words.
int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
  const int entry_size = frame::interpreter_frame_monitor_size();

  // total overhead size: entry_size + (saved rbp thru expr stack
  // bottom). Be sure to change this if you add/subtract anything
  // to/from the overhead area.
  const int overhead_size =
    -(frame::interpreter_frame_initial_sp_offset) + entry_size;

  const int stub_code = frame::entry_frame_after_call_words;
  const int method_stack = (method->max_locals() + method->max_stack()) *
                           Interpreter::stackElementWords;
  return (overhead_size + method_stack + stub_code);
}
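// Worked example (the numbers below are hypothetical, for illustration
// only): if interpreter_frame_initial_sp_offset were -8 and a monitor
// entry occupied 2 words, overhead_size would be 8 + 2 = 10 words. A
// method with max_locals == 4 and max_stack == 3 would then need
// 10 + (4 + 3) * 1 + stub_code words, since stackElementWords is 1 on
// amd64.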
int AbstractInterpreter::layout_activation(Method* method,
                                           int tempcount,
                                           int popframe_extra_args,
                                           int moncount,
                                           int caller_actual_parameters,
                                           int callee_param_count,
                                           int callee_locals,
                                           frame* caller,
                                           frame* interpreter_frame,
                                           bool is_top_frame,
                                           bool is_bottom_frame) {
  // Note: This calculation must exactly parallel the frame setup
  // in AbstractInterpreterGenerator::generate_method_entry.
  // If interpreter_frame != NULL, set up the method, locals, and monitors.
  // The frame interpreter_frame, if not NULL, is guaranteed to be the
  // right size, as determined by a previous call to this method.
  // It is also guaranteed to be walkable even though it is in a
  // skeletal state.

  // fixed size of an interpreter frame:
  int max_locals = method->max_locals() * Interpreter::stackElementWords;
  int extra_locals = (method->max_locals() - method->size_of_parameters()) *
                     Interpreter::stackElementWords;

  int overhead = frame::sender_sp_offset -
                 frame::interpreter_frame_initial_sp_offset;
  // Our locals were accounted for by the caller (or last_frame_adjust
  // on the transition). Since the callee parameters already account
  // for the callee's params, we only need to account for the extra
  // locals.
  int size = overhead +
         (callee_locals - callee_param_count) * Interpreter::stackElementWords +
         moncount * frame::interpreter_frame_monitor_size() +
         tempcount * Interpreter::stackElementWords + popframe_extra_args;
  if (interpreter_frame != NULL) {
#ifdef ASSERT
    if (!EnableInvokeDynamic)
      // @@@ FIXME: Should we correct interpreter_frame_sender_sp in the calling sequences?
      // Probably, since deoptimization doesn't work yet.
      assert(caller->unextended_sp() == interpreter_frame->interpreter_frame_sender_sp(), "Frame not properly walkable");
    assert(caller->sp() == interpreter_frame->sender_sp(), "Frame not properly walkable(2)");
#endif

    interpreter_frame->interpreter_frame_set_method(method);
    // NOTE the difference between sender_sp and
    // interpreter_frame_sender_sp: interpreter_frame_sender_sp is
    // the original sp of the caller (the unextended_sp) and
    // sender_sp is fp+16 XXX
    intptr_t* locals = interpreter_frame->sender_sp() + max_locals - 1;

#ifdef ASSERT
    if (caller->is_interpreted_frame()) {
      assert(locals < caller->fp() + frame::interpreter_frame_initial_sp_offset, "bad placement");
    }
#endif

    interpreter_frame->interpreter_frame_set_locals(locals);
    BasicObjectLock* montop = interpreter_frame->interpreter_frame_monitor_begin();
    BasicObjectLock* monbot = montop - moncount;
    interpreter_frame->interpreter_frame_set_monitor_end(monbot);

    // Set last_sp
    intptr_t* esp = (intptr_t*) monbot -
                    tempcount * Interpreter::stackElementWords -
                    popframe_extra_args;
    interpreter_frame->interpreter_frame_set_last_sp(esp);

    // All frames but the initial (oldest) interpreter frame we fill in have
    // a value for sender_sp that allows walking the stack but isn't
    // truly correct. Correct the value here.
    if (extra_locals != 0 &&
        interpreter_frame->sender_sp() ==
        interpreter_frame->interpreter_frame_sender_sp()) {
      interpreter_frame->set_interpreter_frame_sender_sp(caller->sp() +
                                                         extra_locals);
    }
    *interpreter_frame->interpreter_frame_cache_addr() =
      method->constants()->cache();
  }
  return size;
}
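// A minimal sketch of the two-pass protocol described above (hedged:
// the real callers live in the deoptimization/vframeArray code, and
// the variable names here are illustrative only). The first call,
// with interpreter_frame == NULL, merely computes the size; once a
// skeletal, walkable frame of exactly that size exists, a second call
// fills it in:
//
//   int size = AbstractInterpreter::layout_activation(
//       method, tempcount, popframe_extra_args, moncount,
//       caller_actual_parameters, callee_param_count, callee_locals,
//       caller, NULL /* size pass */, is_top_frame, is_bottom_frame);
//   // ... carve out a skeletal frame of 'size' words ...
//   AbstractInterpreter::layout_activation(
//       method, tempcount, popframe_extra_args, moncount,
//       caller_actual_parameters, callee_param_count, callee_locals,
//       caller, skeletal_frame /* fill pass */, is_top_frame, is_bottom_frame);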

//-----------------------------------------------------------------------------
// Exceptions

void TemplateInterpreterGenerator::generate_throw_exception() {
  // Entry point in previous activation (i.e., if the caller was
  // interpreted)
  Interpreter::_rethrow_exception_entry = __ pc();
  // Restore sp to interpreter_frame_last_sp even though we are going
  // to empty the expression stack for the exception processing.
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
  // rax: exception
  // rdx: return address/pc that threw exception
  __ restore_bcp();    // r13 points to call/send
  __ restore_locals();
  __ reinit_heapbase();  // restore r12 as heapbase.
  // Entry point for exceptions thrown within interpreter code
  Interpreter::_throw_exception_entry = __ pc();
  // expression stack is undefined here
  // rax: exception
  // r13: exception bcp
  __ verify_oop(rax);
  __ mov(c_rarg1, rax);

  // expression stack must be empty before entering the VM in case of
  // an exception
  __ empty_expression_stack();
  // find exception handler address and preserve exception oop
  __ call_VM(rdx,
             CAST_FROM_FN_PTR(address,
                          InterpreterRuntime::exception_handler_for_exception),
             c_rarg1);
  // rax: exception handler entry point
  // rdx: preserved exception oop
  // r13: bcp for exception handler
  __ push_ptr(rdx); // push exception which is now the only value on the stack
  __ jmp(rax);      // jump to exception handler (may be _remove_activation_entry!)

  // If the exception is not handled in the current frame the frame is
  // removed and the exception is rethrown (i.e. exception
  // continuation is _rethrow_exception).
  //
  // Note: At this point the bci is still the bci for the instruction
  // which caused the exception and the expression stack is
  // empty. Thus, for any VM calls at this point, GC will find a legal
  // oop map (with empty expression stack).

  // In current activation
  // tos: exception
  // r13: exception bcp

  //
  // JVMTI PopFrame support
  //

  Interpreter::_remove_activation_preserving_args_entry = __ pc();
  __ empty_expression_stack();
  // Set the popframe_processing bit in pending_popframe_condition
  // indicating that we are currently handling popframe, so that
  // call_VMs that may happen later do not trigger new popframe
  // handling cycles.
  __ movl(rdx, Address(r15_thread, JavaThread::popframe_condition_offset()));
  __ orl(rdx, JavaThread::popframe_processing_bit);
  __ movl(Address(r15_thread, JavaThread::popframe_condition_offset()), rdx);

  {
    // Check to see whether we are returning to a deoptimized frame.
    // (The PopFrame call ensures that the caller of the popped frame is
    // either interpreted or compiled and deoptimizes it if compiled.)
    // In this case, we can't call dispatch_next() after the frame is
    // popped, but instead must save the incoming arguments and restore
    // them after deoptimization has occurred.
    //
    // Note that we don't compare the return PC against the
    // deoptimization blob's unpack entry because of the presence of
    // adapter frames in C2.
    Label caller_not_deoptimized;
    __ movptr(c_rarg1, Address(rbp, frame::return_addr_offset * wordSize));
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                               InterpreterRuntime::interpreter_contains), c_rarg1);
    __ testl(rax, rax);
    __ jcc(Assembler::notZero, caller_not_deoptimized);

    // Compute size of arguments for saving when returning to
    // deoptimized caller
    __ get_method(rax);
    __ movptr(rax, Address(rax, Method::const_offset()));
    __ load_unsigned_short(rax, Address(rax, in_bytes(ConstMethod::
                                                size_of_parameters_offset())));
    __ shll(rax, Interpreter::logStackElementSize);
    __ restore_locals(); // XXX do we need this?
    __ subptr(r14, rax);
    __ addptr(r14, wordSize);
    // Save these arguments
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                                           Deoptimization::
                                           popframe_preserve_args),
                          r15_thread, rax, r14);

    __ remove_activation(vtos, rdx,
                         /* throw_monitor_exception */ false,
                         /* install_monitor_exception */ false,
                         /* notify_jvmdi */ false);

    // Inform deoptimization that it is responsible for restoring
    // these arguments
    __ movl(Address(r15_thread, JavaThread::popframe_condition_offset()),
            JavaThread::popframe_force_deopt_reexecution_bit);

    // Continue in deoptimization handler
    __ jmp(rdx);

    __ bind(caller_not_deoptimized);
  }

  __ remove_activation(vtos, rdx, /* rdx result (retaddr) is not used */
                       /* throw_monitor_exception */ false,
                       /* install_monitor_exception */ false,
                       /* notify_jvmdi */ false);

  // Finish with popframe handling.
  // A previous I2C followed by a deoptimization might have moved the
  // outgoing arguments further up the stack.
  // PopFrame expects the mutations to those outgoing arguments to be
  // preserved, and other constraints basically require this frame to
  // look exactly as though it had previously invoked an interpreted
  // activation with no space between the top of the expression stack
  // (current last_sp) and the top of stack. Rather than force deopt to
  // maintain this kind of invariant all the time we call a small
  // fixup routine to move the mutated arguments onto the top of our
  // expression stack if necessary.
  __ mov(c_rarg1, rsp);
  __ movptr(c_rarg2, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  // PC must point into interpreter here
  __ set_last_Java_frame(noreg, rbp, __ pc());
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), r15_thread, c_rarg1, c_rarg2);
  __ reset_last_Java_frame(true, true);
  // Restore the last_sp and null it out
  __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);

  __ restore_bcp();    // XXX do we need this?
  __ restore_locals(); // XXX do we need this?
  // The method data pointer was incremented already during
  // call profiling. We have to restore the mdp for the current bcp.
  if (ProfileInterpreter) {
    __ set_method_data_pointer_for_bcp();
  }

  // Clear the popframe condition flag
  __ movl(Address(r15_thread, JavaThread::popframe_condition_offset()),
          JavaThread::popframe_inactive);

#if INCLUDE_JVMTI
  if (EnableInvokeDynamic) {
    Label L_done;
    const Register local0 = r14;

    __ cmpb(Address(r13, 0), Bytecodes::_invokestatic);
    __ jcc(Assembler::notEqual, L_done);

    // The member name argument must be restored if _invokestatic is
    // re-executed after a PopFrame call. Detect such a case in the
    // InterpreterRuntime function and return the member name
    // argument, or NULL.

    __ get_method(rdx);
    __ movptr(rax, Address(local0, 0));
    __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), rax, rdx, r13);

    __ testptr(rax, rax);
    __ jcc(Assembler::zero, L_done);

    // restore the member name argument into local slot 0
    __ movptr(Address(local0, 0), rax);
    __ bind(L_done);
  }
#endif // INCLUDE_JVMTI

  __ dispatch_next(vtos);
  // end of PopFrame support

  Interpreter::_remove_activation_entry = __ pc();

  // preserve exception over this code sequence
  __ pop_ptr(rax);
  __ movptr(Address(r15_thread, JavaThread::vm_result_offset()), rax);
  // remove the activation (without doing throws on illegalMonitorExceptions)
  __ remove_activation(vtos, rdx, false, true, false);
  // restore exception
  __ get_vm_result(rax, r15_thread);

  // In between activations - previous activation type unknown yet
  // compute continuation point - the continuation point expects the
  // following registers set up:
  //
  // rax: exception
  // rdx: return address/pc that threw exception
  // rsp: expression stack of caller
  // rbp: rbp of caller
  __ push(rax);                                  // save exception
  __ push(rdx);                                  // save return address
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                          SharedRuntime::exception_handler_for_return_address),
                        r15_thread, rdx);
  __ mov(rbx, rax);                              // save exception handler
  __ pop(rdx);                                   // restore return address
  __ pop(rax);                                   // restore exception
  // Note that an "issuing PC" is actually the next PC after the call
  __ jmp(rbx);                                   // jump to exception
                                                 // handler of caller
}


//
// JVMTI ForceEarlyReturn support
//
address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
  address entry = __ pc();

  __ restore_bcp();
  __ restore_locals();
  __ empty_expression_stack();
  __ load_earlyret_value(state);

  __ movptr(rdx, Address(r15_thread, JavaThread::jvmti_thread_state_offset()));
  Address cond_addr(rdx, JvmtiThreadState::earlyret_state_offset());

  // Clear the earlyret state
  __ movl(cond_addr, JvmtiThreadState::earlyret_inactive);

  __ remove_activation(state, rsi,
                       false, /* throw_monitor_exception */
                       false, /* install_monitor_exception */
                       true); /* notify_jvmdi */
  __ jmp(rsi);

  return entry;
} // end of ForceEarlyReturn support


//-----------------------------------------------------------------------------
// Helper for vtos entry point generation

void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
                                                         address& bep,
                                                         address& cep,
                                                         address& sep,
                                                         address& aep,
                                                         address& iep,
                                                         address& lep,
                                                         address& fep,
                                                         address& dep,
                                                         address& vep) {
  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
  Label L;
  aep = __ pc();  __ push_ptr();  __ jmp(L);
  fep = __ pc();  __ push_f();    __ jmp(L);
  dep = __ pc();  __ push_d();    __ jmp(L);
  lep = __ pc();  __ push_l();    __ jmp(L);
  bep = cep = sep =
  iep = __ pc();  __ push_i();
  vep = __ pc();
  __ bind(L);
  generate_and_dispatch(t);
}
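// For illustration, a hedged sketch of the code shape this helper
// emits (not a verbatim disassembly): each non-vtos entry spills its
// tos value onto the expression stack and falls into the common vtos
// point, so a single vtos template body serves every incoming state:
//
//   aep: push_ptr; jmp L        // atos: spill oop in rax
//   fep: push_f;   jmp L        // ftos: spill float in xmm0
//   dep: push_d;   jmp L        // dtos: spill double in xmm0
//   lep: push_l;   jmp L        // ltos: spill long in rax
//   bep = cep = sep = iep:
//        push_i                 // all int-like states share one entry
//   vep: L:                     // vtos has nothing to spill
//        generate_and_dispatch(t)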

//-----------------------------------------------------------------------------
// Generation of individual instructions

// helpers for generate_and_dispatch


InterpreterGenerator::InterpreterGenerator(StubQueue* code)
  : TemplateInterpreterGenerator(code) {
  generate_all(); // down here so it can be "virtual"
}

//-----------------------------------------------------------------------------

// Non-product code
#ifndef PRODUCT
address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
  address entry = __ pc();

  __ push(state);
  __ push(c_rarg0);
  __ push(c_rarg1);
  __ push(c_rarg2);
  __ push(c_rarg3);
  __ mov(c_rarg2, rax);   // Pass itos
#ifdef _WIN64
  __ movflt(xmm3, xmm0);  // Pass ftos
#endif
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode),
             c_rarg1, c_rarg2, c_rarg3);
  __ pop(c_rarg3);
  __ pop(c_rarg2);
  __ pop(c_rarg1);
  __ pop(c_rarg0);
  __ pop(state);
  __ ret(0);              // return from result handler

  return entry;
}

void TemplateInterpreterGenerator::count_bytecode() {
  __ incrementl(ExternalAddress((address) &BytecodeCounter::_counter_value));
}

void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
  __ incrementl(ExternalAddress((address) &BytecodeHistogram::_counters[t->bytecode()]));
}

void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
  // Form the (previous, current) pair index: shift the previous
  // bytecode into the low bits, or the current bytecode into the high
  // bits, then bump the counter for that pair.
  __ mov32(rbx, ExternalAddress((address) &BytecodePairHistogram::_index));
  __ shrl(rbx, BytecodePairHistogram::log2_number_of_codes);
  __ orl(rbx,
         ((int) t->bytecode()) <<
         BytecodePairHistogram::log2_number_of_codes);
  __ mov32(ExternalAddress((address) &BytecodePairHistogram::_index), rbx);
  __ lea(rscratch1, ExternalAddress((address) BytecodePairHistogram::_counters));
  __ incrementl(Address(rscratch1, rbx, Address::times_4));
}


void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
  // Call a little run-time stub to avoid blow-up for each bytecode.
  // The run-time stub saves the right registers, depending on
  // the tosca in-state for the given template.

  assert(Interpreter::trace_code(t->tos_in()) != NULL,
         "entry must have been generated");
  __ mov(r12, rsp);    // remember sp (can only use r12 if not using call_VM)
  __ andptr(rsp, -16); // align stack as required by ABI
  __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
  __ mov(rsp, r12);    // restore sp
  __ reinit_heapbase();
}


void TemplateInterpreterGenerator::stop_interpreter_at() {
  Label L;
  __ cmp32(ExternalAddress((address) &BytecodeCounter::_counter_value),
           StopInterpreterAt);
  __ jcc(Assembler::notEqual, L);
  __ int3();
  __ bind(L);
}
#endif // !PRODUCT
#endif // ! CC_INTERP