1 /* 2 * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "asm/macroAssembler.hpp" 27 #include "interpreter/bytecodeHistogram.hpp" 28 #include "interpreter/interpreter.hpp" 29 #include "interpreter/interpreterGenerator.hpp" 30 #include "interpreter/interpreterRuntime.hpp" 31 #include "interpreter/templateTable.hpp" 32 #include "oops/arrayOop.hpp" 33 #include "oops/methodData.hpp" 34 #include "oops/method.hpp" 35 #include "oops/oop.inline.hpp" 36 #include "prims/jvmtiExport.hpp" 37 #include "prims/jvmtiThreadState.hpp" 38 #include "runtime/arguments.hpp" 39 #include "runtime/deoptimization.hpp" 40 #include "runtime/frame.inline.hpp" 41 #include "runtime/sharedRuntime.hpp" 42 #include "runtime/stubRoutines.hpp" 43 #include "runtime/synchronizer.hpp" 44 #include "runtime/timer.hpp" 45 #include "runtime/vframeArray.hpp" 46 #include "utilities/debug.hpp" 47 #include "utilities/macros.hpp" 48 49 #define __ _masm-> 50 51 #ifndef CC_INTERP 52 53 const int method_offset = frame::interpreter_frame_method_offset * wordSize; 54 const int bci_offset = frame::interpreter_frame_bcx_offset * wordSize; 55 const int locals_offset = frame::interpreter_frame_locals_offset * wordSize; 56 57 //----------------------------------------------------------------------------- 58 59 address TemplateInterpreterGenerator::generate_StackOverflowError_handler() { 60 address entry = __ pc(); 61 62 #ifdef ASSERT 63 { 64 Label L; 65 __ lea(rax, Address(rbp, 66 frame::interpreter_frame_monitor_block_top_offset * 67 wordSize)); 68 __ cmpptr(rax, rsp); // rax = maximal rsp for current rbp (stack 69 // grows negative) 70 __ jcc(Assembler::aboveEqual, L); // check if frame is complete 71 __ stop ("interpreter frame not set up"); 72 __ bind(L); 73 } 74 #endif // ASSERT 75 // Restore bcp under the assumption that the current frame is still 76 // interpreted 77 __ restore_bcp(); 78 79 // expression stack must be empty before entering the VM if an 80 // exception happened 81 __ empty_expression_stack(); 82 // throw exception 83 __ call_VM(noreg, 84 CAST_FROM_FN_PTR(address, 85 InterpreterRuntime::throw_StackOverflowError)); 86 return entry; 87 } 88 89 address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler( 90 const char* name) { 91 address entry = __ pc(); 92 // expression stack must be empty before entering the VM if an 93 // exception happened 94 __ empty_expression_stack(); 95 // setup parameters 96 // ??? convention: expect aberrant index in register ebx 97 __ lea(c_rarg1, ExternalAddress((address)name)); 98 __ call_VM(noreg, 99 CAST_FROM_FN_PTR(address, 100 InterpreterRuntime:: 101 throw_ArrayIndexOutOfBoundsException), 102 c_rarg1, rbx); 103 return entry; 104 } 105 106 address TemplateInterpreterGenerator::generate_ClassCastException_handler() { 107 address entry = __ pc(); 108 109 // object is at TOS 110 __ pop(c_rarg1); 111 112 // expression stack must be empty before entering the VM if an 113 // exception happened 114 __ empty_expression_stack(); 115 116 __ call_VM(noreg, 117 CAST_FROM_FN_PTR(address, 118 InterpreterRuntime:: 119 throw_ClassCastException), 120 c_rarg1); 121 return entry; 122 } 123 124 address TemplateInterpreterGenerator::generate_exception_handler_common( 125 const char* name, const char* message, bool pass_oop) { 126 assert(!pass_oop || message == NULL, "either oop or message but not both"); 127 address entry = __ pc(); 128 if (pass_oop) { 129 // object is at TOS 130 __ pop(c_rarg2); 131 } 132 // expression stack must be empty before entering the VM if an 133 // exception happened 134 __ empty_expression_stack(); 135 // setup parameters 136 __ lea(c_rarg1, ExternalAddress((address)name)); 137 if (pass_oop) { 138 __ call_VM(rax, CAST_FROM_FN_PTR(address, 139 InterpreterRuntime:: 140 create_klass_exception), 141 c_rarg1, c_rarg2); 142 } else { 143 // kind of lame ExternalAddress can't take NULL because 144 // external_word_Relocation will assert. 145 if (message != NULL) { 146 __ lea(c_rarg2, ExternalAddress((address)message)); 147 } else { 148 __ movptr(c_rarg2, NULL_WORD); 149 } 150 __ call_VM(rax, 151 CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), 152 c_rarg1, c_rarg2); 153 } 154 // throw exception 155 __ jump(ExternalAddress(Interpreter::throw_exception_entry())); 156 return entry; 157 } 158 159 160 address TemplateInterpreterGenerator::generate_continuation_for(TosState state) { 161 address entry = __ pc(); 162 // NULL last_sp until next java call 163 __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD); 164 __ dispatch_next(state); 165 return entry; 166 } 167 168 169 address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step) { 170 address entry = __ pc(); 171 172 // Restore stack bottom in case i2c adjusted stack 173 __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize)); 174 // and NULL it as marker that esp is now tos until next java call 175 __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD); 176 177 __ restore_bcp(); 178 __ restore_locals(); 179 180 Label L_got_cache, L_giant_index; 181 if (EnableInvokeDynamic) { 182 __ cmpb(Address(r13, 0), Bytecodes::_invokedynamic); 183 __ jcc(Assembler::equal, L_giant_index); 184 } 185 __ get_cache_and_index_at_bcp(rbx, rcx, 1, sizeof(u2)); 186 __ bind(L_got_cache); 187 __ movl(rbx, Address(rbx, rcx, 188 Address::times_ptr, 189 in_bytes(ConstantPoolCache::base_offset()) + 190 3 * wordSize)); 191 __ andl(rbx, 0xFF); 192 __ lea(rsp, Address(rsp, rbx, Address::times_8)); 193 __ dispatch_next(state, step); 194 195 // out of the main line of code... 196 if (EnableInvokeDynamic) { 197 __ bind(L_giant_index); 198 __ get_cache_and_index_at_bcp(rbx, rcx, 1, sizeof(u4)); 199 __ jmp(L_got_cache); 200 } 201 202 return entry; 203 } 204 205 206 address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, 207 int step) { 208 address entry = __ pc(); 209 // NULL last_sp until next java call 210 __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD); 211 __ restore_bcp(); 212 __ restore_locals(); 213 // handle exceptions 214 { 215 Label L; 216 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD); 217 __ jcc(Assembler::zero, L); 218 __ call_VM(noreg, 219 CAST_FROM_FN_PTR(address, 220 InterpreterRuntime::throw_pending_exception)); 221 __ should_not_reach_here(); 222 __ bind(L); 223 } 224 __ dispatch_next(state, step); 225 return entry; 226 } 227 228 int AbstractInterpreter::BasicType_as_index(BasicType type) { 229 int i = 0; 230 switch (type) { 231 case T_BOOLEAN: i = 0; break; 232 case T_CHAR : i = 1; break; 233 case T_BYTE : i = 2; break; 234 case T_SHORT : i = 3; break; 235 case T_INT : i = 4; break; 236 case T_LONG : i = 5; break; 237 case T_VOID : i = 6; break; 238 case T_FLOAT : i = 7; break; 239 case T_DOUBLE : i = 8; break; 240 case T_OBJECT : i = 9; break; 241 case T_ARRAY : i = 9; break; 242 default : ShouldNotReachHere(); 243 } 244 assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers, 245 "index out of bounds"); 246 return i; 247 } 248 249 250 address TemplateInterpreterGenerator::generate_result_handler_for( 251 BasicType type) { 252 address entry = __ pc(); 253 switch (type) { 254 case T_BOOLEAN: __ c2bool(rax); break; 255 case T_CHAR : __ movzwl(rax, rax); break; 256 case T_BYTE : __ sign_extend_byte(rax); break; 257 case T_SHORT : __ sign_extend_short(rax); break; 258 case T_INT : /* nothing to do */ break; 259 case T_LONG : /* nothing to do */ break; 260 case T_VOID : /* nothing to do */ break; 261 case T_FLOAT : /* nothing to do */ break; 262 case T_DOUBLE : /* nothing to do */ break; 263 case T_OBJECT : 264 // retrieve result from frame 265 __ movptr(rax, Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize)); 266 // and verify it 267 __ verify_oop(rax); 268 break; 269 default : ShouldNotReachHere(); 270 } 271 __ ret(0); // return from result handler 272 return entry; 273 } 274 275 address TemplateInterpreterGenerator::generate_safept_entry_for( 276 TosState state, 277 address runtime_entry) { 278 address entry = __ pc(); 279 __ push(state); 280 __ call_VM(noreg, runtime_entry); 281 __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos)); 282 return entry; 283 } 284 285 286 287 // Helpers for commoning out cases in the various type of method entries. 288 // 289 290 291 // increment invocation count & check for overflow 292 // 293 // Note: checking for negative value instead of overflow 294 // so we have a 'sticky' overflow test 295 // 296 // rbx: method 297 // ecx: invocation counter 298 // 299 void InterpreterGenerator::generate_counter_incr( 300 Label* overflow, 301 Label* profile_method, 302 Label* profile_method_continue) { 303 Label done; 304 // Note: In tiered we increment either counters in Method* or in MDO depending if we're profiling or not. 305 if (TieredCompilation) { 306 int increment = InvocationCounter::count_increment; 307 int mask = ((1 << Tier0InvokeNotifyFreqLog) - 1) << InvocationCounter::count_shift; 308 Label no_mdo; 309 if (ProfileInterpreter) { 310 // Are we profiling? 311 __ movptr(rax, Address(rbx, Method::method_data_offset())); 312 __ testptr(rax, rax); 313 __ jccb(Assembler::zero, no_mdo); 314 // Increment counter in the MDO 315 const Address mdo_invocation_counter(rax, in_bytes(MethodData::invocation_counter_offset()) + 316 in_bytes(InvocationCounter::counter_offset())); 317 __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rcx, false, Assembler::zero, overflow); 318 __ jmp(done); 319 } 320 __ bind(no_mdo); 321 // Increment counter in MethodCounters 322 const Address invocation_counter(rax, 323 MethodCounters::invocation_counter_offset() + 324 InvocationCounter::counter_offset()); 325 __ get_method_counters(rbx, rax, done); 326 __ increment_mask_and_jump(invocation_counter, increment, mask, rcx, 327 false, Assembler::zero, overflow); 328 __ bind(done); 329 } else { 330 const Address backedge_counter(rax, 331 MethodCounters::backedge_counter_offset() + 332 InvocationCounter::counter_offset()); 333 const Address invocation_counter(rax, 334 MethodCounters::invocation_counter_offset() + 335 InvocationCounter::counter_offset()); 336 337 __ get_method_counters(rbx, rax, done); 338 339 if (ProfileInterpreter) { 340 __ incrementl(Address(rax, 341 MethodCounters::interpreter_invocation_counter_offset())); 342 } 343 // Update standard invocation counters 344 __ movl(rcx, invocation_counter); 345 __ incrementl(rcx, InvocationCounter::count_increment); 346 __ movl(invocation_counter, rcx); // save invocation count 347 348 __ movl(rax, backedge_counter); // load backedge counter 349 __ andl(rax, InvocationCounter::count_mask_value); // mask out the status bits 350 351 __ addl(rcx, rax); // add both counters 352 353 // profile_method is non-null only for interpreted method so 354 // profile_method != NULL == !native_call 355 356 if (ProfileInterpreter && profile_method != NULL) { 357 // Test to see if we should create a method data oop 358 __ cmp32(rcx, ExternalAddress((address)&InvocationCounter::InterpreterProfileLimit)); 359 __ jcc(Assembler::less, *profile_method_continue); 360 361 // if no method data exists, go to profile_method 362 __ test_method_data_pointer(rax, *profile_method); 363 } 364 365 __ cmp32(rcx, ExternalAddress((address)&InvocationCounter::InterpreterInvocationLimit)); 366 __ jcc(Assembler::aboveEqual, *overflow); 367 __ bind(done); 368 } 369 } 370 371 void InterpreterGenerator::generate_counter_overflow(Label* do_continue) { 372 373 // Asm interpreter on entry 374 // r14 - locals 375 // r13 - bcp 376 // rbx - method 377 // edx - cpool --- DOES NOT APPEAR TO BE TRUE 378 // rbp - interpreter frame 379 380 // On return (i.e. jump to entry_point) [ back to invocation of interpreter ] 381 // Everything as it was on entry 382 // rdx is not restored. Doesn't appear to really be set. 383 384 // InterpreterRuntime::frequency_counter_overflow takes two 385 // arguments, the first (thread) is passed by call_VM, the second 386 // indicates if the counter overflow occurs at a backwards branch 387 // (NULL bcp). We pass zero for it. The call returns the address 388 // of the verified entry point for the method or NULL if the 389 // compilation did not complete (either went background or bailed 390 // out). 391 __ movl(c_rarg1, 0); 392 __ call_VM(noreg, 393 CAST_FROM_FN_PTR(address, 394 InterpreterRuntime::frequency_counter_overflow), 395 c_rarg1); 396 397 __ movptr(rbx, Address(rbp, method_offset)); // restore Method* 398 // Preserve invariant that r13/r14 contain bcp/locals of sender frame 399 // and jump to the interpreted entry. 400 __ jmp(*do_continue, relocInfo::none); 401 } 402 403 // See if we've got enough room on the stack for locals plus overhead. 404 // The expression stack grows down incrementally, so the normal guard 405 // page mechanism will work for that. 406 // 407 // NOTE: Since the additional locals are also always pushed (wasn't 408 // obvious in generate_method_entry) so the guard should work for them 409 // too. 410 // 411 // Args: 412 // rdx: number of additional locals this frame needs (what we must check) 413 // rbx: Method* 414 // 415 // Kills: 416 // rax 417 void InterpreterGenerator::generate_stack_overflow_check(void) { 418 419 // monitor entry size: see picture of stack set 420 // (generate_method_entry) and frame_amd64.hpp 421 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize; 422 423 // total overhead size: entry_size + (saved rbp through expr stack 424 // bottom). be sure to change this if you add/subtract anything 425 // to/from the overhead area 426 const int overhead_size = 427 -(frame::interpreter_frame_initial_sp_offset * wordSize) + entry_size; 428 429 const int page_size = os::vm_page_size(); 430 431 Label after_frame_check; 432 433 // see if the frame is greater than one page in size. If so, 434 // then we need to verify there is enough stack space remaining 435 // for the additional locals. 436 __ cmpl(rdx, (page_size - overhead_size) / Interpreter::stackElementSize); 437 __ jcc(Assembler::belowEqual, after_frame_check); 438 439 // compute rsp as if this were going to be the last frame on 440 // the stack before the red zone 441 442 const Address stack_base(r15_thread, Thread::stack_base_offset()); 443 const Address stack_size(r15_thread, Thread::stack_size_offset()); 444 445 // locals + overhead, in bytes 446 __ mov(rax, rdx); 447 __ shlptr(rax, Interpreter::logStackElementSize); // 2 slots per parameter. 448 __ addptr(rax, overhead_size); 449 450 #ifdef ASSERT 451 Label stack_base_okay, stack_size_okay; 452 // verify that thread stack base is non-zero 453 __ cmpptr(stack_base, (int32_t)NULL_WORD); 454 __ jcc(Assembler::notEqual, stack_base_okay); 455 __ stop("stack base is zero"); 456 __ bind(stack_base_okay); 457 // verify that thread stack size is non-zero 458 __ cmpptr(stack_size, 0); 459 __ jcc(Assembler::notEqual, stack_size_okay); 460 __ stop("stack size is zero"); 461 __ bind(stack_size_okay); 462 #endif 463 464 // Add stack base to locals and subtract stack size 465 __ addptr(rax, stack_base); 466 __ subptr(rax, stack_size); 467 468 // Use the maximum number of pages we might bang. 469 const int max_pages = StackShadowPages > (StackRedPages+StackYellowPages) ? StackShadowPages : 470 (StackRedPages+StackYellowPages); 471 472 // add in the red and yellow zone sizes 473 __ addptr(rax, max_pages * page_size); 474 475 // check against the current stack bottom 476 __ cmpptr(rsp, rax); 477 __ jcc(Assembler::above, after_frame_check); 478 479 // Restore sender's sp as SP. This is necessary if the sender's 480 // frame is an extended compiled frame (see gen_c2i_adapter()) 481 // and safer anyway in case of JSR292 adaptations. 482 483 __ pop(rax); // return address must be moved if SP is changed 484 __ mov(rsp, r13); 485 __ push(rax); 486 487 // Note: the restored frame is not necessarily interpreted. 488 // Use the shared runtime version of the StackOverflowError. 489 assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated"); 490 __ jump(ExternalAddress(StubRoutines::throw_StackOverflowError_entry())); 491 492 // all done with frame size check 493 __ bind(after_frame_check); 494 } 495 496 // Allocate monitor and lock method (asm interpreter) 497 // 498 // Args: 499 // rbx: Method* 500 // r14: locals 501 // 502 // Kills: 503 // rax 504 // c_rarg0, c_rarg1, c_rarg2, c_rarg3, ...(param regs) 505 // rscratch1, rscratch2 (scratch regs) 506 void InterpreterGenerator::lock_method(void) { 507 // synchronize method 508 const Address access_flags(rbx, Method::access_flags_offset()); 509 const Address monitor_block_top( 510 rbp, 511 frame::interpreter_frame_monitor_block_top_offset * wordSize); 512 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize; 513 514 #ifdef ASSERT 515 { 516 Label L; 517 __ movl(rax, access_flags); 518 __ testl(rax, JVM_ACC_SYNCHRONIZED); 519 __ jcc(Assembler::notZero, L); 520 __ stop("method doesn't need synchronization"); 521 __ bind(L); 522 } 523 #endif // ASSERT 524 525 // get synchronization object 526 { 527 const int mirror_offset = in_bytes(Klass::java_mirror_offset()); 528 Label done; 529 __ movl(rax, access_flags); 530 __ testl(rax, JVM_ACC_STATIC); 531 // get receiver (assume this is frequent case) 532 __ movptr(rax, Address(r14, Interpreter::local_offset_in_bytes(0))); 533 __ jcc(Assembler::zero, done); 534 __ movptr(rax, Address(rbx, Method::const_offset())); 535 __ movptr(rax, Address(rax, ConstMethod::constants_offset())); 536 __ movptr(rax, Address(rax, 537 ConstantPool::pool_holder_offset_in_bytes())); 538 __ movptr(rax, Address(rax, mirror_offset)); 539 540 #ifdef ASSERT 541 { 542 Label L; 543 __ testptr(rax, rax); 544 __ jcc(Assembler::notZero, L); 545 __ stop("synchronization object is NULL"); 546 __ bind(L); 547 } 548 #endif // ASSERT 549 550 __ bind(done); 551 } 552 553 // add space for monitor & lock 554 __ subptr(rsp, entry_size); // add space for a monitor entry 555 __ movptr(monitor_block_top, rsp); // set new monitor block top 556 // store object 557 __ movptr(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax); 558 __ movptr(c_rarg1, rsp); // object address 559 __ lock_object(c_rarg1); 560 } 561 562 // Generate a fixed interpreter frame. This is identical setup for 563 // interpreted methods and for native methods hence the shared code. 564 // 565 // Args: 566 // rax: return address 567 // rbx: Method* 568 // r14: pointer to locals 569 // r13: sender sp 570 // rdx: cp cache 571 void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) { 572 // initialize fixed part of activation frame 573 __ push(rax); // save return address 574 __ enter(); // save old & set new rbp 575 __ push(r13); // set sender sp 576 __ push((int)NULL_WORD); // leave last_sp as null 577 __ movptr(r13, Address(rbx, Method::const_offset())); // get ConstMethod* 578 __ lea(r13, Address(r13, ConstMethod::codes_offset())); // get codebase 579 __ push(rbx); // save Method* 580 if (ProfileInterpreter) { 581 Label method_data_continue; 582 __ movptr(rdx, Address(rbx, in_bytes(Method::method_data_offset()))); 583 __ testptr(rdx, rdx); 584 __ jcc(Assembler::zero, method_data_continue); 585 __ addptr(rdx, in_bytes(MethodData::data_offset())); 586 __ bind(method_data_continue); 587 __ push(rdx); // set the mdp (method data pointer) 588 } else { 589 __ push(0); 590 } 591 592 __ movptr(rdx, Address(rbx, Method::const_offset())); 593 __ movptr(rdx, Address(rdx, ConstMethod::constants_offset())); 594 __ movptr(rdx, Address(rdx, ConstantPool::cache_offset_in_bytes())); 595 __ push(rdx); // set constant pool cache 596 __ push(r14); // set locals pointer 597 if (native_call) { 598 __ push(0); // no bcp 599 } else { 600 __ push(r13); // set bcp 601 } 602 __ push(0); // reserve word for pointer to expression stack bottom 603 __ movptr(Address(rsp, 0), rsp); // set expression stack bottom 604 } 605 606 // End of helpers 607 608 // Various method entries 609 //------------------------------------------------------------------------------------------------------------------------ 610 // 611 // 612 613 // Call an accessor method (assuming it is resolved, otherwise drop 614 // into vanilla (slow path) entry 615 address InterpreterGenerator::generate_accessor_entry(void) { 616 // rbx: Method* 617 618 // r13: senderSP must preserver for slow path, set SP to it on fast path 619 620 address entry_point = __ pc(); 621 Label xreturn_path; 622 623 // do fastpath for resolved accessor methods 624 if (UseFastAccessorMethods) { 625 // Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites 626 // thereof; parameter size = 1 627 // Note: We can only use this code if the getfield has been resolved 628 // and if we don't have a null-pointer exception => check for 629 // these conditions first and use slow path if necessary. 630 Label slow_path; 631 // If we need a safepoint check, generate full interpreter entry. 632 __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()), 633 SafepointSynchronize::_not_synchronized); 634 635 __ jcc(Assembler::notEqual, slow_path); 636 // rbx: method 637 __ movptr(rax, Address(rsp, wordSize)); 638 639 // check if local 0 != NULL and read field 640 __ testptr(rax, rax); 641 __ jcc(Assembler::zero, slow_path); 642 643 // read first instruction word and extract bytecode @ 1 and index @ 2 644 __ movptr(rdx, Address(rbx, Method::const_offset())); 645 __ movptr(rdi, Address(rdx, ConstMethod::constants_offset())); 646 __ movl(rdx, Address(rdx, ConstMethod::codes_offset())); 647 // Shift codes right to get the index on the right. 648 // The bytecode fetched looks like <index><0xb4><0x2a> 649 __ shrl(rdx, 2 * BitsPerByte); 650 __ shll(rdx, exact_log2(in_words(ConstantPoolCacheEntry::size()))); 651 __ movptr(rdi, Address(rdi, ConstantPool::cache_offset_in_bytes())); 652 653 // rax: local 0 654 // rbx: method 655 // rdx: constant pool cache index 656 // rdi: constant pool cache 657 658 // check if getfield has been resolved and read constant pool cache entry 659 // check the validity of the cache entry by testing whether _indices field 660 // contains Bytecode::_getfield in b1 byte. 661 assert(in_words(ConstantPoolCacheEntry::size()) == 4, 662 "adjust shift below"); 663 __ movl(rcx, 664 Address(rdi, 665 rdx, 666 Address::times_8, 667 ConstantPoolCache::base_offset() + 668 ConstantPoolCacheEntry::indices_offset())); 669 __ shrl(rcx, 2 * BitsPerByte); 670 __ andl(rcx, 0xFF); 671 __ cmpl(rcx, Bytecodes::_getfield); 672 __ jcc(Assembler::notEqual, slow_path); 673 674 // Note: constant pool entry is not valid before bytecode is resolved 675 __ movptr(rcx, 676 Address(rdi, 677 rdx, 678 Address::times_8, 679 ConstantPoolCache::base_offset() + 680 ConstantPoolCacheEntry::f2_offset())); 681 // edx: flags 682 __ movl(rdx, 683 Address(rdi, 684 rdx, 685 Address::times_8, 686 ConstantPoolCache::base_offset() + 687 ConstantPoolCacheEntry::flags_offset())); 688 689 Label notObj, notInt, notByte, notShort; 690 const Address field_address(rax, rcx, Address::times_1); 691 692 // Need to differentiate between igetfield, agetfield, bgetfield etc. 693 // because they are different sizes. 694 // Use the type from the constant pool cache 695 __ shrl(rdx, ConstantPoolCacheEntry::tos_state_shift); 696 // Make sure we don't need to mask edx after the above shift 697 ConstantPoolCacheEntry::verify_tos_state_shift(); 698 699 __ cmpl(rdx, atos); 700 __ jcc(Assembler::notEqual, notObj); 701 // atos 702 __ load_heap_oop(rax, field_address); 703 __ jmp(xreturn_path); 704 705 __ bind(notObj); 706 __ cmpl(rdx, itos); 707 __ jcc(Assembler::notEqual, notInt); 708 // itos 709 __ movl(rax, field_address); 710 __ jmp(xreturn_path); 711 712 __ bind(notInt); 713 __ cmpl(rdx, btos); 714 __ jcc(Assembler::notEqual, notByte); 715 // btos 716 __ load_signed_byte(rax, field_address); 717 __ jmp(xreturn_path); 718 719 __ bind(notByte); 720 __ cmpl(rdx, stos); 721 __ jcc(Assembler::notEqual, notShort); 722 // stos 723 __ load_signed_short(rax, field_address); 724 __ jmp(xreturn_path); 725 726 __ bind(notShort); 727 #ifdef ASSERT 728 Label okay; 729 __ cmpl(rdx, ctos); 730 __ jcc(Assembler::equal, okay); 731 __ stop("what type is this?"); 732 __ bind(okay); 733 #endif 734 // ctos 735 __ load_unsigned_short(rax, field_address); 736 737 __ bind(xreturn_path); 738 739 // _ireturn/_areturn 740 __ pop(rdi); 741 __ mov(rsp, r13); 742 __ jmp(rdi); 743 __ ret(0); 744 745 // generate a vanilla interpreter entry as the slow path 746 __ bind(slow_path); 747 (void) generate_normal_entry(false); 748 } else { 749 (void) generate_normal_entry(false); 750 } 751 752 return entry_point; 753 } 754 755 // Method entry for java.lang.ref.Reference.get. 756 address InterpreterGenerator::generate_Reference_get_entry(void) { 757 #if INCLUDE_ALL_GCS 758 // Code: _aload_0, _getfield, _areturn 759 // parameter size = 1 760 // 761 // The code that gets generated by this routine is split into 2 parts: 762 // 1. The "intrinsified" code for G1 (or any SATB based GC), 763 // 2. The slow path - which is an expansion of the regular method entry. 764 // 765 // Notes:- 766 // * In the G1 code we do not check whether we need to block for 767 // a safepoint. If G1 is enabled then we must execute the specialized 768 // code for Reference.get (except when the Reference object is null) 769 // so that we can log the value in the referent field with an SATB 770 // update buffer. 771 // If the code for the getfield template is modified so that the 772 // G1 pre-barrier code is executed when the current method is 773 // Reference.get() then going through the normal method entry 774 // will be fine. 775 // * The G1 code can, however, check the receiver object (the instance 776 // of java.lang.Reference) and jump to the slow path if null. If the 777 // Reference object is null then we obviously cannot fetch the referent 778 // and so we don't need to call the G1 pre-barrier. Thus we can use the 779 // regular method entry code to generate the NPE. 780 // 781 // This code is based on generate_accessor_enty. 782 // 783 // rbx: Method* 784 785 // r13: senderSP must preserve for slow path, set SP to it on fast path 786 787 address entry = __ pc(); 788 789 const int referent_offset = java_lang_ref_Reference::referent_offset; 790 guarantee(referent_offset > 0, "referent offset not initialized"); 791 792 if (UseG1GC) { 793 Label slow_path; 794 // rbx: method 795 796 // Check if local 0 != NULL 797 // If the receiver is null then it is OK to jump to the slow path. 798 __ movptr(rax, Address(rsp, wordSize)); 799 800 __ testptr(rax, rax); 801 __ jcc(Assembler::zero, slow_path); 802 803 // rax: local 0 804 // rbx: method (but can be used as scratch now) 805 // rdx: scratch 806 // rdi: scratch 807 808 // Generate the G1 pre-barrier code to log the value of 809 // the referent field in an SATB buffer. 810 811 // Load the value of the referent field. 812 const Address field_address(rax, referent_offset); 813 __ load_heap_oop(rax, field_address); 814 815 // Generate the G1 pre-barrier code to log the value of 816 // the referent field in an SATB buffer. 817 __ g1_write_barrier_pre(noreg /* obj */, 818 rax /* pre_val */, 819 r15_thread /* thread */, 820 rbx /* tmp */, 821 true /* tosca_live */, 822 true /* expand_call */); 823 824 // _areturn 825 __ pop(rdi); // get return address 826 __ mov(rsp, r13); // set sp to sender sp 827 __ jmp(rdi); 828 __ ret(0); 829 830 // generate a vanilla interpreter entry as the slow path 831 __ bind(slow_path); 832 (void) generate_normal_entry(false); 833 834 return entry; 835 } 836 #endif // INCLUDE_ALL_GCS 837 838 // If G1 is not enabled then attempt to go through the accessor entry point 839 // Reference.get is an accessor 840 return generate_accessor_entry(); 841 } 842 843 /** 844 * Method entry for static native methods: 845 * int java.util.zip.CRC32.update(int crc, int b) 846 */ 847 address InterpreterGenerator::generate_CRC32_update_entry() { 848 if (UseCRC32Intrinsics) { 849 address entry = __ pc(); 850 851 // rbx,: Method* 852 // r13: senderSP must preserved for slow path, set SP to it on fast path 853 // c_rarg0: scratch (rdi on non-Win64, rcx on Win64) 854 // c_rarg1: scratch (rsi on non-Win64, rdx on Win64) 855 856 Label slow_path; 857 // If we need a safepoint check, generate full interpreter entry. 858 ExternalAddress state(SafepointSynchronize::address_of_state()); 859 __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()), 860 SafepointSynchronize::_not_synchronized); 861 __ jcc(Assembler::notEqual, slow_path); 862 863 // We don't generate local frame and don't align stack because 864 // we call stub code and there is no safepoint on this path. 865 866 // Load parameters 867 const Register crc = rax; // crc 868 const Register val = c_rarg0; // source java byte value 869 const Register tbl = c_rarg1; // scratch 870 871 // Arguments are reversed on java expression stack 872 __ movl(val, Address(rsp, wordSize)); // byte value 873 __ movl(crc, Address(rsp, 2*wordSize)); // Initial CRC 874 875 __ lea(tbl, ExternalAddress(StubRoutines::crc_table_addr())); 876 __ notl(crc); // ~crc 877 __ update_byte_crc32(crc, val, tbl); 878 __ notl(crc); // ~crc 879 // result in rax 880 881 // _areturn 882 __ pop(rdi); // get return address 883 __ mov(rsp, r13); // set sp to sender sp 884 __ jmp(rdi); 885 886 // generate a vanilla native entry as the slow path 887 __ bind(slow_path); 888 889 (void) generate_native_entry(false); 890 891 return entry; 892 } 893 return generate_native_entry(false); 894 } 895 896 /** 897 * Method entry for static native methods: 898 * int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len) 899 * int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len) 900 */ 901 address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) { 902 if (UseCRC32Intrinsics) { 903 address entry = __ pc(); 904 905 // rbx,: Method* 906 // r13: senderSP must preserved for slow path, set SP to it on fast path 907 908 Label slow_path; 909 // If we need a safepoint check, generate full interpreter entry. 910 ExternalAddress state(SafepointSynchronize::address_of_state()); 911 __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()), 912 SafepointSynchronize::_not_synchronized); 913 __ jcc(Assembler::notEqual, slow_path); 914 915 // We don't generate local frame and don't align stack because 916 // we call stub code and there is no safepoint on this path. 917 918 // Load parameters 919 const Register crc = c_rarg0; // crc 920 const Register buf = c_rarg1; // source java byte array address 921 const Register len = c_rarg2; // length 922 const Register off = len; // offset (never overlaps with 'len') 923 924 // Arguments are reversed on java expression stack 925 // Calculate address of start element 926 if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) { 927 __ movptr(buf, Address(rsp, 3*wordSize)); // long buf 928 __ movl2ptr(off, Address(rsp, 2*wordSize)); // offset 929 __ addq(buf, off); // + offset 930 __ movl(crc, Address(rsp, 5*wordSize)); // Initial CRC 931 } else { 932 __ movptr(buf, Address(rsp, 3*wordSize)); // byte[] array 933 __ addptr(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size 934 __ movl2ptr(off, Address(rsp, 2*wordSize)); // offset 935 __ addq(buf, off); // + offset 936 __ movl(crc, Address(rsp, 4*wordSize)); // Initial CRC 937 } 938 // Can now load 'len' since we're finished with 'off' 939 __ movl(len, Address(rsp, wordSize)); // Length 940 941 __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32()), crc, buf, len); 942 // result in rax 943 944 // _areturn 945 __ pop(rdi); // get return address 946 __ mov(rsp, r13); // set sp to sender sp 947 __ jmp(rdi); 948 949 // generate a vanilla native entry as the slow path 950 __ bind(slow_path); 951 952 (void) generate_native_entry(false); 953 954 return entry; 955 } 956 return generate_native_entry(false); 957 } 958 959 // Interpreter stub for calling a native method. (asm interpreter) 960 // This sets up a somewhat different looking stack for calling the 961 // native method than the typical interpreter frame setup. 962 address InterpreterGenerator::generate_native_entry(bool synchronized) { 963 // determine code generation flags 964 bool inc_counter = UseCompiler || CountCompiledCalls; 965 966 // rbx: Method* 967 // r13: sender sp 968 969 address entry_point = __ pc(); 970 971 const Address constMethod (rbx, Method::const_offset()); 972 const Address access_flags (rbx, Method::access_flags_offset()); 973 const Address size_of_parameters(rcx, ConstMethod:: 974 size_of_parameters_offset()); 975 976 977 // get parameter size (always needed) 978 __ movptr(rcx, constMethod); 979 __ load_unsigned_short(rcx, size_of_parameters); 980 981 // native calls don't need the stack size check since they have no 982 // expression stack and the arguments are already on the stack and 983 // we only add a handful of words to the stack 984 985 // rbx: Method* 986 // rcx: size of parameters 987 // r13: sender sp 988 __ pop(rax); // get return address 989 990 // for natives the size of locals is zero 991 992 // compute beginning of parameters (r14) 993 __ lea(r14, Address(rsp, rcx, Address::times_8, -wordSize)); 994 995 // add 2 zero-initialized slots for native calls 996 // initialize result_handler slot 997 __ push((int) NULL_WORD); 998 // slot for oop temp 999 // (static native method holder mirror/jni oop result) 1000 __ push((int) NULL_WORD); 1001 1002 // initialize fixed part of activation frame 1003 generate_fixed_frame(true); 1004 1005 // make sure method is native & not abstract 1006 #ifdef ASSERT 1007 __ movl(rax, access_flags); 1008 { 1009 Label L; 1010 __ testl(rax, JVM_ACC_NATIVE); 1011 __ jcc(Assembler::notZero, L); 1012 __ stop("tried to execute non-native method as native"); 1013 __ bind(L); 1014 } 1015 { 1016 Label L; 1017 __ testl(rax, JVM_ACC_ABSTRACT); 1018 __ jcc(Assembler::zero, L); 1019 __ stop("tried to execute abstract method in interpreter"); 1020 __ bind(L); 1021 } 1022 #endif 1023 1024 // Since at this point in the method invocation the exception handler 1025 // would try to exit the monitor of synchronized methods which hasn't 1026 // been entered yet, we set the thread local variable 1027 // _do_not_unlock_if_synchronized to true. The remove_activation will 1028 // check this flag. 1029 1030 const Address do_not_unlock_if_synchronized(r15_thread, 1031 in_bytes(JavaThread::do_not_unlock_if_synchronized_offset())); 1032 __ movbool(do_not_unlock_if_synchronized, true); 1033 1034 // increment invocation count & check for overflow 1035 Label invocation_counter_overflow; 1036 if (inc_counter) { 1037 generate_counter_incr(&invocation_counter_overflow, NULL, NULL); 1038 } 1039 1040 Label continue_after_compile; 1041 __ bind(continue_after_compile); 1042 1043 bang_stack_shadow_pages(true); 1044 1045 // reset the _do_not_unlock_if_synchronized flag 1046 __ movbool(do_not_unlock_if_synchronized, false); 1047 1048 // check for synchronized methods 1049 // Must happen AFTER invocation_counter check and stack overflow check, 1050 // so method is not locked if overflows. 1051 if (synchronized) { 1052 lock_method(); 1053 } else { 1054 // no synchronization necessary 1055 #ifdef ASSERT 1056 { 1057 Label L; 1058 __ movl(rax, access_flags); 1059 __ testl(rax, JVM_ACC_SYNCHRONIZED); 1060 __ jcc(Assembler::zero, L); 1061 __ stop("method needs synchronization"); 1062 __ bind(L); 1063 } 1064 #endif 1065 } 1066 1067 // start execution 1068 #ifdef ASSERT 1069 { 1070 Label L; 1071 const Address monitor_block_top(rbp, 1072 frame::interpreter_frame_monitor_block_top_offset * wordSize); 1073 __ movptr(rax, monitor_block_top); 1074 __ cmpptr(rax, rsp); 1075 __ jcc(Assembler::equal, L); 1076 __ stop("broken stack frame setup in interpreter"); 1077 __ bind(L); 1078 } 1079 #endif 1080 1081 // jvmti support 1082 __ notify_method_entry(); 1083 1084 // work registers 1085 const Register method = rbx; 1086 const Register t = r11; 1087 1088 // allocate space for parameters 1089 __ get_method(method); 1090 __ movptr(t, Address(method, Method::const_offset())); 1091 __ load_unsigned_short(t, Address(t, ConstMethod::size_of_parameters_offset())); 1092 __ shll(t, Interpreter::logStackElementSize); 1093 1094 __ subptr(rsp, t); 1095 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows 1096 __ andptr(rsp, -16); // must be 16 byte boundary (see amd64 ABI) 1097 1098 // get signature handler 1099 { 1100 Label L; 1101 __ movptr(t, Address(method, Method::signature_handler_offset())); 1102 __ testptr(t, t); 1103 __ jcc(Assembler::notZero, L); 1104 __ call_VM(noreg, 1105 CAST_FROM_FN_PTR(address, 1106 InterpreterRuntime::prepare_native_call), 1107 method); 1108 __ get_method(method); 1109 __ movptr(t, Address(method, Method::signature_handler_offset())); 1110 __ bind(L); 1111 } 1112 1113 // call signature handler 1114 assert(InterpreterRuntime::SignatureHandlerGenerator::from() == r14, 1115 "adjust this code"); 1116 assert(InterpreterRuntime::SignatureHandlerGenerator::to() == rsp, 1117 "adjust this code"); 1118 assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == rscratch1, 1119 "adjust this code"); 1120 1121 // The generated handlers do not touch RBX (the method oop). 1122 // However, large signatures cannot be cached and are generated 1123 // each time here. The slow-path generator can do a GC on return, 1124 // so we must reload it after the call. 1125 __ call(t); 1126 __ get_method(method); // slow path can do a GC, reload RBX 1127 1128 1129 // result handler is in rax 1130 // set result handler 1131 __ movptr(Address(rbp, 1132 (frame::interpreter_frame_result_handler_offset) * wordSize), 1133 rax); 1134 1135 // pass mirror handle if static call 1136 { 1137 Label L; 1138 const int mirror_offset = in_bytes(Klass::java_mirror_offset()); 1139 __ movl(t, Address(method, Method::access_flags_offset())); 1140 __ testl(t, JVM_ACC_STATIC); 1141 __ jcc(Assembler::zero, L); 1142 // get mirror 1143 __ movptr(t, Address(method, Method::const_offset())); 1144 __ movptr(t, Address(t, ConstMethod::constants_offset())); 1145 __ movptr(t, Address(t, ConstantPool::pool_holder_offset_in_bytes())); 1146 __ movptr(t, Address(t, mirror_offset)); 1147 // copy mirror into activation frame 1148 __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize), 1149 t); 1150 // pass handle to mirror 1151 __ lea(c_rarg1, 1152 Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize)); 1153 __ bind(L); 1154 } 1155 1156 // get native function entry point 1157 { 1158 Label L; 1159 __ movptr(rax, Address(method, Method::native_function_offset())); 1160 ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry()); 1161 __ movptr(rscratch2, unsatisfied.addr()); 1162 __ cmpptr(rax, rscratch2); 1163 __ jcc(Assembler::notEqual, L); 1164 __ call_VM(noreg, 1165 CAST_FROM_FN_PTR(address, 1166 InterpreterRuntime::prepare_native_call), 1167 method); 1168 __ get_method(method); 1169 __ movptr(rax, Address(method, Method::native_function_offset())); 1170 __ bind(L); 1171 } 1172 1173 // pass JNIEnv 1174 __ lea(c_rarg0, Address(r15_thread, JavaThread::jni_environment_offset())); 1175 1176 // It is enough that the pc() points into the right code 1177 // segment. It does not have to be the correct return pc. 1178 __ set_last_Java_frame(rsp, rbp, (address) __ pc()); 1179 1180 // change thread state 1181 #ifdef ASSERT 1182 { 1183 Label L; 1184 __ movl(t, Address(r15_thread, JavaThread::thread_state_offset())); 1185 __ cmpl(t, _thread_in_Java); 1186 __ jcc(Assembler::equal, L); 1187 __ stop("Wrong thread state in native stub"); 1188 __ bind(L); 1189 } 1190 #endif 1191 1192 // Change state to native 1193 1194 __ movl(Address(r15_thread, JavaThread::thread_state_offset()), 1195 _thread_in_native); 1196 1197 // Call the native method. 1198 __ call(rax); 1199 // result potentially in rax or xmm0 1200 1201 // Verify or restore cpu control state after JNI call 1202 __ restore_cpu_control_state_after_jni(); 1203 1204 // NOTE: The order of these pushes is known to frame::interpreter_frame_result 1205 // in order to extract the result of a method call. If the order of these 1206 // pushes change or anything else is added to the stack then the code in 1207 // interpreter_frame_result must also change. 1208 1209 __ push(dtos); 1210 __ push(ltos); 1211 1212 // change thread state 1213 __ movl(Address(r15_thread, JavaThread::thread_state_offset()), 1214 _thread_in_native_trans); 1215 1216 if (os::is_MP()) { 1217 if (UseMembar) { 1218 // Force this write out before the read below 1219 __ membar(Assembler::Membar_mask_bits( 1220 Assembler::LoadLoad | Assembler::LoadStore | 1221 Assembler::StoreLoad | Assembler::StoreStore)); 1222 } else { 1223 // Write serialization page so VM thread can do a pseudo remote membar. 1224 // We use the current thread pointer to calculate a thread specific 1225 // offset to write to within the page. This minimizes bus traffic 1226 // due to cache line collision. 1227 __ serialize_memory(r15_thread, rscratch2); 1228 } 1229 } 1230 1231 // check for safepoint operation in progress and/or pending suspend requests 1232 { 1233 Label Continue; 1234 __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()), 1235 SafepointSynchronize::_not_synchronized); 1236 1237 Label L; 1238 __ jcc(Assembler::notEqual, L); 1239 __ cmpl(Address(r15_thread, JavaThread::suspend_flags_offset()), 0); 1240 __ jcc(Assembler::equal, Continue); 1241 __ bind(L); 1242 1243 // Don't use call_VM as it will see a possible pending exception 1244 // and forward it and never return here preventing us from 1245 // clearing _last_native_pc down below. Also can't use 1246 // call_VM_leaf either as it will check to see if r13 & r14 are 1247 // preserved and correspond to the bcp/locals pointers. So we do a 1248 // runtime call by hand. 1249 // 1250 __ mov(c_rarg0, r15_thread); 1251 __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM) 1252 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows 1253 __ andptr(rsp, -16); // align stack as required by ABI 1254 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans))); 1255 __ mov(rsp, r12); // restore sp 1256 __ reinit_heapbase(); 1257 __ bind(Continue); 1258 } 1259 1260 // change thread state 1261 __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_Java); 1262 1263 // reset_last_Java_frame 1264 __ reset_last_Java_frame(true, true); 1265 1266 // reset handle block 1267 __ movptr(t, Address(r15_thread, JavaThread::active_handles_offset())); 1268 __ movptr(Address(t, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD); 1269 1270 // If result is an oop unbox and store it in frame where gc will see it 1271 // and result handler will pick it up 1272 1273 { 1274 Label no_oop, store_result; 1275 __ lea(t, ExternalAddress(AbstractInterpreter::result_handler(T_OBJECT))); 1276 __ cmpptr(t, Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize)); 1277 __ jcc(Assembler::notEqual, no_oop); 1278 // retrieve result 1279 __ pop(ltos); 1280 __ testptr(rax, rax); 1281 __ jcc(Assembler::zero, store_result); 1282 __ movptr(rax, Address(rax, 0)); 1283 __ bind(store_result); 1284 __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize), rax); 1285 // keep stack depth as expected by pushing oop which will eventually be discarde 1286 __ push(ltos); 1287 __ bind(no_oop); 1288 } 1289 1290 1291 { 1292 Label no_reguard; 1293 __ cmpl(Address(r15_thread, JavaThread::stack_guard_state_offset()), 1294 JavaThread::stack_guard_yellow_disabled); 1295 __ jcc(Assembler::notEqual, no_reguard); 1296 1297 __ pusha(); // XXX only save smashed registers 1298 __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM) 1299 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows 1300 __ andptr(rsp, -16); // align stack as required by ABI 1301 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages))); 1302 __ mov(rsp, r12); // restore sp 1303 __ popa(); // XXX only restore smashed registers 1304 __ reinit_heapbase(); 1305 1306 __ bind(no_reguard); 1307 } 1308 1309 1310 // The method register is junk from after the thread_in_native transition 1311 // until here. Also can't call_VM until the bcp has been 1312 // restored. Need bcp for throwing exception below so get it now. 1313 __ get_method(method); 1314 1315 // restore r13 to have legal interpreter frame, i.e., bci == 0 <=> 1316 // r13 == code_base() 1317 __ movptr(r13, Address(method, Method::const_offset())); // get ConstMethod* 1318 __ lea(r13, Address(r13, ConstMethod::codes_offset())); // get codebase 1319 // handle exceptions (exception handling will handle unlocking!) 1320 { 1321 Label L; 1322 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD); 1323 __ jcc(Assembler::zero, L); 1324 // Note: At some point we may want to unify this with the code 1325 // used in call_VM_base(); i.e., we should use the 1326 // StubRoutines::forward_exception code. For now this doesn't work 1327 // here because the rsp is not correctly set at this point. 1328 __ MacroAssembler::call_VM(noreg, 1329 CAST_FROM_FN_PTR(address, 1330 InterpreterRuntime::throw_pending_exception)); 1331 __ should_not_reach_here(); 1332 __ bind(L); 1333 } 1334 1335 // do unlocking if necessary 1336 { 1337 Label L; 1338 __ movl(t, Address(method, Method::access_flags_offset())); 1339 __ testl(t, JVM_ACC_SYNCHRONIZED); 1340 __ jcc(Assembler::zero, L); 1341 // the code below should be shared with interpreter macro 1342 // assembler implementation 1343 { 1344 Label unlock; 1345 // BasicObjectLock will be first in list, since this is a 1346 // synchronized method. However, need to check that the object 1347 // has not been unlocked by an explicit monitorexit bytecode. 1348 const Address monitor(rbp, 1349 (intptr_t)(frame::interpreter_frame_initial_sp_offset * 1350 wordSize - sizeof(BasicObjectLock))); 1351 1352 // monitor expect in c_rarg1 for slow unlock path 1353 __ lea(c_rarg1, monitor); // address of first monitor 1354 1355 __ movptr(t, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes())); 1356 __ testptr(t, t); 1357 __ jcc(Assembler::notZero, unlock); 1358 1359 // Entry already unlocked, need to throw exception 1360 __ MacroAssembler::call_VM(noreg, 1361 CAST_FROM_FN_PTR(address, 1362 InterpreterRuntime::throw_illegal_monitor_state_exception)); 1363 __ should_not_reach_here(); 1364 1365 __ bind(unlock); 1366 __ unlock_object(c_rarg1); 1367 } 1368 __ bind(L); 1369 } 1370 1371 // jvmti support 1372 // Note: This must happen _after_ handling/throwing any exceptions since 1373 // the exception handler code notifies the runtime of method exits 1374 // too. If this happens before, method entry/exit notifications are 1375 // not properly paired (was bug - gri 11/22/99). 1376 __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI); 1377 1378 // restore potential result in edx:eax, call result handler to 1379 // restore potential result in ST0 & handle result 1380 1381 __ pop(ltos); 1382 __ pop(dtos); 1383 1384 __ movptr(t, Address(rbp, 1385 (frame::interpreter_frame_result_handler_offset) * wordSize)); 1386 __ call(t); 1387 1388 // remove activation 1389 __ movptr(t, Address(rbp, 1390 frame::interpreter_frame_sender_sp_offset * 1391 wordSize)); // get sender sp 1392 __ leave(); // remove frame anchor 1393 __ pop(rdi); // get return address 1394 __ mov(rsp, t); // set sp to sender sp 1395 __ jmp(rdi); 1396 1397 if (inc_counter) { 1398 // Handle overflow of counter and compile method 1399 __ bind(invocation_counter_overflow); 1400 generate_counter_overflow(&continue_after_compile); 1401 } 1402 1403 return entry_point; 1404 } 1405 1406 // 1407 // Generic interpreted method entry to (asm) interpreter 1408 // 1409 address InterpreterGenerator::generate_normal_entry(bool synchronized) { 1410 // determine code generation flags 1411 bool inc_counter = UseCompiler || CountCompiledCalls; 1412 1413 // ebx: Method* 1414 // r13: sender sp 1415 address entry_point = __ pc(); 1416 1417 const Address constMethod(rbx, Method::const_offset()); 1418 const Address access_flags(rbx, Method::access_flags_offset()); 1419 const Address size_of_parameters(rdx, 1420 ConstMethod::size_of_parameters_offset()); 1421 const Address size_of_locals(rdx, ConstMethod::size_of_locals_offset()); 1422 1423 1424 // get parameter size (always needed) 1425 __ movptr(rdx, constMethod); 1426 __ load_unsigned_short(rcx, size_of_parameters); 1427 1428 // rbx: Method* 1429 // rcx: size of parameters 1430 // r13: sender_sp (could differ from sp+wordSize if we were called via c2i ) 1431 1432 __ load_unsigned_short(rdx, size_of_locals); // get size of locals in words 1433 __ subl(rdx, rcx); // rdx = no. of additional locals 1434 1435 // YYY 1436 // __ incrementl(rdx); 1437 // __ andl(rdx, -2); 1438 1439 // see if we've got enough room on the stack for locals plus overhead. 1440 generate_stack_overflow_check(); 1441 1442 // get return address 1443 __ pop(rax); 1444 1445 // compute beginning of parameters (r14) 1446 __ lea(r14, Address(rsp, rcx, Address::times_8, -wordSize)); 1447 1448 // rdx - # of additional locals 1449 // allocate space for locals 1450 // explicitly initialize locals 1451 { 1452 Label exit, loop; 1453 __ testl(rdx, rdx); 1454 __ jcc(Assembler::lessEqual, exit); // do nothing if rdx <= 0 1455 __ bind(loop); 1456 __ push((int) NULL_WORD); // initialize local variables 1457 __ decrementl(rdx); // until everything initialized 1458 __ jcc(Assembler::greater, loop); 1459 __ bind(exit); 1460 } 1461 1462 // initialize fixed part of activation frame 1463 generate_fixed_frame(false); 1464 1465 // make sure method is not native & not abstract 1466 #ifdef ASSERT 1467 __ movl(rax, access_flags); 1468 { 1469 Label L; 1470 __ testl(rax, JVM_ACC_NATIVE); 1471 __ jcc(Assembler::zero, L); 1472 __ stop("tried to execute native method as non-native"); 1473 __ bind(L); 1474 } 1475 { 1476 Label L; 1477 __ testl(rax, JVM_ACC_ABSTRACT); 1478 __ jcc(Assembler::zero, L); 1479 __ stop("tried to execute abstract method in interpreter"); 1480 __ bind(L); 1481 } 1482 #endif 1483 1484 // Since at this point in the method invocation the exception 1485 // handler would try to exit the monitor of synchronized methods 1486 // which hasn't been entered yet, we set the thread local variable 1487 // _do_not_unlock_if_synchronized to true. The remove_activation 1488 // will check this flag. 1489 1490 const Address do_not_unlock_if_synchronized(r15_thread, 1491 in_bytes(JavaThread::do_not_unlock_if_synchronized_offset())); 1492 __ movbool(do_not_unlock_if_synchronized, true); 1493 1494 // increment invocation count & check for overflow 1495 Label invocation_counter_overflow; 1496 Label profile_method; 1497 Label profile_method_continue; 1498 if (inc_counter) { 1499 generate_counter_incr(&invocation_counter_overflow, 1500 &profile_method, 1501 &profile_method_continue); 1502 if (ProfileInterpreter) { 1503 __ bind(profile_method_continue); 1504 } 1505 } 1506 1507 Label continue_after_compile; 1508 __ bind(continue_after_compile); 1509 1510 // check for synchronized interpreted methods 1511 bang_stack_shadow_pages(false); 1512 1513 // reset the _do_not_unlock_if_synchronized flag 1514 __ movbool(do_not_unlock_if_synchronized, false); 1515 1516 // check for synchronized methods 1517 // Must happen AFTER invocation_counter check and stack overflow check, 1518 // so method is not locked if overflows. 1519 if (synchronized) { 1520 // Allocate monitor and lock method 1521 lock_method(); 1522 } else { 1523 // no synchronization necessary 1524 #ifdef ASSERT 1525 { 1526 Label L; 1527 __ movl(rax, access_flags); 1528 __ testl(rax, JVM_ACC_SYNCHRONIZED); 1529 __ jcc(Assembler::zero, L); 1530 __ stop("method needs synchronization"); 1531 __ bind(L); 1532 } 1533 #endif 1534 } 1535 1536 // start execution 1537 #ifdef ASSERT 1538 { 1539 Label L; 1540 const Address monitor_block_top (rbp, 1541 frame::interpreter_frame_monitor_block_top_offset * wordSize); 1542 __ movptr(rax, monitor_block_top); 1543 __ cmpptr(rax, rsp); 1544 __ jcc(Assembler::equal, L); 1545 __ stop("broken stack frame setup in interpreter"); 1546 __ bind(L); 1547 } 1548 #endif 1549 1550 // jvmti support 1551 __ notify_method_entry(); 1552 1553 __ dispatch_next(vtos); 1554 1555 // invocation counter overflow 1556 if (inc_counter) { 1557 if (ProfileInterpreter) { 1558 // We have decided to profile this method in the interpreter 1559 __ bind(profile_method); 1560 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method)); 1561 __ set_method_data_pointer_for_bcp(); 1562 __ get_method(rbx); 1563 __ jmp(profile_method_continue); 1564 } 1565 // Handle overflow of counter and compile method 1566 __ bind(invocation_counter_overflow); 1567 generate_counter_overflow(&continue_after_compile); 1568 } 1569 1570 return entry_point; 1571 } 1572 1573 // Entry points 1574 // 1575 // Here we generate the various kind of entries into the interpreter. 1576 // The two main entry type are generic bytecode methods and native 1577 // call method. These both come in synchronized and non-synchronized 1578 // versions but the frame layout they create is very similar. The 1579 // other method entry types are really just special purpose entries 1580 // that are really entry and interpretation all in one. These are for 1581 // trivial methods like accessor, empty, or special math methods. 1582 // 1583 // When control flow reaches any of the entry types for the interpreter 1584 // the following holds -> 1585 // 1586 // Arguments: 1587 // 1588 // rbx: Method* 1589 // 1590 // Stack layout immediately at entry 1591 // 1592 // [ return address ] <--- rsp 1593 // [ parameter n ] 1594 // ... 1595 // [ parameter 1 ] 1596 // [ expression stack ] (caller's java expression stack) 1597 1598 // Assuming that we don't go to one of the trivial specialized entries 1599 // the stack will look like below when we are ready to execute the 1600 // first bytecode (or call the native routine). The register usage 1601 // will be as the template based interpreter expects (see 1602 // interpreter_amd64.hpp). 1603 // 1604 // local variables follow incoming parameters immediately; i.e. 1605 // the return address is moved to the end of the locals). 1606 // 1607 // [ monitor entry ] <--- rsp 1608 // ... 1609 // [ monitor entry ] 1610 // [ expr. stack bottom ] 1611 // [ saved r13 ] 1612 // [ current r14 ] 1613 // [ Method* ] 1614 // [ saved ebp ] <--- rbp 1615 // [ return address ] 1616 // [ local variable m ] 1617 // ... 1618 // [ local variable 1 ] 1619 // [ parameter n ] 1620 // ... 1621 // [ parameter 1 ] <--- r14 1622 1623 address AbstractInterpreterGenerator::generate_method_entry( 1624 AbstractInterpreter::MethodKind kind) { 1625 // determine code generation flags 1626 bool synchronized = false; 1627 address entry_point = NULL; 1628 InterpreterGenerator* ig_this = (InterpreterGenerator*)this; 1629 1630 switch (kind) { 1631 case Interpreter::zerolocals : break; 1632 case Interpreter::zerolocals_synchronized: synchronized = true; break; 1633 case Interpreter::native : entry_point = ig_this->generate_native_entry(false); break; 1634 case Interpreter::native_synchronized : entry_point = ig_this->generate_native_entry(true); break; 1635 case Interpreter::empty : entry_point = ig_this->generate_empty_entry(); break; 1636 case Interpreter::accessor : entry_point = ig_this->generate_accessor_entry(); break; 1637 case Interpreter::abstract : entry_point = ig_this->generate_abstract_entry(); break; 1638 1639 case Interpreter::java_lang_math_sin : // fall thru 1640 case Interpreter::java_lang_math_cos : // fall thru 1641 case Interpreter::java_lang_math_tan : // fall thru 1642 case Interpreter::java_lang_math_abs : // fall thru 1643 case Interpreter::java_lang_math_log : // fall thru 1644 case Interpreter::java_lang_math_log10 : // fall thru 1645 case Interpreter::java_lang_math_sqrt : // fall thru 1646 case Interpreter::java_lang_math_pow : // fall thru 1647 case Interpreter::java_lang_math_exp : entry_point = ig_this->generate_math_entry(kind); break; 1648 case Interpreter::java_lang_ref_reference_get 1649 : entry_point = ig_this->generate_Reference_get_entry(); break; 1650 case Interpreter::java_util_zip_CRC32_update 1651 : entry_point = ig_this->generate_CRC32_update_entry(); break; 1652 case Interpreter::java_util_zip_CRC32_updateBytes 1653 : // fall thru 1654 case Interpreter::java_util_zip_CRC32_updateByteBuffer 1655 : entry_point = ig_this->generate_CRC32_updateBytes_entry(kind); break; 1656 default: 1657 fatal(err_msg("unexpected method kind: %d", kind)); 1658 break; 1659 } 1660 1661 if (entry_point) { 1662 return entry_point; 1663 } 1664 1665 return ig_this->generate_normal_entry(synchronized); 1666 } 1667 1668 // These should never be compiled since the interpreter will prefer 1669 // the compiled version to the intrinsic version. 1670 bool AbstractInterpreter::can_be_compiled(methodHandle m) { 1671 switch (method_kind(m)) { 1672 case Interpreter::java_lang_math_sin : // fall thru 1673 case Interpreter::java_lang_math_cos : // fall thru 1674 case Interpreter::java_lang_math_tan : // fall thru 1675 case Interpreter::java_lang_math_abs : // fall thru 1676 case Interpreter::java_lang_math_log : // fall thru 1677 case Interpreter::java_lang_math_log10 : // fall thru 1678 case Interpreter::java_lang_math_sqrt : // fall thru 1679 case Interpreter::java_lang_math_pow : // fall thru 1680 case Interpreter::java_lang_math_exp : 1681 return false; 1682 default: 1683 return true; 1684 } 1685 } 1686 1687 // How much stack a method activation needs in words. 1688 int AbstractInterpreter::size_top_interpreter_activation(Method* method) { 1689 const int entry_size = frame::interpreter_frame_monitor_size(); 1690 1691 // total overhead size: entry_size + (saved rbp thru expr stack 1692 // bottom). be sure to change this if you add/subtract anything 1693 // to/from the overhead area 1694 const int overhead_size = 1695 -(frame::interpreter_frame_initial_sp_offset) + entry_size; 1696 1697 const int stub_code = frame::entry_frame_after_call_words; 1698 const int method_stack = (method->max_locals() + method->max_stack()) * 1699 Interpreter::stackElementWords; 1700 return (overhead_size + method_stack + stub_code); 1701 } 1702 1703 int AbstractInterpreter::layout_activation(Method* method, 1704 int tempcount, 1705 int popframe_extra_args, 1706 int moncount, 1707 int caller_actual_parameters, 1708 int callee_param_count, 1709 int callee_locals, 1710 frame* caller, 1711 frame* interpreter_frame, 1712 bool is_top_frame, 1713 bool is_bottom_frame) { 1714 // Note: This calculation must exactly parallel the frame setup 1715 // in AbstractInterpreterGenerator::generate_method_entry. 1716 // If interpreter_frame!=NULL, set up the method, locals, and monitors. 1717 // The frame interpreter_frame, if not NULL, is guaranteed to be the 1718 // right size, as determined by a previous call to this method. 1719 // It is also guaranteed to be walkable even though it is in a skeletal state 1720 1721 // fixed size of an interpreter frame: 1722 int max_locals = method->max_locals() * Interpreter::stackElementWords; 1723 int extra_locals = (method->max_locals() - method->size_of_parameters()) * 1724 Interpreter::stackElementWords; 1725 1726 int overhead = frame::sender_sp_offset - 1727 frame::interpreter_frame_initial_sp_offset; 1728 // Our locals were accounted for by the caller (or last_frame_adjust 1729 // on the transistion) Since the callee parameters already account 1730 // for the callee's params we only need to account for the extra 1731 // locals. 1732 int size = overhead + 1733 (callee_locals - callee_param_count)*Interpreter::stackElementWords + 1734 moncount * frame::interpreter_frame_monitor_size() + 1735 tempcount* Interpreter::stackElementWords + popframe_extra_args; 1736 if (interpreter_frame != NULL) { 1737 #ifdef ASSERT 1738 if (!EnableInvokeDynamic) 1739 // @@@ FIXME: Should we correct interpreter_frame_sender_sp in the calling sequences? 1740 // Probably, since deoptimization doesn't work yet. 1741 assert(caller->unextended_sp() == interpreter_frame->interpreter_frame_sender_sp(), "Frame not properly walkable"); 1742 assert(caller->sp() == interpreter_frame->sender_sp(), "Frame not properly walkable(2)"); 1743 #endif 1744 1745 interpreter_frame->interpreter_frame_set_method(method); 1746 // NOTE the difference in using sender_sp and 1747 // interpreter_frame_sender_sp interpreter_frame_sender_sp is 1748 // the original sp of the caller (the unextended_sp) and 1749 // sender_sp is fp+16 XXX 1750 intptr_t* locals = interpreter_frame->sender_sp() + max_locals - 1; 1751 1752 #ifdef ASSERT 1753 if (caller->is_interpreted_frame()) { 1754 assert(locals < caller->fp() + frame::interpreter_frame_initial_sp_offset, "bad placement"); 1755 } 1756 #endif 1757 1758 interpreter_frame->interpreter_frame_set_locals(locals); 1759 BasicObjectLock* montop = interpreter_frame->interpreter_frame_monitor_begin(); 1760 BasicObjectLock* monbot = montop - moncount; 1761 interpreter_frame->interpreter_frame_set_monitor_end(monbot); 1762 1763 // Set last_sp 1764 intptr_t* esp = (intptr_t*) monbot - 1765 tempcount*Interpreter::stackElementWords - 1766 popframe_extra_args; 1767 interpreter_frame->interpreter_frame_set_last_sp(esp); 1768 1769 // All frames but the initial (oldest) interpreter frame we fill in have 1770 // a value for sender_sp that allows walking the stack but isn't 1771 // truly correct. Correct the value here. 1772 if (extra_locals != 0 && 1773 interpreter_frame->sender_sp() == 1774 interpreter_frame->interpreter_frame_sender_sp()) { 1775 interpreter_frame->set_interpreter_frame_sender_sp(caller->sp() + 1776 extra_locals); 1777 } 1778 *interpreter_frame->interpreter_frame_cache_addr() = 1779 method->constants()->cache(); 1780 } 1781 return size; 1782 } 1783 1784 //----------------------------------------------------------------------------- 1785 // Exceptions 1786 1787 void TemplateInterpreterGenerator::generate_throw_exception() { 1788 // Entry point in previous activation (i.e., if the caller was 1789 // interpreted) 1790 Interpreter::_rethrow_exception_entry = __ pc(); 1791 // Restore sp to interpreter_frame_last_sp even though we are going 1792 // to empty the expression stack for the exception processing. 1793 __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD); 1794 // rax: exception 1795 // rdx: return address/pc that threw exception 1796 __ restore_bcp(); // r13 points to call/send 1797 __ restore_locals(); 1798 __ reinit_heapbase(); // restore r12 as heapbase. 1799 // Entry point for exceptions thrown within interpreter code 1800 Interpreter::_throw_exception_entry = __ pc(); 1801 // expression stack is undefined here 1802 // rax: exception 1803 // r13: exception bcp 1804 __ verify_oop(rax); 1805 __ mov(c_rarg1, rax); 1806 1807 // expression stack must be empty before entering the VM in case of 1808 // an exception 1809 __ empty_expression_stack(); 1810 // find exception handler address and preserve exception oop 1811 __ call_VM(rdx, 1812 CAST_FROM_FN_PTR(address, 1813 InterpreterRuntime::exception_handler_for_exception), 1814 c_rarg1); 1815 // rax: exception handler entry point 1816 // rdx: preserved exception oop 1817 // r13: bcp for exception handler 1818 __ push_ptr(rdx); // push exception which is now the only value on the stack 1819 __ jmp(rax); // jump to exception handler (may be _remove_activation_entry!) 1820 1821 // If the exception is not handled in the current frame the frame is 1822 // removed and the exception is rethrown (i.e. exception 1823 // continuation is _rethrow_exception). 1824 // 1825 // Note: At this point the bci is still the bxi for the instruction 1826 // which caused the exception and the expression stack is 1827 // empty. Thus, for any VM calls at this point, GC will find a legal 1828 // oop map (with empty expression stack). 1829 1830 // In current activation 1831 // tos: exception 1832 // esi: exception bcp 1833 1834 // 1835 // JVMTI PopFrame support 1836 // 1837 1838 Interpreter::_remove_activation_preserving_args_entry = __ pc(); 1839 __ empty_expression_stack(); 1840 // Set the popframe_processing bit in pending_popframe_condition 1841 // indicating that we are currently handling popframe, so that 1842 // call_VMs that may happen later do not trigger new popframe 1843 // handling cycles. 1844 __ movl(rdx, Address(r15_thread, JavaThread::popframe_condition_offset())); 1845 __ orl(rdx, JavaThread::popframe_processing_bit); 1846 __ movl(Address(r15_thread, JavaThread::popframe_condition_offset()), rdx); 1847 1848 { 1849 // Check to see whether we are returning to a deoptimized frame. 1850 // (The PopFrame call ensures that the caller of the popped frame is 1851 // either interpreted or compiled and deoptimizes it if compiled.) 1852 // In this case, we can't call dispatch_next() after the frame is 1853 // popped, but instead must save the incoming arguments and restore 1854 // them after deoptimization has occurred. 1855 // 1856 // Note that we don't compare the return PC against the 1857 // deoptimization blob's unpack entry because of the presence of 1858 // adapter frames in C2. 1859 Label caller_not_deoptimized; 1860 __ movptr(c_rarg1, Address(rbp, frame::return_addr_offset * wordSize)); 1861 __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, 1862 InterpreterRuntime::interpreter_contains), c_rarg1); 1863 __ testl(rax, rax); 1864 __ jcc(Assembler::notZero, caller_not_deoptimized); 1865 1866 // Compute size of arguments for saving when returning to 1867 // deoptimized caller 1868 __ get_method(rax); 1869 __ movptr(rax, Address(rax, Method::const_offset())); 1870 __ load_unsigned_short(rax, Address(rax, in_bytes(ConstMethod:: 1871 size_of_parameters_offset()))); 1872 __ shll(rax, Interpreter::logStackElementSize); 1873 __ restore_locals(); // XXX do we need this? 1874 __ subptr(r14, rax); 1875 __ addptr(r14, wordSize); 1876 // Save these arguments 1877 __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, 1878 Deoptimization:: 1879 popframe_preserve_args), 1880 r15_thread, rax, r14); 1881 1882 __ remove_activation(vtos, rdx, 1883 /* throw_monitor_exception */ false, 1884 /* install_monitor_exception */ false, 1885 /* notify_jvmdi */ false); 1886 1887 // Inform deoptimization that it is responsible for restoring 1888 // these arguments 1889 __ movl(Address(r15_thread, JavaThread::popframe_condition_offset()), 1890 JavaThread::popframe_force_deopt_reexecution_bit); 1891 1892 // Continue in deoptimization handler 1893 __ jmp(rdx); 1894 1895 __ bind(caller_not_deoptimized); 1896 } 1897 1898 __ remove_activation(vtos, rdx, /* rdx result (retaddr) is not used */ 1899 /* throw_monitor_exception */ false, 1900 /* install_monitor_exception */ false, 1901 /* notify_jvmdi */ false); 1902 1903 // Finish with popframe handling 1904 // A previous I2C followed by a deoptimization might have moved the 1905 // outgoing arguments further up the stack. PopFrame expects the 1906 // mutations to those outgoing arguments to be preserved and other 1907 // constraints basically require this frame to look exactly as 1908 // though it had previously invoked an interpreted activation with 1909 // no space between the top of the expression stack (current 1910 // last_sp) and the top of stack. Rather than force deopt to 1911 // maintain this kind of invariant all the time we call a small 1912 // fixup routine to move the mutated arguments onto the top of our 1913 // expression stack if necessary. 1914 __ mov(c_rarg1, rsp); 1915 __ movptr(c_rarg2, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize)); 1916 // PC must point into interpreter here 1917 __ set_last_Java_frame(noreg, rbp, __ pc()); 1918 __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), r15_thread, c_rarg1, c_rarg2); 1919 __ reset_last_Java_frame(true, true); 1920 // Restore the last_sp and null it out 1921 __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize)); 1922 __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD); 1923 1924 __ restore_bcp(); // XXX do we need this? 1925 __ restore_locals(); // XXX do we need this? 1926 // The method data pointer was incremented already during 1927 // call profiling. We have to restore the mdp for the current bcp. 1928 if (ProfileInterpreter) { 1929 __ set_method_data_pointer_for_bcp(); 1930 } 1931 1932 // Clear the popframe condition flag 1933 __ movl(Address(r15_thread, JavaThread::popframe_condition_offset()), 1934 JavaThread::popframe_inactive); 1935 1936 #if INCLUDE_JVMTI 1937 if (EnableInvokeDynamic) { 1938 Label L_done; 1939 const Register local0 = r14; 1940 1941 __ cmpb(Address(r13, 0), Bytecodes::_invokestatic); 1942 __ jcc(Assembler::notEqual, L_done); 1943 1944 // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call. 1945 // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL. 1946 1947 __ get_method(rdx); 1948 __ movptr(rax, Address(local0, 0)); 1949 __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), rax, rdx, r13); 1950 1951 __ testptr(rax, rax); 1952 __ jcc(Assembler::zero, L_done); 1953 1954 __ movptr(Address(rbx, 0), rax); 1955 __ bind(L_done); 1956 } 1957 #endif // INCLUDE_JVMTI 1958 1959 __ dispatch_next(vtos); 1960 // end of PopFrame support 1961 1962 Interpreter::_remove_activation_entry = __ pc(); 1963 1964 // preserve exception over this code sequence 1965 __ pop_ptr(rax); 1966 __ movptr(Address(r15_thread, JavaThread::vm_result_offset()), rax); 1967 // remove the activation (without doing throws on illegalMonitorExceptions) 1968 __ remove_activation(vtos, rdx, false, true, false); 1969 // restore exception 1970 __ get_vm_result(rax, r15_thread); 1971 1972 // In between activations - previous activation type unknown yet 1973 // compute continuation point - the continuation point expects the 1974 // following registers set up: 1975 // 1976 // rax: exception 1977 // rdx: return address/pc that threw exception 1978 // rsp: expression stack of caller 1979 // rbp: ebp of caller 1980 __ push(rax); // save exception 1981 __ push(rdx); // save return address 1982 __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, 1983 SharedRuntime::exception_handler_for_return_address), 1984 r15_thread, rdx); 1985 __ mov(rbx, rax); // save exception handler 1986 __ pop(rdx); // restore return address 1987 __ pop(rax); // restore exception 1988 // Note that an "issuing PC" is actually the next PC after the call 1989 __ jmp(rbx); // jump to exception 1990 // handler of caller 1991 } 1992 1993 1994 // 1995 // JVMTI ForceEarlyReturn support 1996 // 1997 address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) { 1998 address entry = __ pc(); 1999 2000 __ restore_bcp(); 2001 __ restore_locals(); 2002 __ empty_expression_stack(); 2003 __ load_earlyret_value(state); 2004 2005 __ movptr(rdx, Address(r15_thread, JavaThread::jvmti_thread_state_offset())); 2006 Address cond_addr(rdx, JvmtiThreadState::earlyret_state_offset()); 2007 2008 // Clear the earlyret state 2009 __ movl(cond_addr, JvmtiThreadState::earlyret_inactive); 2010 2011 __ remove_activation(state, rsi, 2012 false, /* throw_monitor_exception */ 2013 false, /* install_monitor_exception */ 2014 true); /* notify_jvmdi */ 2015 __ jmp(rsi); 2016 2017 return entry; 2018 } // end of ForceEarlyReturn support 2019 2020 2021 //----------------------------------------------------------------------------- 2022 // Helper for vtos entry point generation 2023 2024 void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t, 2025 address& bep, 2026 address& cep, 2027 address& sep, 2028 address& aep, 2029 address& iep, 2030 address& lep, 2031 address& fep, 2032 address& dep, 2033 address& vep) { 2034 assert(t->is_valid() && t->tos_in() == vtos, "illegal template"); 2035 Label L; 2036 aep = __ pc(); __ push_ptr(); __ jmp(L); 2037 fep = __ pc(); __ push_f(); __ jmp(L); 2038 dep = __ pc(); __ push_d(); __ jmp(L); 2039 lep = __ pc(); __ push_l(); __ jmp(L); 2040 bep = cep = sep = 2041 iep = __ pc(); __ push_i(); 2042 vep = __ pc(); 2043 __ bind(L); 2044 generate_and_dispatch(t); 2045 } 2046 2047 2048 //----------------------------------------------------------------------------- 2049 // Generation of individual instructions 2050 2051 // helpers for generate_and_dispatch 2052 2053 2054 InterpreterGenerator::InterpreterGenerator(StubQueue* code) 2055 : TemplateInterpreterGenerator(code) { 2056 generate_all(); // down here so it can be "virtual" 2057 } 2058 2059 //----------------------------------------------------------------------------- 2060 2061 // Non-product code 2062 #ifndef PRODUCT 2063 address TemplateInterpreterGenerator::generate_trace_code(TosState state) { 2064 address entry = __ pc(); 2065 2066 __ push(state); 2067 __ push(c_rarg0); 2068 __ push(c_rarg1); 2069 __ push(c_rarg2); 2070 __ push(c_rarg3); 2071 __ mov(c_rarg2, rax); // Pass itos 2072 #ifdef _WIN64 2073 __ movflt(xmm3, xmm0); // Pass ftos 2074 #endif 2075 __ call_VM(noreg, 2076 CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode), 2077 c_rarg1, c_rarg2, c_rarg3); 2078 __ pop(c_rarg3); 2079 __ pop(c_rarg2); 2080 __ pop(c_rarg1); 2081 __ pop(c_rarg0); 2082 __ pop(state); 2083 __ ret(0); // return from result handler 2084 2085 return entry; 2086 } 2087 2088 void TemplateInterpreterGenerator::count_bytecode() { 2089 __ incrementl(ExternalAddress((address) &BytecodeCounter::_counter_value)); 2090 } 2091 2092 void TemplateInterpreterGenerator::histogram_bytecode(Template* t) { 2093 __ incrementl(ExternalAddress((address) &BytecodeHistogram::_counters[t->bytecode()])); 2094 } 2095 2096 void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) { 2097 __ mov32(rbx, ExternalAddress((address) &BytecodePairHistogram::_index)); 2098 __ shrl(rbx, BytecodePairHistogram::log2_number_of_codes); 2099 __ orl(rbx, 2100 ((int) t->bytecode()) << 2101 BytecodePairHistogram::log2_number_of_codes); 2102 __ mov32(ExternalAddress((address) &BytecodePairHistogram::_index), rbx); 2103 __ lea(rscratch1, ExternalAddress((address) BytecodePairHistogram::_counters)); 2104 __ incrementl(Address(rscratch1, rbx, Address::times_4)); 2105 } 2106 2107 2108 void TemplateInterpreterGenerator::trace_bytecode(Template* t) { 2109 // Call a little run-time stub to avoid blow-up for each bytecode. 2110 // The run-time runtime saves the right registers, depending on 2111 // the tosca in-state for the given template. 2112 2113 assert(Interpreter::trace_code(t->tos_in()) != NULL, 2114 "entry must have been generated"); 2115 __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM) 2116 __ andptr(rsp, -16); // align stack as required by ABI 2117 __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in()))); 2118 __ mov(rsp, r12); // restore sp 2119 __ reinit_heapbase(); 2120 } 2121 2122 2123 void TemplateInterpreterGenerator::stop_interpreter_at() { 2124 Label L; 2125 __ cmp32(ExternalAddress((address) &BytecodeCounter::_counter_value), 2126 StopInterpreterAt); 2127 __ jcc(Assembler::notEqual, L); 2128 __ int3(); 2129 __ bind(L); 2130 } 2131 #endif // !PRODUCT 2132 #endif // ! CC_INTERP