/*
 * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterGenerator.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateTable.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

#define __ _masm->

#ifndef CC_INTERP

const int method_offset = frame::interpreter_frame_method_offset * wordSize;
const int bci_offset    = frame::interpreter_frame_bcx_offset    * wordSize;
const int locals_offset = frame::interpreter_frame_locals_offset * wordSize;

//-----------------------------------------------------------------------------

address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();

#ifdef ASSERT
  {
    Label L;
    __ lea(rax, Address(rbp,
                        frame::interpreter_frame_monitor_block_top_offset *
                        wordSize));
    __ cmpptr(rax, rsp); // rax = maximal rsp for current rbp (stack
                         // grows negative)
    __ jcc(Assembler::aboveEqual, L); // check if frame is complete
    __ stop ("interpreter frame not set up");
    __ bind(L);
  }
#endif // ASSERT
  // Restore bcp under the assumption that the current frame is still
  // interpreted
  __ restore_bcp();

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // throw exception
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::throw_StackOverflowError));
  return entry;
}

address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(
        const char* name) {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // setup parameters
  // calling convention: the aberrant index is expected in register rbx
  __ lea(c_rarg1, ExternalAddress((address)name));
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::
                              throw_ArrayIndexOutOfBoundsException),
             c_rarg1, rbx);
  return entry;
}

address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();

  // object is at TOS
  __ pop(c_rarg1);

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();

  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::
                              throw_ClassCastException),
             c_rarg1);
  return entry;
}

address TemplateInterpreterGenerator::generate_exception_handler_common(
        const char* name, const char* message, bool pass_oop) {
  assert(!pass_oop || message == NULL, "either oop or message but not both");
  address entry = __ pc();
  if (pass_oop) {
    // object is at TOS
    __ pop(c_rarg2);
  }
  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // setup parameters
  __ lea(c_rarg1, ExternalAddress((address)name));
  if (pass_oop) {
    __ call_VM(rax, CAST_FROM_FN_PTR(address,
                                     InterpreterRuntime::
                                     create_klass_exception),
               c_rarg1, c_rarg2);
  } else {
    // kind of lame ExternalAddress can't take NULL because
    // external_word_Relocation will assert.
    if (message != NULL) {
      __ lea(c_rarg2, ExternalAddress((address)message));
    } else {
      __ movptr(c_rarg2, NULL_WORD);
    }
    __ call_VM(rax,
               CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception),
               c_rarg1, c_rarg2);
  }
  // throw exception
  __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
  return entry;
}


address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
  address entry = __ pc();
  // NULL last_sp until next java call
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
  __ dispatch_next(state);
  return entry;
}


address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step) {
  address entry = __ pc();

  // Restore stack bottom in case i2c adjusted stack
  __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  // and NULL it as marker that esp is now tos until next java call
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);

  __ restore_bcp();
  __ restore_locals();

  Label L_got_cache, L_giant_index;
  if (EnableInvokeDynamic) {
    __ cmpb(Address(r13, 0), Bytecodes::_invokedynamic);
    __ jcc(Assembler::equal, L_giant_index);
  }
  __ get_cache_and_index_at_bcp(rbx, rcx, 1, sizeof(u2));
  __ bind(L_got_cache);
  __ movl(rbx, Address(rbx, rcx,
                       Address::times_ptr,
                       in_bytes(ConstantPoolCache::base_offset()) +
                       3 * wordSize));
  __ andl(rbx, 0xFF);
  __ lea(rsp, Address(rsp, rbx, Address::times_8));
  __ dispatch_next(state, step);

  // out of the main line of code...
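  // invokedynamic uses a 4-byte ("giant") constant pool cache index, so
  // its cache entry has to be re-fetched here with sizeof(u4) instead of
  // the sizeof(u2) used on the main path above.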
  if (EnableInvokeDynamic) {
    __ bind(L_giant_index);
    __ get_cache_and_index_at_bcp(rbx, rcx, 1, sizeof(u4));
    __ jmp(L_got_cache);
  }

  return entry;
}


address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state,
                                                               int step) {
  address entry = __ pc();
  // NULL last_sp until next java call
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
  __ restore_bcp();
  __ restore_locals();
  // handle exceptions
  {
    Label L;
    __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
    __ jcc(Assembler::zero, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }
  __ dispatch_next(state, step);
  return entry;
}

int AbstractInterpreter::BasicType_as_index(BasicType type) {
  int i = 0;
  switch (type) {
    case T_BOOLEAN: i = 0; break;
    case T_CHAR   : i = 1; break;
    case T_BYTE   : i = 2; break;
    case T_SHORT  : i = 3; break;
    case T_INT    : i = 4; break;
    case T_LONG   : i = 5; break;
    case T_VOID   : i = 6; break;
    case T_FLOAT  : i = 7; break;
    case T_DOUBLE : i = 8; break;
    case T_OBJECT : i = 9; break;
    case T_ARRAY  : i = 9; break;
    default       : ShouldNotReachHere();
  }
  assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers,
         "index out of bounds");
  return i;
}


address TemplateInterpreterGenerator::generate_result_handler_for(
        BasicType type) {
  address entry = __ pc();
  switch (type) {
  case T_BOOLEAN: __ c2bool(rax);            break;
  case T_CHAR   : __ movzwl(rax, rax);       break;
  case T_BYTE   : __ sign_extend_byte(rax);  break;
  case T_SHORT  : __ sign_extend_short(rax); break;
  case T_INT    : /* nothing to do */        break;
  case T_LONG   : /* nothing to do */        break;
  case T_VOID   : /* nothing to do */        break;
  case T_FLOAT  : /* nothing to do */        break;
  case T_DOUBLE : /* nothing to do */        break;
  case T_OBJECT :
    // retrieve result from frame
    __ movptr(rax, Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize));
    // and verify it
    __ verify_oop(rax);
    break;
  default       : ShouldNotReachHere();
  }
  __ ret(0); // return from result handler
  return entry;
}

address TemplateInterpreterGenerator::generate_safept_entry_for(
        TosState state,
        address runtime_entry) {
  address entry = __ pc();
  __ push(state);
  __ call_VM(noreg, runtime_entry);
  __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
  return entry;
}



// Helpers for commoning out cases in the various type of method entries.
//


// increment invocation count & check for overflow
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test
//
// rbx: method
// ecx: invocation counter
//
void InterpreterGenerator::generate_counter_incr(
        Label* overflow,
        Label* profile_method,
        Label* profile_method_continue) {
  const Address invocation_counter(rbx, in_bytes(Method::invocation_counter_offset()) +
                                        in_bytes(InvocationCounter::counter_offset()));
  // Note: In tiered we increment either counters in Method* or in MDO depending if we're profiling or not.
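  // Roughly: increment_mask_and_jump below adds count_increment to the
  // counter word, stores it back, masks off all but the notification bits
  // and branches to *overflow when the masked value is zero, i.e. once
  // every 2^Tier0InvokeNotifyFreqLog invocations.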
  if (TieredCompilation) {
    int increment = InvocationCounter::count_increment;
    int mask = ((1 << Tier0InvokeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
    Label no_mdo, done;
    if (ProfileInterpreter) {
      // Are we profiling?
      __ movptr(rax, Address(rbx, Method::method_data_offset()));
      __ testptr(rax, rax);
      __ jccb(Assembler::zero, no_mdo);
      // Increment counter in the MDO
      const Address mdo_invocation_counter(rax, in_bytes(MethodData::invocation_counter_offset()) +
                                                in_bytes(InvocationCounter::counter_offset()));
      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rcx, false, Assembler::zero, overflow);
      __ jmpb(done);
    }
    __ bind(no_mdo);
    // Increment counter in Method* (we don't need to load it, it's in ecx).
    __ increment_mask_and_jump(invocation_counter, increment, mask, rcx, true, Assembler::zero, overflow);
    __ bind(done);
  } else {
    const Address backedge_counter(rbx,
                                   Method::backedge_counter_offset() +
                                   InvocationCounter::counter_offset());

    if (ProfileInterpreter) { // %%% Merge this into MethodData*
      __ incrementl(Address(rbx,
                            Method::interpreter_invocation_counter_offset()));
    }
    // Update standard invocation counters
    __ movl(rax, backedge_counter);   // load backedge counter

    __ incrementl(rcx, InvocationCounter::count_increment);
    __ andl(rax, InvocationCounter::count_mask_value); // mask out the status bits

    __ movl(invocation_counter, rcx); // save invocation count
    __ addl(rcx, rax);                // add both counters

    // profile_method is non-null only for interpreted method so
    // profile_method != NULL == !native_call

    if (ProfileInterpreter && profile_method != NULL) {
      // Test to see if we should create a method data oop
      __ cmp32(rcx, ExternalAddress((address)&InvocationCounter::InterpreterProfileLimit));
      __ jcc(Assembler::less, *profile_method_continue);

      // if no method data exists, go to profile_method
      __ test_method_data_pointer(rax, *profile_method);
    }

    __ cmp32(rcx, ExternalAddress((address)&InvocationCounter::InterpreterInvocationLimit));
    __ jcc(Assembler::aboveEqual, *overflow);
  }
}

void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {

  // Asm interpreter on entry
  // r14 - locals
  // r13 - bcp
  // rbx - method
  // edx - cpool --- DOES NOT APPEAR TO BE TRUE
  // rbp - interpreter frame

  // On return (i.e. jump to entry_point) [ back to invocation of interpreter ]
  // Everything as it was on entry
  // rdx is not restored. Doesn't appear to really be set.

  // InterpreterRuntime::frequency_counter_overflow takes two
  // arguments, the first (thread) is passed by call_VM, the second
  // indicates if the counter overflow occurs at a backwards branch
  // (NULL bcp).  We pass zero for it.  The call returns the address
  // of the verified entry point for the method or NULL if the
  // compilation did not complete (either went background or bailed
  // out).
  __ movl(c_rarg1, 0);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::frequency_counter_overflow),
             c_rarg1);

  __ movptr(rbx, Address(rbp, method_offset)); // restore Method*
  // Preserve invariant that r13/r14 contain bcp/locals of sender frame
  // and jump to the interpreted entry.
  __ jmp(*do_continue, relocInfo::none);
}

// See if we've got enough room on the stack for locals plus overhead.
// The expression stack grows down incrementally, so the normal guard
// page mechanism will work for that.
//
// NOTE: The additional locals are also always pushed (this wasn't
// obvious in generate_method_entry), so the guard should work for them
// too.
//
// Args:
//      rdx: number of additional locals this frame needs (what we must check)
//      rbx: Method*
//
// Kills:
//      rax
void InterpreterGenerator::generate_stack_overflow_check(void) {

  // monitor entry size: see picture of stack set
  // (generate_method_entry) and frame_amd64.hpp
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

  // total overhead size: entry_size + (saved rbp through expr stack
  // bottom).  be sure to change this if you add/subtract anything
  // to/from the overhead area
  const int overhead_size =
    -(frame::interpreter_frame_initial_sp_offset * wordSize) + entry_size;

  const int page_size = os::vm_page_size();

  Label after_frame_check;

  // see if the frame is greater than one page in size. If so,
  // then we need to verify there is enough stack space remaining
  // for the additional locals.
  __ cmpl(rdx, (page_size - overhead_size) / Interpreter::stackElementSize);
  __ jcc(Assembler::belowEqual, after_frame_check);

  // compute rsp as if this were going to be the last frame on
  // the stack before the red zone

  const Address stack_base(r15_thread, Thread::stack_base_offset());
  const Address stack_size(r15_thread, Thread::stack_size_offset());

  // locals + overhead, in bytes
  __ mov(rax, rdx);
  __ shlptr(rax, Interpreter::logStackElementSize); // 2 slots per parameter.
  __ addptr(rax, overhead_size);

#ifdef ASSERT
  Label stack_base_okay, stack_size_okay;
  // verify that thread stack base is non-zero
  __ cmpptr(stack_base, (int32_t)NULL_WORD);
  __ jcc(Assembler::notEqual, stack_base_okay);
  __ stop("stack base is zero");
  __ bind(stack_base_okay);
  // verify that thread stack size is non-zero
  __ cmpptr(stack_size, 0);
  __ jcc(Assembler::notEqual, stack_size_okay);
  __ stop("stack size is zero");
  __ bind(stack_size_okay);
#endif

  // Add stack base to locals and subtract stack size
  __ addptr(rax, stack_base);
  __ subptr(rax, stack_size);

  // Use the maximum number of pages we might bang.
  const int max_pages = StackShadowPages > (StackRedPages+StackYellowPages) ? StackShadowPages :
                        (StackRedPages+StackYellowPages);

  // add in the red and yellow zone sizes
  __ addptr(rax, max_pages * page_size);

  // check against the current stack bottom
  __ cmpptr(rsp, rax);
  __ jcc(Assembler::above, after_frame_check);

  // Restore sender's sp as SP. This is necessary if the sender's
  // frame is an extended compiled frame (see gen_c2i_adapter())
  // and safer anyway in case of JSR292 adaptations.

  __ pop(rax); // return address must be moved if SP is changed
  __ mov(rsp, r13);
  __ push(rax);

  // Note: the restored frame is not necessarily interpreted.
  // Use the shared runtime version of the StackOverflowError.
  assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
  __ jump(ExternalAddress(StubRoutines::throw_StackOverflowError_entry()));

  // all done with frame size check
  __ bind(after_frame_check);
}

// Allocate monitor and lock method (asm interpreter)
//
// Args:
//      rbx: Method*
//      r14: locals
//
// Kills:
//      rax
//      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ...(param regs)
//      rscratch1, rscratch2 (scratch regs)
void InterpreterGenerator::lock_method(void) {
  // synchronize method
  const Address access_flags(rbx, Method::access_flags_offset());
  const Address monitor_block_top(
        rbp,
        frame::interpreter_frame_monitor_block_top_offset * wordSize);
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

#ifdef ASSERT
  {
    Label L;
    __ movl(rax, access_flags);
    __ testl(rax, JVM_ACC_SYNCHRONIZED);
    __ jcc(Assembler::notZero, L);
    __ stop("method doesn't need synchronization");
    __ bind(L);
  }
#endif // ASSERT

  // get synchronization object
  {
    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
    Label done;
    __ movl(rax, access_flags);
    __ testl(rax, JVM_ACC_STATIC);
    // get receiver (assume this is frequent case)
    __ movptr(rax, Address(r14, Interpreter::local_offset_in_bytes(0)));
    __ jcc(Assembler::zero, done);
    __ movptr(rax, Address(rbx, Method::const_offset()));
    __ movptr(rax, Address(rax, ConstMethod::constants_offset()));
    __ movptr(rax, Address(rax,
                           ConstantPool::pool_holder_offset_in_bytes()));
    __ movptr(rax, Address(rax, mirror_offset));

#ifdef ASSERT
    {
      Label L;
      __ testptr(rax, rax);
      __ jcc(Assembler::notZero, L);
      __ stop("synchronization object is NULL");
      __ bind(L);
    }
#endif // ASSERT

    __ bind(done);
  }

  // add space for monitor & lock
  __ subptr(rsp, entry_size);        // add space for a monitor entry
  __ movptr(monitor_block_top, rsp); // set new monitor block top
  // store object
  __ movptr(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax);
  __ movptr(c_rarg1, rsp); // object address
  __ lock_object(c_rarg1);
}

// Generate a fixed interpreter frame. This is identical setup for
// interpreted methods and for native methods hence the shared code.
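// In outline, the pushes below build (from high to low addresses):
//
// [ return address     ]
// [ saved rbp          ] <--- rbp
// [ sender sp (r13)    ]
// [ last_sp (NULL)     ]
// [ Method*            ]
// [ mdp or 0           ]
// [ cpool cache        ]
// [ locals pointer     ]
// [ bcp (0 if native)  ]
// [ expr. stack bottom ] <--- rsp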
//
// Args:
//      rax: return address
//      rbx: Method*
//      r14: pointer to locals
//      r13: sender sp
//      rdx: cp cache
void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  // initialize fixed part of activation frame
  __ push(rax);            // save return address
  __ enter();              // save old & set new rbp
  __ push(r13);            // set sender sp
  __ push((int)NULL_WORD); // leave last_sp as null
  __ movptr(r13, Address(rbx, Method::const_offset()));   // get ConstMethod*
  __ lea(r13, Address(r13, ConstMethod::codes_offset())); // get codebase
  __ push(rbx);            // save Method*
  if (ProfileInterpreter) {
    Label method_data_continue;
    __ movptr(rdx, Address(rbx, in_bytes(Method::method_data_offset())));
    __ testptr(rdx, rdx);
    __ jcc(Assembler::zero, method_data_continue);
    __ addptr(rdx, in_bytes(MethodData::data_offset()));
    __ bind(method_data_continue);
    __ push(rdx);          // set the mdp (method data pointer)
  } else {
    __ push(0);
  }

  __ movptr(rdx, Address(rbx, Method::const_offset()));
  __ movptr(rdx, Address(rdx, ConstMethod::constants_offset()));
  __ movptr(rdx, Address(rdx, ConstantPool::cache_offset_in_bytes()));
  __ push(rdx);  // set constant pool cache
  __ push(r14);  // set locals pointer
  if (native_call) {
    __ push(0);  // no bcp
  } else {
    __ push(r13); // set bcp
  }
  __ push(0); // reserve word for pointer to expression stack bottom
  __ movptr(Address(rsp, 0), rsp); // set expression stack bottom
}

// End of helpers

// Various method entries
//------------------------------------------------------------------------------------------------------------------------
//
//

// Call an accessor method (assuming it is resolved, otherwise drop
// into vanilla (slow path) entry
address InterpreterGenerator::generate_accessor_entry(void) {
  // rbx: Method*

  // r13: senderSP must be preserved for slow path, set SP to it on fast path

  address entry_point = __ pc();
  Label xreturn_path;

  // do fastpath for resolved accessor methods
  if (UseFastAccessorMethods) {
    // Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites
    //       thereof; parameter size = 1
    // Note: We can only use this code if the getfield has been resolved
    //       and if we don't have a null-pointer exception => check for
    //       these conditions first and use slow path if necessary.
    Label slow_path;
    // If we need a safepoint check, generate full interpreter entry.
    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
             SafepointSynchronize::_not_synchronized);

    __ jcc(Assembler::notEqual, slow_path);
    // rbx: method
    __ movptr(rax, Address(rsp, wordSize));

    // check if local 0 != NULL and read field
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, slow_path);

    // read first instruction word and extract bytecode @ 1 and index @ 2
    __ movptr(rdx, Address(rbx, Method::const_offset()));
    __ movptr(rdi, Address(rdx, ConstMethod::constants_offset()));
    __ movl(rdx, Address(rdx, ConstMethod::codes_offset()));
    // Shift codes right to get the index on the right.
    // The bytecode fetched looks like <index><0xb4><0x2a>
    __ shrl(rdx, 2 * BitsPerByte);
    __ shll(rdx, exact_log2(in_words(ConstantPoolCacheEntry::size())));
    __ movptr(rdi, Address(rdi, ConstantPool::cache_offset_in_bytes()));

    // rax: local 0
    // rbx: method
    // rdx: constant pool cache index
    // rdi: constant pool cache

    // check if getfield has been resolved and read constant pool cache entry
    // check the validity of the cache entry by testing whether _indices field
    // contains Bytecodes::_getfield in b1 byte.
    assert(in_words(ConstantPoolCacheEntry::size()) == 4,
           "adjust shift below");
    __ movl(rcx,
            Address(rdi,
                    rdx,
                    Address::times_8,
                    ConstantPoolCache::base_offset() +
                    ConstantPoolCacheEntry::indices_offset()));
    __ shrl(rcx, 2 * BitsPerByte);
    __ andl(rcx, 0xFF);
    __ cmpl(rcx, Bytecodes::_getfield);
    __ jcc(Assembler::notEqual, slow_path);

    // Note: constant pool entry is not valid before bytecode is resolved
    __ movptr(rcx,
              Address(rdi,
                      rdx,
                      Address::times_8,
                      ConstantPoolCache::base_offset() +
                      ConstantPoolCacheEntry::f2_offset()));
    // edx: flags
    __ movl(rdx,
            Address(rdi,
                    rdx,
                    Address::times_8,
                    ConstantPoolCache::base_offset() +
                    ConstantPoolCacheEntry::flags_offset()));

    Label notObj, notInt, notByte, notShort;
    const Address field_address(rax, rcx, Address::times_1);

    // Need to differentiate between igetfield, agetfield, bgetfield etc.
    // because they are different sizes.
    // Use the type from the constant pool cache
    __ shrl(rdx, ConstantPoolCacheEntry::tos_state_shift);
    // Make sure we don't need to mask edx after the above shift
    ConstantPoolCacheEntry::verify_tos_state_shift();

    __ cmpl(rdx, atos);
    __ jcc(Assembler::notEqual, notObj);
    // atos
    __ load_heap_oop(rax, field_address);
    __ jmp(xreturn_path);

    __ bind(notObj);
    __ cmpl(rdx, itos);
    __ jcc(Assembler::notEqual, notInt);
    // itos
    __ movl(rax, field_address);
    __ jmp(xreturn_path);

    __ bind(notInt);
    __ cmpl(rdx, btos);
    __ jcc(Assembler::notEqual, notByte);
    // btos
    __ load_signed_byte(rax, field_address);
    __ jmp(xreturn_path);

    __ bind(notByte);
    __ cmpl(rdx, stos);
    __ jcc(Assembler::notEqual, notShort);
    // stos
    __ load_signed_short(rax, field_address);
    __ jmp(xreturn_path);

    __ bind(notShort);
#ifdef ASSERT
    Label okay;
    __ cmpl(rdx, ctos);
    __ jcc(Assembler::equal, okay);
    __ stop("what type is this?");
    __ bind(okay);
#endif
    // ctos
    __ load_unsigned_short(rax, field_address);

    __ bind(xreturn_path);

    // _ireturn/_areturn
    __ pop(rdi);
    __ mov(rsp, r13);
    __ jmp(rdi);
    __ ret(0);

    // generate a vanilla interpreter entry as the slow path
    __ bind(slow_path);
    (void) generate_normal_entry(false);
  } else {
    (void) generate_normal_entry(false);
  }

  return entry_point;
}

// Method entry for java.lang.ref.Reference.get.
address InterpreterGenerator::generate_Reference_get_entry(void) {
#if INCLUDE_ALL_GCS
  // Code: _aload_0, _getfield, _areturn
  // parameter size = 1
  //
  // The code that gets generated by this routine is split into 2 parts:
  //    1. The "intrinsified" code for G1 (or any SATB based GC),
  //    2. The slow path - which is an expansion of the regular method entry.
  //
  // Notes:-
  // * In the G1 code we do not check whether we need to block for
  //   a safepoint. If G1 is enabled then we must execute the specialized
  //   code for Reference.get (except when the Reference object is null)
  //   so that we can log the value in the referent field with an SATB
  //   update buffer.
  //   If the code for the getfield template is modified so that the
  //   G1 pre-barrier code is executed when the current method is
  //   Reference.get() then going through the normal method entry
  //   will be fine.
  // * The G1 code can, however, check the receiver object (the instance
  //   of java.lang.Reference) and jump to the slow path if null. If the
  //   Reference object is null then we obviously cannot fetch the referent
  //   and so we don't need to call the G1 pre-barrier. Thus we can use the
  //   regular method entry code to generate the NPE.
  //
  // This code is based on generate_accessor_entry.
  //
  // rbx: Method*

  // r13: senderSP must be preserved for slow path, set SP to it on fast path

  address entry = __ pc();

  const int referent_offset = java_lang_ref_Reference::referent_offset;
  guarantee(referent_offset > 0, "referent offset not initialized");

  if (UseG1GC) {
    Label slow_path;
    // rbx: method

    // Check if local 0 != NULL
    // If the receiver is null then it is OK to jump to the slow path.
    __ movptr(rax, Address(rsp, wordSize));

    __ testptr(rax, rax);
    __ jcc(Assembler::zero, slow_path);

    // rax: local 0
    // rbx: method (but can be used as scratch now)
    // rdx: scratch
    // rdi: scratch

    // Load the value of the referent field.
    const Address field_address(rax, referent_offset);
    __ load_heap_oop(rax, field_address);

    // Generate the G1 pre-barrier code to log the value of
    // the referent field in an SATB buffer.
    __ g1_write_barrier_pre(noreg /* obj */,
                            rax /* pre_val */,
                            r15_thread /* thread */,
                            rbx /* tmp */,
                            true /* tosca_live */,
                            true /* expand_call */);

    // _areturn
    __ pop(rdi);      // get return address
    __ mov(rsp, r13); // set sp to sender sp
    __ jmp(rdi);
    __ ret(0);

    // generate a vanilla interpreter entry as the slow path
    __ bind(slow_path);
    (void) generate_normal_entry(false);

    return entry;
  }
#endif // INCLUDE_ALL_GCS

  // If G1 is not enabled then attempt to go through the accessor entry point
  // Reference.get is an accessor
  return generate_accessor_entry();
}


// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the
// native method than the typical interpreter frame setup.
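// In particular, two NULL-initialized slots are pushed before the fixed
// frame is laid down: one that will receive the result handler address
// and an oop temp slot (used for a static method's mirror handle and for
// an oop result, so both stay visible to GC).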
address InterpreterGenerator::generate_native_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls;

  // rbx: Method*
  // r13: sender sp

  address entry_point = __ pc();

  const Address constMethod       (rbx, Method::const_offset());
  const Address invocation_counter(rbx, Method::
                                        invocation_counter_offset() +
                                        InvocationCounter::counter_offset());
  const Address access_flags      (rbx, Method::access_flags_offset());
  const Address size_of_parameters(rcx, ConstMethod::
                                        size_of_parameters_offset());


  // get parameter size (always needed)
  __ movptr(rcx, constMethod);
  __ load_unsigned_short(rcx, size_of_parameters);

  // native calls don't need the stack size check since they have no
  // expression stack and the arguments are already on the stack and
  // we only add a handful of words to the stack

  // rbx: Method*
  // rcx: size of parameters
  // r13: sender sp
  __ pop(rax); // get return address

  // for natives the size of locals is zero

  // compute beginning of parameters (r14)
  __ lea(r14, Address(rsp, rcx, Address::times_8, -wordSize));

  // add 2 zero-initialized slots for native calls
  // initialize result_handler slot
  __ push((int) NULL_WORD);
  // slot for oop temp
  // (static native method holder mirror/jni oop result)
  __ push((int) NULL_WORD);

  if (inc_counter) {
    __ movl(rcx, invocation_counter); // (pre-)fetch invocation count
  }

  // initialize fixed part of activation frame
  generate_fixed_frame(true);

  // make sure method is native & not abstract
#ifdef ASSERT
  __ movl(rax, access_flags);
  {
    Label L;
    __ testl(rax, JVM_ACC_NATIVE);
    __ jcc(Assembler::notZero, L);
    __ stop("tried to execute non-native method as native");
    __ bind(L);
  }
  {
    Label L;
    __ testl(rax, JVM_ACC_ABSTRACT);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // Since at this point in the method invocation the exception handler
  // would try to exit the monitor of synchronized methods which hasn't
  // been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation will
  // check this flag.

  const Address do_not_unlock_if_synchronized(r15_thread,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ movbool(do_not_unlock_if_synchronized, true);

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  bang_stack_shadow_pages(true);

  // reset the _do_not_unlock_if_synchronized flag
  __ movbool(do_not_unlock_if_synchronized, false);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so method is not locked if overflows.
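  // lock_method() allocates a BasicObjectLock on the stack and locks the
  // receiver (non-static method) or the class mirror (static method); the
  // matching unlock is done explicitly on the way out below.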
  if (synchronized) {
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    {
      Label L;
      __ movl(rax, access_flags);
      __ testl(rax, JVM_ACC_SYNCHRONIZED);
      __ jcc(Assembler::zero, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  {
    Label L;
    const Address monitor_block_top(rbp,
                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ movptr(rax, monitor_block_top);
    __ cmpptr(rax, rsp);
    __ jcc(Assembler::equal, L);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif

  // jvmti support
  __ notify_method_entry();

  // work registers
  const Register method = rbx;
  const Register t      = r11;

  // allocate space for parameters
  __ get_method(method);
  __ movptr(t, Address(method, Method::const_offset()));
  __ load_unsigned_short(t, Address(t, ConstMethod::size_of_parameters_offset()));
  __ shll(t, Interpreter::logStackElementSize);

  __ subptr(rsp, t);
  __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
  __ andptr(rsp, -16); // must be 16 byte boundary (see amd64 ABI)

  // get signature handler
  {
    Label L;
    __ movptr(t, Address(method, Method::signature_handler_offset()));
    __ testptr(t, t);
    __ jcc(Assembler::notZero, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               method);
    __ get_method(method);
    __ movptr(t, Address(method, Method::signature_handler_offset()));
    __ bind(L);
  }

  // call signature handler
  assert(InterpreterRuntime::SignatureHandlerGenerator::from() == r14,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::to() == rsp,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == rscratch1,
         "adjust this code");

  // The generated handlers do not touch RBX (the method oop).
  // However, large signatures cannot be cached and are generated
  // each time here.  The slow-path generator can do a GC on return,
  // so we must reload it after the call.
  __ call(t);
  __ get_method(method); // slow path can do a GC, reload RBX


  // result handler is in rax
  // set result handler
  __ movptr(Address(rbp,
                    (frame::interpreter_frame_result_handler_offset) * wordSize),
            rax);

  // pass mirror handle if static call
  {
    Label L;
    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
    __ movl(t, Address(method, Method::access_flags_offset()));
    __ testl(t, JVM_ACC_STATIC);
    __ jcc(Assembler::zero, L);
    // get mirror
    __ movptr(t, Address(method, Method::const_offset()));
    __ movptr(t, Address(t, ConstMethod::constants_offset()));
    __ movptr(t, Address(t, ConstantPool::pool_holder_offset_in_bytes()));
    __ movptr(t, Address(t, mirror_offset));
    // copy mirror into activation frame
    __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize),
              t);
    // pass handle to mirror
    __ lea(c_rarg1,
           Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
    __ bind(L);
  }

  // get native function entry point
  {
    Label L;
    __ movptr(rax, Address(method, Method::native_function_offset()));
    ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
    __ movptr(rscratch2, unsatisfied.addr());
    __ cmpptr(rax, rscratch2);
    __ jcc(Assembler::notEqual, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               method);
    __ get_method(method);
    __ movptr(rax, Address(method, Method::native_function_offset()));
    __ bind(L);
  }

  // pass JNIEnv
  __ lea(c_rarg0, Address(r15_thread, JavaThread::jni_environment_offset()));

  // It is enough that the pc() points into the right code
  // segment. It does not have to be the correct return pc.
  __ set_last_Java_frame(rsp, rbp, (address) __ pc());

  // change thread state
#ifdef ASSERT
  {
    Label L;
    __ movl(t, Address(r15_thread, JavaThread::thread_state_offset()));
    __ cmpl(t, _thread_in_Java);
    __ jcc(Assembler::equal, L);
    __ stop("Wrong thread state in native stub");
    __ bind(L);
  }
#endif

  // Change state to native

  __ movl(Address(r15_thread, JavaThread::thread_state_offset()),
          _thread_in_native);

  // Call the native method.
  __ call(rax);
  // result potentially in rax or xmm0

  // Depending on runtime options, either restore the MXCSR
  // register after returning from the JNI Call or verify that
  // it wasn't changed during -Xcheck:jni.
  if (RestoreMXCSROnJNICalls) {
    __ ldmxcsr(ExternalAddress(StubRoutines::x86::mxcsr_std()));
  }
  else if (CheckJNICalls) {
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::verify_mxcsr_entry())));
  }

  // NOTE: The order of these pushes is known to frame::interpreter_frame_result
  // in order to extract the result of a method call. If the order of these
  // pushes changes or anything else is added to the stack then the code in
  // interpreter_frame_result must also change.
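  // push(dtos) saves xmm0 and push(ltos) saves rax, so whichever register
  // holds the native result survives the state transition and the
  // safepoint/suspend checks that follow.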
  __ push(dtos);
  __ push(ltos);

  // change thread state
  __ movl(Address(r15_thread, JavaThread::thread_state_offset()),
          _thread_in_native_trans);

  if (os::is_MP()) {
    if (UseMembar) {
      // Force this write out before the read below
      __ membar(Assembler::Membar_mask_bits(
           Assembler::LoadLoad | Assembler::LoadStore |
           Assembler::StoreLoad | Assembler::StoreStore));
    } else {
      // Write serialization page so VM thread can do a pseudo remote membar.
      // We use the current thread pointer to calculate a thread specific
      // offset to write to within the page. This minimizes bus traffic
      // due to cache line collision.
      __ serialize_memory(r15_thread, rscratch2);
    }
  }

  // check for safepoint operation in progress and/or pending suspend requests
  {
    Label Continue;
    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
             SafepointSynchronize::_not_synchronized);

    Label L;
    __ jcc(Assembler::notEqual, L);
    __ cmpl(Address(r15_thread, JavaThread::suspend_flags_offset()), 0);
    __ jcc(Assembler::equal, Continue);
    __ bind(L);

    // Don't use call_VM as it will see a possible pending exception
    // and forward it and never return here preventing us from
    // clearing _last_native_pc down below. Also can't use
    // call_VM_leaf either as it will check to see if r13 & r14 are
    // preserved and correspond to the bcp/locals pointers. So we do a
    // runtime call by hand.
    //
    __ mov(c_rarg0, r15_thread);
    __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
    __ andptr(rsp, -16); // align stack as required by ABI
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
    __ mov(rsp, r12); // restore sp
    __ reinit_heapbase();
    __ bind(Continue);
  }

  // change thread state
  __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_Java);

  // reset_last_Java_frame
  __ reset_last_Java_frame(true, true);

  // reset handle block
  __ movptr(t, Address(r15_thread, JavaThread::active_handles_offset()));
  __ movptr(Address(t, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);

  // If result is an oop unbox and store it in frame where gc will see it
  // and result handler will pick it up

  {
    Label no_oop, store_result;
    __ lea(t, ExternalAddress(AbstractInterpreter::result_handler(T_OBJECT)));
    __ cmpptr(t, Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize));
    __ jcc(Assembler::notEqual, no_oop);
    // retrieve result
    __ pop(ltos);
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, store_result);
    __ movptr(rax, Address(rax, 0));
    __ bind(store_result);
    __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize), rax);
    // keep stack depth as expected by pushing oop which will eventually be discarded
    __ push(ltos);
    __ bind(no_oop);
  }


  {
    Label no_reguard;
    __ cmpl(Address(r15_thread, JavaThread::stack_guard_state_offset()),
            JavaThread::stack_guard_yellow_disabled);
    __ jcc(Assembler::notEqual, no_reguard);

    __ pusha(); // XXX only save smashed registers
    __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
    __ andptr(rsp, -16); // align stack as required by ABI
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
    __ mov(rsp, r12); // restore sp
    __ popa(); // XXX only restore smashed registers
    __ reinit_heapbase();

    __ bind(no_reguard);
  }


  // The method register is junk from after the thread_in_native transition
  // until here.  Also can't call_VM until the bcp has been
  // restored.  Need bcp for throwing exception below so get it now.
  __ get_method(method);

  // restore r13 to have legal interpreter frame, i.e., bci == 0 <=>
  // r13 == code_base()
  __ movptr(r13, Address(method, Method::const_offset()));  // get ConstMethod*
  __ lea(r13, Address(r13, ConstMethod::codes_offset()));   // get codebase
  // handle exceptions (exception handling will handle unlocking!)
  {
    Label L;
    __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
    __ jcc(Assembler::zero, L);
    // Note: At some point we may want to unify this with the code
    // used in call_VM_base(); i.e., we should use the
    // StubRoutines::forward_exception code. For now this doesn't work
    // here because the rsp is not correctly set at this point.
    __ MacroAssembler::call_VM(noreg,
                               CAST_FROM_FN_PTR(address,
                               InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  // do unlocking if necessary
  {
    Label L;
    __ movl(t, Address(method, Method::access_flags_offset()));
    __ testl(t, JVM_ACC_SYNCHRONIZED);
    __ jcc(Assembler::zero, L);
    // the code below should be shared with interpreter macro
    // assembler implementation
    {
      Label unlock;
      // BasicObjectLock will be first in list, since this is a
      // synchronized method. However, need to check that the object
      // has not been unlocked by an explicit monitorexit bytecode.
      const Address monitor(rbp,
                            (intptr_t)(frame::interpreter_frame_initial_sp_offset *
                                       wordSize - sizeof(BasicObjectLock)));

      // the monitor is expected in c_rarg1 for the slow unlock path
      __ lea(c_rarg1, monitor); // address of first monitor

      __ movptr(t, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
      __ testptr(t, t);
      __ jcc(Assembler::notZero, unlock);

      // Entry already unlocked, need to throw exception
      __ MacroAssembler::call_VM(noreg,
                                 CAST_FROM_FN_PTR(address,
                   InterpreterRuntime::throw_illegal_monitor_state_exception));
      __ should_not_reach_here();

      __ bind(unlock);
      __ unlock_object(c_rarg1);
    }
    __ bind(L);
  }

  // jvmti support
  // Note: This must happen _after_ handling/throwing any exceptions since
  //       the exception handler code notifies the runtime of method exits
  //       too. If this happens before, method entry/exit notifications are
  //       not properly paired (was bug - gri 11/22/99).
  __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);

  // restore potential result in rax (pop(ltos)) and xmm0 (pop(dtos)),
  // then call the result handler to convert and handle the result

  __ pop(ltos);
  __ pop(dtos);

  __ movptr(t, Address(rbp,
                       (frame::interpreter_frame_result_handler_offset) * wordSize));
  __ call(t);

  // remove activation
  __ movptr(t, Address(rbp,
                       frame::interpreter_frame_sender_sp_offset *
                       wordSize)); // get sender sp
  __ leave();   // remove frame anchor
  __ pop(rdi);  // get return address
  __ mov(rsp, t); // set sp to sender sp
  __ jmp(rdi);

  if (inc_counter) {
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(&continue_after_compile);
  }

  return entry_point;
}

//
// Generic interpreted method entry to (asm) interpreter
//
address InterpreterGenerator::generate_normal_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls;

  // rbx: Method*
  // r13: sender sp
  address entry_point = __ pc();

  const Address constMethod(rbx, Method::const_offset());
  const Address invocation_counter(rbx,
                                   Method::invocation_counter_offset() +
                                   InvocationCounter::counter_offset());
  const Address access_flags(rbx, Method::access_flags_offset());
  const Address size_of_parameters(rdx,
                                   ConstMethod::size_of_parameters_offset());
  const Address size_of_locals(rdx, ConstMethod::size_of_locals_offset());


  // get parameter size (always needed)
  __ movptr(rdx, constMethod);
  __ load_unsigned_short(rcx, size_of_parameters);

  // rbx: Method*
  // rcx: size of parameters
  // r13: sender_sp (could differ from sp+wordSize if we were called via c2i )

  __ load_unsigned_short(rdx, size_of_locals); // get size of locals in words
  __ subl(rdx, rcx); // rdx = no. of additional locals

  // YYY
  //   __ incrementl(rdx);
  //   __ andl(rdx, -2);

  // see if we've got enough room on the stack for locals plus overhead.
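  // (rdx holds the number of additional locals and is what the check
  // examines; rax gets clobbered -- see generate_stack_overflow_check.)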
  generate_stack_overflow_check();

  // get return address
  __ pop(rax);

  // compute beginning of parameters (r14)
  __ lea(r14, Address(rsp, rcx, Address::times_8, -wordSize));

  // rdx - # of additional locals
  // allocate space for locals
  // explicitly initialize locals
  {
    Label exit, loop;
    __ testl(rdx, rdx);
    __ jcc(Assembler::lessEqual, exit); // do nothing if rdx <= 0
    __ bind(loop);
    __ push((int) NULL_WORD); // initialize local variables
    __ decrementl(rdx);       // until everything initialized
    __ jcc(Assembler::greater, loop);
    __ bind(exit);
  }

  // (pre-)fetch invocation count
  if (inc_counter) {
    __ movl(rcx, invocation_counter);
  }
  // initialize fixed part of activation frame
  generate_fixed_frame(false);

  // make sure method is not native & not abstract
#ifdef ASSERT
  __ movl(rax, access_flags);
  {
    Label L;
    __ testl(rax, JVM_ACC_NATIVE);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute native method as non-native");
    __ bind(L);
  }
  {
    Label L;
    __ testl(rax, JVM_ACC_ABSTRACT);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // Since at this point in the method invocation the exception
  // handler would try to exit the monitor of synchronized methods
  // which hasn't been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation
  // will check this flag.

  const Address do_not_unlock_if_synchronized(r15_thread,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ movbool(do_not_unlock_if_synchronized, true);

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  Label profile_method;
  Label profile_method_continue;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow,
                          &profile_method,
                          &profile_method_continue);
    if (ProfileInterpreter) {
      __ bind(profile_method_continue);
    }
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  // bang the stack shadow pages
  bang_stack_shadow_pages(false);

  // reset the _do_not_unlock_if_synchronized flag
  __ movbool(do_not_unlock_if_synchronized, false);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so method is not locked if overflows.
  if (synchronized) {
    // Allocate monitor and lock method
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    {
      Label L;
      __ movl(rax, access_flags);
      __ testl(rax, JVM_ACC_SYNCHRONIZED);
      __ jcc(Assembler::zero, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  {
    Label L;
    const Address monitor_block_top (rbp,
                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ movptr(rax, monitor_block_top);
    __ cmpptr(rax, rsp);
    __ jcc(Assembler::equal, L);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif

  // jvmti support
  __ notify_method_entry();

  __ dispatch_next(vtos);

  // invocation counter overflow
  if (inc_counter) {
    if (ProfileInterpreter) {
      // We have decided to profile this method in the interpreter
      __ bind(profile_method);
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
      __ set_method_data_pointer_for_bcp();
      __ get_method(rbx);
      __ jmp(profile_method_continue);
    }
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(&continue_after_compile);
  }

  return entry_point;
}

// Entry points
//
// Here we generate the various kinds of entries into the interpreter.
// The two main entry types are generic bytecode methods and native
// call methods. These both come in synchronized and non-synchronized
// versions but the frame layout they create is very similar. The
// other method entry types are really just special purpose entries
// that are really entry and interpretation all in one. These are for
// trivial methods like accessor, empty, or special math methods.
//
// When control flow reaches any of the entry types for the interpreter
// the following holds ->
//
// Arguments:
//
// rbx: Method*
//
// Stack layout immediately at entry
//
// [ return address     ] <--- rsp
// [ parameter n        ]
//   ...
// [ parameter 1        ]
// [ expression stack   ] (caller's java expression stack)

// Assuming that we don't go to one of the trivial specialized entries
// the stack will look like below when we are ready to execute the
// first bytecode (or call the native routine). The register usage
// will be as the template based interpreter expects (see
// interpreter_amd64.hpp).
//
// local variables follow incoming parameters immediately (i.e.,
// the return address is moved to the end of the locals).
//
// [ monitor entry      ] <--- rsp
//   ...
// [ monitor entry      ]
// [ expr. stack bottom ]
// [ saved r13          ]
// [ current r14        ]
// [ Method*            ]
// [ saved rbp          ] <--- rbp
// [ return address     ]
// [ local variable m   ]
//   ...
// [ local variable 1   ]
// [ parameter n        ]
//   ...
// [ parameter 1        ] <--- r14

address AbstractInterpreterGenerator::generate_method_entry(
                                        AbstractInterpreter::MethodKind kind) {
  // determine code generation flags
  bool synchronized = false;
  address entry_point = NULL;

  switch (kind) {
  case Interpreter::zerolocals             :                                                                             break;
  case Interpreter::zerolocals_synchronized: synchronized = true;                                                       break;
  case Interpreter::native                 : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(false); break;
  case Interpreter::native_synchronized    : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(true);  break;
  case Interpreter::empty                  : entry_point = ((InterpreterGenerator*)this)->generate_empty_entry();       break;
  case Interpreter::accessor               : entry_point = ((InterpreterGenerator*)this)->generate_accessor_entry();    break;
  case Interpreter::abstract               : entry_point = ((InterpreterGenerator*)this)->generate_abstract_entry();    break;

  case Interpreter::java_lang_math_sin     : // fall thru
  case Interpreter::java_lang_math_cos     : // fall thru
  case Interpreter::java_lang_math_tan     : // fall thru
  case Interpreter::java_lang_math_abs     : // fall thru
  case Interpreter::java_lang_math_log     : // fall thru
  case Interpreter::java_lang_math_log10   : // fall thru
  case Interpreter::java_lang_math_sqrt    : // fall thru
  case Interpreter::java_lang_math_pow     : // fall thru
  case Interpreter::java_lang_math_exp     : entry_point = ((InterpreterGenerator*)this)->generate_math_entry(kind);    break;
  case Interpreter::java_lang_ref_reference_get
                                           : entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
  default:
    fatal(err_msg("unexpected method kind: %d", kind));
    break;
  }

  if (entry_point) {
    return entry_point;
  }

  return ((InterpreterGenerator*) this)->
                                generate_normal_entry(synchronized);
}

// These should never be compiled since the interpreter will prefer
// the compiled version to the intrinsic version.
bool AbstractInterpreter::can_be_compiled(methodHandle m) {
  switch (method_kind(m)) {
    case Interpreter::java_lang_math_sin     : // fall thru
    case Interpreter::java_lang_math_cos     : // fall thru
    case Interpreter::java_lang_math_tan     : // fall thru
    case Interpreter::java_lang_math_abs     : // fall thru
    case Interpreter::java_lang_math_log     : // fall thru
    case Interpreter::java_lang_math_log10   : // fall thru
    case Interpreter::java_lang_math_sqrt    : // fall thru
    case Interpreter::java_lang_math_pow     : // fall thru
    case Interpreter::java_lang_math_exp     :
      return false;
    default:
      return true;
  }
}

// How much stack a method activation needs in words.
int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
  const int entry_size = frame::interpreter_frame_monitor_size();

  // total overhead size: entry_size + (saved rbp thru expr stack
  // bottom).  be sure to change this if you add/subtract anything
  // to/from the overhead area
  const int overhead_size =
    -(frame::interpreter_frame_initial_sp_offset) + entry_size;

  const int stub_code = frame::entry_frame_after_call_words;
  const int extra_stack = Method::extra_stack_entries();
  const int method_stack = (method->max_locals() + method->max_stack() + extra_stack) *
                           Interpreter::stackElementWords;
  return (overhead_size + method_stack + stub_code);
}

int AbstractInterpreter::layout_activation(Method* method,
                                           int tempcount,
                                           int popframe_extra_args,
                                           int moncount,
                                           int caller_actual_parameters,
                                           int callee_param_count,
                                           int callee_locals,
                                           frame* caller,
                                           frame* interpreter_frame,
                                           bool is_top_frame,
                                           bool is_bottom_frame) {
  // Note: This calculation must exactly parallel the frame setup
  // in AbstractInterpreterGenerator::generate_method_entry.
  // If interpreter_frame!=NULL, set up the method, locals, and monitors.
  // The frame interpreter_frame, if not NULL, is guaranteed to be the
  // right size, as determined by a previous call to this method.
  // It is also guaranteed to be walkable even though it is in a skeletal state

  // fixed size of an interpreter frame:
  int max_locals = method->max_locals() * Interpreter::stackElementWords;
  int extra_locals = (method->max_locals() - method->size_of_parameters()) *
                     Interpreter::stackElementWords;

  int overhead = frame::sender_sp_offset -
                 frame::interpreter_frame_initial_sp_offset;
  // Our locals were accounted for by the caller (or last_frame_adjust
  // on the transition). Since the callee parameters already account
  // for the callee's params we only need to account for the extra
  // locals.
  int size = overhead +
         (callee_locals - callee_param_count)*Interpreter::stackElementWords +
         moncount * frame::interpreter_frame_monitor_size() +
         tempcount* Interpreter::stackElementWords + popframe_extra_args;
  if (interpreter_frame != NULL) {
#ifdef ASSERT
    if (!EnableInvokeDynamic)
      // @@@ FIXME: Should we correct interpreter_frame_sender_sp in the calling sequences?
      // Probably, since deoptimization doesn't work yet.
  if (interpreter_frame != NULL) {
#ifdef ASSERT
    if (!EnableInvokeDynamic) {
      // @@@ FIXME: Should we correct interpreter_frame_sender_sp in the calling sequences?
      // Probably, since deoptimization doesn't work yet.
      assert(caller->unextended_sp() == interpreter_frame->interpreter_frame_sender_sp(), "Frame not properly walkable");
    }
    assert(caller->sp() == interpreter_frame->sender_sp(), "Frame not properly walkable(2)");
#endif

    interpreter_frame->interpreter_frame_set_method(method);
    // NOTE the difference in using sender_sp and
    // interpreter_frame_sender_sp: interpreter_frame_sender_sp is
    // the original sp of the caller (the unextended_sp) and
    // sender_sp is fp+16 XXX
    // (locals[0] lives at the highest address of the locals area, so
    // the locals pointer sits max_locals - 1 words above sender_sp)
    intptr_t* locals = interpreter_frame->sender_sp() + max_locals - 1;

#ifdef ASSERT
    if (caller->is_interpreted_frame()) {
      assert(locals < caller->fp() + frame::interpreter_frame_initial_sp_offset, "bad placement");
    }
#endif

    interpreter_frame->interpreter_frame_set_locals(locals);
    BasicObjectLock* montop = interpreter_frame->interpreter_frame_monitor_begin();
    BasicObjectLock* monbot = montop - moncount;
    interpreter_frame->interpreter_frame_set_monitor_end(monbot);

    // Set last_sp
    intptr_t* esp = (intptr_t*) monbot -
                    tempcount*Interpreter::stackElementWords -
                    popframe_extra_args;
    interpreter_frame->interpreter_frame_set_last_sp(esp);

    // All frames but the initial (oldest) interpreter frame we fill in have
    // a value for sender_sp that allows walking the stack but isn't
    // truly correct. Correct the value here.
    if (extra_locals != 0 &&
        interpreter_frame->sender_sp() ==
        interpreter_frame->interpreter_frame_sender_sp()) {
      interpreter_frame->set_interpreter_frame_sender_sp(caller->sp() +
                                                         extra_locals);
    }
    *interpreter_frame->interpreter_frame_cache_addr() =
      method->constants()->cache();
  }
  return size;
}

//-----------------------------------------------------------------------------
// Exceptions

void TemplateInterpreterGenerator::generate_throw_exception() {
  // Entry point in previous activation (i.e., if the caller was
  // interpreted)
  Interpreter::_rethrow_exception_entry = __ pc();
  // Restore sp to interpreter_frame_last_sp even though we are going
  // to empty the expression stack for the exception processing.
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize),
            (int32_t)NULL_WORD);
  // rax: exception
  // rdx: return address/pc that threw exception
  __ restore_bcp();    // r13 points to call/send
  __ restore_locals();
  __ reinit_heapbase();  // restore r12 as heapbase.
  // Entry point for exceptions thrown within interpreter code
  Interpreter::_throw_exception_entry = __ pc();
  // expression stack is undefined here
  // rax: exception
  // r13: exception bcp
  __ verify_oop(rax);
  __ mov(c_rarg1, rax);

  // expression stack must be empty before entering the VM in case of
  // an exception
  __ empty_expression_stack();
  // find exception handler address and preserve exception oop
  __ call_VM(rdx,
             CAST_FROM_FN_PTR(address,
                          InterpreterRuntime::exception_handler_for_exception),
             c_rarg1);
  // rax: exception handler entry point
  // rdx: preserved exception oop
  // r13: bcp for exception handler
  __ push_ptr(rdx); // push exception which is now the only value on the stack
  __ jmp(rax); // jump to exception handler (may be _remove_activation_entry!)

  // If the exception is not handled in the current frame the frame is
  // removed and the exception is rethrown (i.e. exception
  // continuation is _rethrow_exception).
  //
  // Note: At this point the bci is still that of the instruction
  // which caused the exception and the expression stack is
  // empty. Thus, for any VM calls at this point, GC will find a legal
  // oop map (with empty expression stack).

  // In current activation
  // tos: exception
  // r13: exception bcp

  //
  // JVMTI PopFrame support
  //

  Interpreter::_remove_activation_preserving_args_entry = __ pc();
  __ empty_expression_stack();
  // Set the popframe_processing bit in pending_popframe_condition
  // indicating that we are currently handling popframe, so that
  // call_VMs that may happen later do not trigger new popframe
  // handling cycles.
  __ movl(rdx, Address(r15_thread, JavaThread::popframe_condition_offset()));
  __ orl(rdx, JavaThread::popframe_processing_bit);
  __ movl(Address(r15_thread, JavaThread::popframe_condition_offset()), rdx);

  {
    // Check to see whether we are returning to a deoptimized frame.
    // (The PopFrame call ensures that the caller of the popped frame is
    // either interpreted or compiled and deoptimizes it if compiled.)
    // In this case, we can't call dispatch_next() after the frame is
    // popped, but instead must save the incoming arguments and restore
    // them after deoptimization has occurred.
    //
    // Note that we don't compare the return PC against the
    // deoptimization blob's unpack entry because of the presence of
    // adapter frames in C2.
    Label caller_not_deoptimized;
    __ movptr(c_rarg1, Address(rbp, frame::return_addr_offset * wordSize));
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                               InterpreterRuntime::interpreter_contains), c_rarg1);
    __ testl(rax, rax);
    __ jcc(Assembler::notZero, caller_not_deoptimized);

    // Compute size of arguments for saving when returning to
    // deoptimized caller
    __ get_method(rax);
    __ movptr(rax, Address(rax, Method::const_offset()));
    __ load_unsigned_short(rax, Address(rax, in_bytes(ConstMethod::
                                                size_of_parameters_offset())));
    __ shll(rax, Interpreter::logStackElementSize);
    __ restore_locals(); // XXX do we need this?
    __ subptr(r14, rax);
    __ addptr(r14, wordSize);
    // Save these arguments
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                                           Deoptimization::
                                           popframe_preserve_args),
                          r15_thread, rax, r14);

    __ remove_activation(vtos, rdx,
                         /* throw_monitor_exception */ false,
                         /* install_monitor_exception */ false,
                         /* notify_jvmdi */ false);

    // Inform deoptimization that it is responsible for restoring
    // these arguments
    __ movl(Address(r15_thread, JavaThread::popframe_condition_offset()),
            JavaThread::popframe_force_deopt_reexecution_bit);

    // Continue in deoptimization handler
    __ jmp(rdx);

    __ bind(caller_not_deoptimized);
  }

  __ remove_activation(vtos, rdx, /* rdx result (retaddr) is not used */
                       /* throw_monitor_exception */ false,
                       /* install_monitor_exception */ false,
                       /* notify_jvmdi */ false);
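
  // Illustration of the argument-size computation in the
  // deoptimized-caller path above (hypothetical numbers, for
  // illustration only): for a method with size_of_parameters == 2,
  // rax becomes 2 << Interpreter::logStackElementSize == 16 bytes on
  // x86_64, and r14 is adjusted to point at the base of the parameter
  // area that Deoptimization::popframe_preserve_args copies out.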

  // Finish with popframe handling
  // A previous I2C followed by a deoptimization might have moved the
  // outgoing arguments further up the stack. PopFrame expects the
  // mutations to those outgoing arguments to be preserved and other
  // constraints basically require this frame to look exactly as
  // though it had previously invoked an interpreted activation with
  // no space between the top of the expression stack (current
  // last_sp) and the top of stack. Rather than force deopt to
  // maintain this kind of invariant all the time we call a small
  // fixup routine to move the mutated arguments onto the top of our
  // expression stack if necessary.
  __ mov(c_rarg1, rsp);
  __ movptr(c_rarg2, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  // PC must point into interpreter here
  __ set_last_Java_frame(noreg, rbp, __ pc());
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), r15_thread, c_rarg1, c_rarg2);
  __ reset_last_Java_frame(true, true);
  // Restore the last_sp and null it out
  __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize),
            (int32_t)NULL_WORD);

  __ restore_bcp();    // XXX do we need this?
  __ restore_locals(); // XXX do we need this?
  // The method data pointer was incremented already during
  // call profiling. We have to restore the mdp for the current bcp.
  if (ProfileInterpreter) {
    __ set_method_data_pointer_for_bcp();
  }

  // Clear the popframe condition flag
  __ movl(Address(r15_thread, JavaThread::popframe_condition_offset()),
          JavaThread::popframe_inactive);

  __ dispatch_next(vtos);
  // end of PopFrame support

  Interpreter::_remove_activation_entry = __ pc();

  // preserve exception over this code sequence
  __ pop_ptr(rax);
  __ movptr(Address(r15_thread, JavaThread::vm_result_offset()), rax);
  // remove the activation (without doing throws on illegalMonitorExceptions)
  __ remove_activation(vtos, rdx, false, true, false);
  // restore exception
  __ get_vm_result(rax, r15_thread);

  // In between activations - previous activation type unknown yet
  // compute continuation point - the continuation point expects the
  // following registers set up:
  //
  // rax: exception
  // rdx: return address/pc that threw exception
  // rsp: expression stack of caller
  // rbp: ebp of caller
  __ push(rax);                                  // save exception
  __ push(rdx);                                  // save return address
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                          SharedRuntime::exception_handler_for_return_address),
                        r15_thread, rdx);
  __ mov(rbx, rax);                              // save exception handler
  __ pop(rdx);                                   // restore return address
  __ pop(rax);                                   // restore exception
  // Note that an "issuing PC" is actually the next PC after the call
  __ jmp(rbx);                                   // jump to exception
                                                 // handler of caller
}
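
// Informal summary of the entry points generated above (commentary
// only, nothing is emitted here): exceptions raised in interpreted
// code enter at _throw_exception_entry (or _rethrow_exception_entry
// when rethrown from a callee); if no handler exists in the current
// activation, control reaches _remove_activation_entry, which pops
// the frame, asks SharedRuntime::exception_handler_for_return_address
// for the continuation point in the caller, and jumps there with the
// exception oop back in rax.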

//
// JVMTI ForceEarlyReturn support
//
address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
  address entry = __ pc();

  __ restore_bcp();
  __ restore_locals();
  __ empty_expression_stack();
  __ load_earlyret_value(state);

  __ movptr(rdx, Address(r15_thread, JavaThread::jvmti_thread_state_offset()));
  Address cond_addr(rdx, JvmtiThreadState::earlyret_state_offset());

  // Clear the earlyret state
  __ movl(cond_addr, JvmtiThreadState::earlyret_inactive);

  __ remove_activation(state, rsi,
                       false, /* throw_monitor_exception */
                       false, /* install_monitor_exception */
                       true); /* notify_jvmdi */
  __ jmp(rsi);

  return entry;
} // end of ForceEarlyReturn support


//-----------------------------------------------------------------------------
// Helper for vtos entry point generation

void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
                                                         address& bep,
                                                         address& cep,
                                                         address& sep,
                                                         address& aep,
                                                         address& iep,
                                                         address& lep,
                                                         address& fep,
                                                         address& dep,
                                                         address& vep) {
  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
  // Each typed entry pushes its tos-cached value onto the expression
  // stack and falls through to the common vtos entry point.
  Label L;
  aep = __ pc();  __ push_ptr();  __ jmp(L);
  fep = __ pc();  __ push_f();    __ jmp(L);
  dep = __ pc();  __ push_d();    __ jmp(L);
  lep = __ pc();  __ push_l();    __ jmp(L);
  bep = cep = sep =
  iep = __ pc();  __ push_i();
  vep = __ pc();
  __ bind(L);
  generate_and_dispatch(t);
}


//-----------------------------------------------------------------------------
// Generation of individual instructions

// helpers for generate_and_dispatch


InterpreterGenerator::InterpreterGenerator(StubQueue* code)
  : TemplateInterpreterGenerator(code) {
   generate_all(); // down here so it can be "virtual"
}

//-----------------------------------------------------------------------------

// Non-product code
#ifndef PRODUCT
address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
  address entry = __ pc();

  __ push(state);
  __ push(c_rarg0);
  __ push(c_rarg1);
  __ push(c_rarg2);
  __ push(c_rarg3);
  __ mov(c_rarg2, rax);  // Pass itos
#ifdef _WIN64
  __ movflt(xmm3, xmm0); // Pass ftos
#endif
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode),
             c_rarg1, c_rarg2, c_rarg3);
  __ pop(c_rarg3);
  __ pop(c_rarg2);
  __ pop(c_rarg1);
  __ pop(c_rarg0);
  __ pop(state);
  __ ret(0); // return from result handler

  return entry;
}

void TemplateInterpreterGenerator::count_bytecode() {
  __ incrementl(ExternalAddress((address) &BytecodeCounter::_counter_value));
}

void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
  __ incrementl(ExternalAddress((address) &BytecodeHistogram::_counters[t->bytecode()]));
}

void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
  __ mov32(rbx, ExternalAddress((address) &BytecodePairHistogram::_index));
  __ shrl(rbx, BytecodePairHistogram::log2_number_of_codes);
  __ orl(rbx,
         ((int) t->bytecode()) <<
         BytecodePairHistogram::log2_number_of_codes);
  __ mov32(ExternalAddress((address) &BytecodePairHistogram::_index), rbx);
  __ lea(rscratch1, ExternalAddress((address) BytecodePairHistogram::_counters));
  __ incrementl(Address(rscratch1, rbx, Address::times_4));
}
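
// Note on the pair index computed above: _index holds the key of the
// previously recorded pair. Shifting right by log2_number_of_codes
// leaves the previous bytecode in the low bits, and or-ing in
// (current_bytecode << log2_number_of_codes) yields a key encoding
// the (previous, current) bytecode pair, which indexes _counters.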

void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
  // Call a little run-time stub to avoid blow-up for each bytecode.
  // The run-time stub saves the right registers, depending on
  // the tosca in-state for the given template.

  assert(Interpreter::trace_code(t->tos_in()) != NULL,
         "entry must have been generated");
  __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
  __ andptr(rsp, -16); // align stack as required by ABI
  __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
  __ mov(rsp, r12); // restore sp
  __ reinit_heapbase();
}


void TemplateInterpreterGenerator::stop_interpreter_at() {
  Label L;
  __ cmp32(ExternalAddress((address) &BytecodeCounter::_counter_value),
           StopInterpreterAt);
  __ jcc(Assembler::notEqual, L);
  __ int3();
  __ bind(L);
}
#endif // !PRODUCT
#endif // ! CC_INTERP