/*
 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterGenerator.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateTable.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodDataOop.hpp"
#include "oops/methodOop.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/debug.hpp"

#define __ _masm->


#ifndef CC_INTERP
const int method_offset = frame::interpreter_frame_method_offset * wordSize;
const int bci_offset    = frame::interpreter_frame_bcx_offset    * wordSize;
const int locals_offset = frame::interpreter_frame_locals_offset * wordSize;

//------------------------------------------------------------------------------------------------------------------------

address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();

  // Note: There should be a minimal interpreter frame set up when stack
  // overflow occurs since we check explicitly for it now.
  //
#ifdef ASSERT
  { Label L;
    __ lea(rax, Address(rbp,
                frame::interpreter_frame_monitor_block_top_offset * wordSize));
    __ cmpptr(rax, rsp);              // rax = maximal rsp for current rbp
                                      // (stack grows downward)
    __ jcc(Assembler::aboveEqual, L); // check if frame is complete
    __ stop ("interpreter frame not set up");
    __ bind(L);
  }
#endif // ASSERT
  // Restore bcp under the assumption that the current frame is still
  // interpreted
  __ restore_bcp();

  // expression stack must be empty before entering the VM if an exception
  // happened
  __ empty_expression_stack();
  __ empty_FPU_stack();
  // throw exception
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError));
  return entry;
}

address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(const char* name) {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();
  __ empty_FPU_stack();
  // setup parameters
  // ??? convention: expect the aberrant index in register rbx
  __ lea(rax, ExternalAddress((address)name));
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException), rax, rbx);
  return entry;
}

address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();
  // object is at TOS
  __ pop(rax);
  // expression stack must be empty before entering the VM if an exception
  // happened
  __ empty_expression_stack();
  __ empty_FPU_stack();
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::throw_ClassCastException),
             rax);
  return entry;
}

// Arguments are: required type at TOS+4, failing object (or NULL) at TOS.
address TemplateInterpreterGenerator::generate_WrongMethodType_handler() {
  address entry = __ pc();

  __ pop(rbx);                  // actual failing object is at TOS
  __ pop(rax);                  // required type is at TOS+4

  __ verify_oop(rbx);
  __ verify_oop(rax);

  // Various method handle types use interpreter registers as temps.
  __ restore_bcp();
  __ restore_locals();

  // Expression stack must be empty before entering the VM for an exception.
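  // (The x87 stack is cleared as well, matching the other exception handlers
  // above.)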
  __ empty_expression_stack();
  __ empty_FPU_stack();
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::throw_WrongMethodTypeException),
             // pass required type, failing object (or NULL)
             rax, rbx);
  return entry;
}


address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) {
  assert(!pass_oop || message == NULL, "either oop or message but not both");
  address entry = __ pc();
  if (pass_oop) {
    // object is at TOS
    __ pop(rbx);
  }
  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();
  __ empty_FPU_stack();
  // setup parameters
  __ lea(rax, ExternalAddress((address)name));
  if (pass_oop) {
    __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_klass_exception), rax, rbx);
  } else {
    if (message != NULL) {
      __ lea(rbx, ExternalAddress((address)message));
    } else {
      __ movptr(rbx, NULL_WORD);
    }
    __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), rax, rbx);
  }
  // throw exception
  __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
  return entry;
}


address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
  address entry = __ pc();
  // NULL last_sp until next java call
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
  __ dispatch_next(state);
  return entry;
}


address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step) {
  TosState incoming_state = state;
  address entry = __ pc();

#ifdef COMPILER2
  // The FPU stack is clean if UseSSE >= 2 but must be cleaned in other cases
  if ((incoming_state == ftos && UseSSE < 1) || (incoming_state == dtos && UseSSE < 2)) {
    for (int i = 1; i < 8; i++) {
      __ ffree(i);
    }
  } else if (UseSSE < 2) {
    __ empty_FPU_stack();
  }
#endif
  if ((incoming_state == ftos && UseSSE < 1) || (incoming_state == dtos && UseSSE < 2)) {
    __ MacroAssembler::verify_FPU(1, "generate_return_entry_for compiled");
  } else {
    __ MacroAssembler::verify_FPU(0, "generate_return_entry_for compiled");
  }

  // In SSE mode, the interpreter returns FP results in xmm0 but they need
  // to end up back on the FPU stack so the interpreter can operate on them.
  if (incoming_state == ftos && UseSSE >= 1) {
    __ subptr(rsp, wordSize);
    __ movflt(Address(rsp, 0), xmm0);
    __ fld_s(Address(rsp, 0));
    __ addptr(rsp, wordSize);
  } else if (incoming_state == dtos && UseSSE >= 2) {
    __ subptr(rsp, 2*wordSize);
    __ movdbl(Address(rsp, 0), xmm0);
    __ fld_d(Address(rsp, 0));
    __ addptr(rsp, 2*wordSize);
  }

  __ MacroAssembler::verify_FPU(state == ftos || state == dtos ?
                                1 : 0, "generate_return_entry_for in interpreter");

  // Restore stack bottom in case i2c adjusted stack
  __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  // and NULL it as marker that rsp is now tos until next java call
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);

  __ restore_bcp();
  __ restore_locals();

  Label L_got_cache, L_giant_index;
  if (EnableInvokeDynamic) {
    __ cmpb(Address(rsi, 0), Bytecodes::_invokedynamic);
    __ jcc(Assembler::equal, L_giant_index);
  }
  __ get_cache_and_index_at_bcp(rbx, rcx, 1, sizeof(u2));
  __ bind(L_got_cache);
  __ movl(rbx, Address(rbx, rcx,
                       Address::times_ptr, constantPoolCacheOopDesc::base_offset() +
                       ConstantPoolCacheEntry::flags_offset()));
  __ andptr(rbx, 0xFF);
  __ lea(rsp, Address(rsp, rbx, Interpreter::stackElementScale()));
  __ dispatch_next(state, step);

  // out of the main line of code...
  if (EnableInvokeDynamic) {
    __ bind(L_giant_index);
    __ get_cache_and_index_at_bcp(rbx, rcx, 1, sizeof(u4));
    __ jmp(L_got_cache);
  }

  return entry;
}


address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step) {
  address entry = __ pc();

  // In SSE mode, FP results are in xmm0
  if (state == ftos && UseSSE > 0) {
    __ subptr(rsp, wordSize);
    __ movflt(Address(rsp, 0), xmm0);
    __ fld_s(Address(rsp, 0));
    __ addptr(rsp, wordSize);
  } else if (state == dtos && UseSSE >= 2) {
    __ subptr(rsp, 2*wordSize);
    __ movdbl(Address(rsp, 0), xmm0);
    __ fld_d(Address(rsp, 0));
    __ addptr(rsp, 2*wordSize);
  }

  __ MacroAssembler::verify_FPU(state == ftos || state == dtos ? 1 : 0, "generate_deopt_entry_for in interpreter");

  // The stack is not extended by deopt but we must NULL last_sp as this
  // entry is like a "return".
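  // A NULL last_sp marks rsp as the current tos until the next Java call;
  // generate_continuation_for and generate_return_entry_for maintain the same
  // invariant.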
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
  __ restore_bcp();
  __ restore_locals();
  // handle exceptions
  { Label L;
    const Register thread = rcx;
    __ get_thread(thread);
    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::zero, L);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }
  __ dispatch_next(state, step);
  return entry;
}


int AbstractInterpreter::BasicType_as_index(BasicType type) {
  int i = 0;
  switch (type) {
    case T_BOOLEAN: i = 0; break;
    case T_CHAR   : i = 1; break;
    case T_BYTE   : i = 2; break;
    case T_SHORT  : i = 3; break;
    case T_INT    : // fall through
    case T_LONG   : // fall through
    case T_VOID   : i = 4; break;
    case T_FLOAT  : i = 5; break;  // have to treat float and double separately for SSE
    case T_DOUBLE : i = 6; break;
    case T_OBJECT : // fall through
    case T_ARRAY  : i = 7; break;
    default       : ShouldNotReachHere();
  }
  assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers, "index out of bounds");
  return i;
}


address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type) {
  address entry = __ pc();
  switch (type) {
    case T_BOOLEAN: __ c2bool(rax);            break;
    case T_CHAR   : __ andptr(rax, 0xFFFF);    break;
    case T_BYTE   : __ sign_extend_byte (rax); break;
    case T_SHORT  : __ sign_extend_short(rax); break;
    case T_INT    : /* nothing to do */        break;
    case T_DOUBLE :
    case T_FLOAT  :
      { const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp();
        __ pop(t);                            // remove return address first
        // Must return a result for interpreter or compiler. In SSE
        // mode, results are returned in xmm0 and the FPU stack must
        // be empty.
        if (type == T_FLOAT && UseSSE >= 1) {
          // Load ST0
          __ fld_d(Address(rsp, 0));
          // Store as float and empty fpu stack
          __ fstp_s(Address(rsp, 0));
          // and reload
          __ movflt(xmm0, Address(rsp, 0));
        } else if (type == T_DOUBLE && UseSSE >= 2) {
          __ movdbl(xmm0, Address(rsp, 0));
        } else {
          // restore ST0
          __ fld_d(Address(rsp, 0));
        }
        // and pop the temp
        __ addptr(rsp, 2 * wordSize);
        __ push(t);                           // restore return address
      }
      break;
    case T_OBJECT :
      // retrieve result from frame
      __ movptr(rax, Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize));
      // and verify it
      __ verify_oop(rax);
      break;
    default       : ShouldNotReachHere();
  }
  __ ret(0);                                  // return from result handler
  return entry;
}

address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state, address runtime_entry) {
  address entry = __ pc();
  __ push(state);
  __ call_VM(noreg, runtime_entry);
  __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
  return entry;
}


// Helpers for commoning out cases in the various types of method entries.
//

// increment invocation count & check for overflow
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test
//
// rbx: method
// rcx: invocation counter
//
void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
  const Address invocation_counter(rbx, in_bytes(methodOopDesc::invocation_counter_offset()) +
                                        in_bytes(InvocationCounter::counter_offset()));
  // Note: In tiered we increment either counters in methodOop or in MDO depending on whether we're profiling or not.
  if (TieredCompilation) {
    int increment = InvocationCounter::count_increment;
    int mask = ((1 << Tier0InvokeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
    Label no_mdo, done;
    if (ProfileInterpreter) {
      // Are we profiling?
      __ movptr(rax, Address(rbx, methodOopDesc::method_data_offset()));
      __ testptr(rax, rax);
      __ jccb(Assembler::zero, no_mdo);
      // Increment counter in the MDO
      const Address mdo_invocation_counter(rax, in_bytes(methodDataOopDesc::invocation_counter_offset()) +
                                                in_bytes(InvocationCounter::counter_offset()));
      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rcx, false, Assembler::zero, overflow);
      __ jmpb(done);
    }
    __ bind(no_mdo);
    // Increment counter in methodOop (we don't need to load it, it's in rcx).
    __ increment_mask_and_jump(invocation_counter, increment, mask, rcx, true, Assembler::zero, overflow);
    __ bind(done);
  } else {
    const Address backedge_counter(rbx, methodOopDesc::backedge_counter_offset() +
                                        InvocationCounter::counter_offset());

    if (ProfileInterpreter) { // %%% Merge this into methodDataOop
      __ incrementl(Address(rbx, methodOopDesc::interpreter_invocation_counter_offset()));
    }
    // Update standard invocation counters
    __ movl(rax, backedge_counter);                     // load backedge counter

    __ incrementl(rcx, InvocationCounter::count_increment);
    __ andl(rax, InvocationCounter::count_mask_value);  // mask out the status bits

    __ movl(invocation_counter, rcx);                   // save invocation count
    __ addl(rcx, rax);                                  // add both counters

    // profile_method is non-null only for interpreted methods, so
    // profile_method != NULL == !native_call.
    // BytecodeInterpreter only calls this for native methods, so the code is elided.

    if (ProfileInterpreter && profile_method != NULL) {
      // Test to see if we should create a method data oop
      __ cmp32(rcx,
               ExternalAddress((address)&InvocationCounter::InterpreterProfileLimit));
      __ jcc(Assembler::less, *profile_method_continue);

      // if no method data exists, go to profile_method
      __ test_method_data_pointer(rax, *profile_method);
    }

    __ cmp32(rcx,
             ExternalAddress((address)&InvocationCounter::InterpreterInvocationLimit));
    __ jcc(Assembler::aboveEqual, *overflow);
  }
}

void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {

  // Asm interpreter on entry
  // rdi - locals
  // rsi - bcp
  // rbx - method
  // rdx - cpool
  // rbp - interpreter frame

  // C++ interpreter on entry
  // rsi - new interpreter state pointer
  // rbp - interpreter frame pointer
  // rbx - method
  // On return (i.e. jump to entry_point) [ back to invocation of interpreter ]
  // rbx - method
  // rcx - rcvr (assuming there is one)
  // top of stack - return address of interpreter caller
  // rsp - sender_sp

  // C++ interpreter only
  // rsi - previous interpreter state pointer

  const Address size_of_parameters(rbx, methodOopDesc::size_of_parameters_offset());

  // InterpreterRuntime::frequency_counter_overflow takes one argument
  // indicating whether the counter overflow occurs at a backwards branch (non-NULL bcp).
  // The call returns the address of the verified entry point for the method or NULL
  // if the compilation did not complete (either went background or bailed out).
  __ movptr(rax, (intptr_t)false);
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), rax);

  __ movptr(rbx, Address(rbp, method_offset));   // restore methodOop

  // Preserve invariant that rsi/rdi contain bcp/locals of sender frame
  // and jump to the interpreted entry.
  __ jmp(*do_continue, relocInfo::none);

}

void InterpreterGenerator::generate_stack_overflow_check(void) {
  // see if we've got enough room on the stack for locals plus overhead.
  // the expression stack grows down incrementally, so the normal guard
  // page mechanism will work for that.
  //
  // Registers live on entry:
  //
  // Asm interpreter
  // rdx: number of additional locals this frame needs (what we must check)
  // rbx: methodOop

  // destroyed on exit
  // rax

  // NOTE: since the additional locals are also always pushed (this wasn't
  // obvious in generate_method_entry), the guard should work for them too.
  //

  // monitor entry size: see picture of stack set (generate_method_entry) and frame_x86.hpp
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

  // total overhead size: entry_size + (saved rbp thru expr stack bottom).
  // be sure to change this if you add/subtract anything to/from the overhead area
  const int overhead_size = -(frame::interpreter_frame_initial_sp_offset*wordSize) + entry_size;

  const int page_size = os::vm_page_size();

  Label after_frame_check;

  // see if the frame is greater than one page in size. If so,
  // then we need to verify there is enough stack space remaining
  // for the additional locals.
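  // (Illustrative arithmetic only: with 4K pages, a frame whose additional
  // locals fit within (page_size - overhead_size) bytes takes the fast path,
  // since the existing guard page already covers a one-page frame.)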
  __ cmpl(rdx, (page_size - overhead_size)/Interpreter::stackElementSize);
  __ jcc(Assembler::belowEqual, after_frame_check);

  // compute rsp as if this were going to be the last frame on
  // the stack before the red zone

  Label after_frame_check_pop;

  __ push(rsi);

  const Register thread = rsi;

  __ get_thread(thread);

  const Address stack_base(thread, Thread::stack_base_offset());
  const Address stack_size(thread, Thread::stack_size_offset());

  // locals + overhead, in bytes
  __ lea(rax, Address(noreg, rdx, Interpreter::stackElementScale(), overhead_size));

#ifdef ASSERT
  Label stack_base_okay, stack_size_okay;
  // verify that thread stack base is non-zero
  __ cmpptr(stack_base, (int32_t)NULL_WORD);
  __ jcc(Assembler::notEqual, stack_base_okay);
  __ stop("stack base is zero");
  __ bind(stack_base_okay);
  // verify that thread stack size is non-zero
  __ cmpptr(stack_size, 0);
  __ jcc(Assembler::notEqual, stack_size_okay);
  __ stop("stack size is zero");
  __ bind(stack_size_okay);
#endif

  // Add stack base to locals and subtract stack size
  __ addptr(rax, stack_base);
  __ subptr(rax, stack_size);

  // Use the maximum number of pages we might bang.
  const int max_pages = StackShadowPages > (StackRedPages+StackYellowPages) ? StackShadowPages :
                                                                              (StackRedPages+StackYellowPages);
  __ addptr(rax, max_pages * page_size);

  // check against the current stack bottom
  __ cmpptr(rsp, rax);
  __ jcc(Assembler::above, after_frame_check_pop);

  __ pop(rsi);  // get saved bcp / (c++ prev state).

  __ pop(rax);  // get return address
  __ jump(ExternalAddress(Interpreter::throw_StackOverflowError_entry()));

  // all done with frame size check
  __ bind(after_frame_check_pop);
  __ pop(rsi);

  __ bind(after_frame_check);
}

// Allocate monitor and lock method (asm interpreter)
// rbx - methodOop
//
void InterpreterGenerator::lock_method(void) {
  // synchronize method
  const Address access_flags      (rbx, methodOopDesc::access_flags_offset());
  const Address monitor_block_top (rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

#ifdef ASSERT
  { Label L;
    __ movl(rax, access_flags);
    __ testl(rax, JVM_ACC_SYNCHRONIZED);
    __ jcc(Assembler::notZero, L);
    __ stop("method doesn't need synchronization");
    __ bind(L);
  }
#endif // ASSERT
  // get synchronization object
  { Label done;
    const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();
    __ movl(rax, access_flags);
    __ testl(rax, JVM_ACC_STATIC);
    __ movptr(rax, Address(rdi, Interpreter::local_offset_in_bytes(0)));  // get receiver (assume this is a frequent case)
    __ jcc(Assembler::zero, done);
    __ movptr(rax, Address(rbx, methodOopDesc::constants_offset()));
    __ movptr(rax, Address(rax, constantPoolOopDesc::pool_holder_offset_in_bytes()));
    __ movptr(rax, Address(rax, mirror_offset));
    __ bind(done);
  }
  // add space for monitor & lock
  __ subptr(rsp, entry_size);                                           // add space for a monitor entry
  __ movptr(monitor_block_top, rsp);                                    // set new monitor block top
  __ movptr(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax); // store object
  __ mov(rdx, rsp);                                                     // object address
  __ lock_object(rdx);
}

//
// Generate a fixed interpreter frame. This is an identical setup for interpreted methods
// and for native methods, hence the shared code.

void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  // initialize fixed part of activation frame
  __ push(rax);                                       // save return address
  __ enter();                                         // save old & set new rbp


  __ push(rsi);                                       // set sender sp
  __ push((int32_t)NULL_WORD);                        // leave last_sp as null
  __ movptr(rsi, Address(rbx, methodOopDesc::const_offset()));    // get constMethodOop
  __ lea(rsi, Address(rsi, constMethodOopDesc::codes_offset()));  // get codebase
  __ push(rbx);                                       // save methodOop
  if (ProfileInterpreter) {
    Label method_data_continue;
    __ movptr(rdx, Address(rbx, in_bytes(methodOopDesc::method_data_offset())));
    __ testptr(rdx, rdx);
    __ jcc(Assembler::zero, method_data_continue);
    __ addptr(rdx, in_bytes(methodDataOopDesc::data_offset()));
    __ bind(method_data_continue);
    __ push(rdx);                                     // set the mdp (method data pointer)
  } else {
    __ push(0);
  }

  __ movptr(rdx, Address(rbx, methodOopDesc::constants_offset()));
  __ movptr(rdx, Address(rdx, constantPoolOopDesc::cache_offset_in_bytes()));
  __ push(rdx);                                       // set constant pool cache
  __ push(rdi);                                       // set locals pointer
  if (native_call) {
    __ push(0);                                       // no bcp
  } else {
    __ push(rsi);                                     // set bcp
  }
  __ push(0);                                         // reserve word for pointer to expression stack bottom
  __ movptr(Address(rsp, 0), rsp);                    // set expression stack bottom
}

// End of helpers

//
// Various method entries
//------------------------------------------------------------------------------------------------------------------------
//
//

// Call an accessor method (assuming it is resolved; otherwise drop into the vanilla (slow-path) entry)

address InterpreterGenerator::generate_accessor_entry(void) {

  // rbx: methodOop
  // rcx: receiver (preserve for slow entry into asm interpreter)

  // rsi: senderSP must be preserved for slow path; set SP to it on the fast path

  address entry_point = __ pc();
  Label xreturn_path;

  // do fastpath for resolved accessor methods
  if (UseFastAccessorMethods) {
    Label slow_path;
    // If we need a safepoint check, generate full interpreter entry.
    ExternalAddress state(SafepointSynchronize::address_of_state());
    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
             SafepointSynchronize::_not_synchronized);

    __ jcc(Assembler::notEqual, slow_path);
    // ASM/C++ Interpreter
    // Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites thereof; parameter size = 1
    // Note: We can only use this code if the getfield has been resolved
    //       and if we don't have a null-pointer exception => check for
    //       these conditions first and use slow path if necessary.
    // rbx: method
    // rcx: receiver
    __ movptr(rax, Address(rsp, wordSize));

    // check if local 0 != NULL and read field
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, slow_path);

    __ movptr(rdi, Address(rbx, methodOopDesc::constants_offset()));
    // read first instruction word and extract bytecode @ 1 and index @ 2
    __ movptr(rdx, Address(rbx, methodOopDesc::const_offset()));
    __ movl(rdx, Address(rdx, constMethodOopDesc::codes_offset()));
    // Shift codes right to get the index on the right.
    // The bytecode fetched looks like <index><0xb4><0x2a>
    __ shrl(rdx, 2*BitsPerByte);
    __ shll(rdx, exact_log2(in_words(ConstantPoolCacheEntry::size())));
    __ movptr(rdi, Address(rdi, constantPoolOopDesc::cache_offset_in_bytes()));

    // rax: local 0
    // rbx: method
    // rcx: receiver - do not destroy since it is needed for slow path!
    // rcx: scratch
    // rdx: constant pool cache index
    // rdi: constant pool cache
    // rsi: sender sp

    // check if getfield has been resolved and read constant pool cache entry;
    // check the validity of the cache entry by testing whether the _indices field
    // contains Bytecode::_getfield in the b1 byte.
    assert(in_words(ConstantPoolCacheEntry::size()) == 4, "adjust shift below");
    __ movl(rcx,
            Address(rdi,
                    rdx,
                    Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset()));
    __ shrl(rcx, 2*BitsPerByte);
    __ andl(rcx, 0xFF);
    __ cmpl(rcx, Bytecodes::_getfield);
    __ jcc(Assembler::notEqual, slow_path);

    // Note: constant pool entry is not valid before bytecode is resolved
    __ movptr(rcx,
              Address(rdi,
                      rdx,
                      Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f2_offset()));
    __ movl(rdx,
            Address(rdi,
                    rdx,
                    Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::flags_offset()));

    Label notByte, notShort, notChar;
    const Address field_address(rax, rcx, Address::times_1);

    // Need to differentiate between igetfield, agetfield, bgetfield etc.
    // because they are different sizes.
    // Use the type from the constant pool cache
    __ shrl(rdx, ConstantPoolCacheEntry::tosBits);
    // Make sure we don't need to mask rdx for tosBits after the above shift
    ConstantPoolCacheEntry::verify_tosBits();
    __ cmpl(rdx, btos);
    __ jcc(Assembler::notEqual, notByte);
    __ load_signed_byte(rax, field_address);
    __ jmp(xreturn_path);

    __ bind(notByte);
    __ cmpl(rdx, stos);
    __ jcc(Assembler::notEqual, notShort);
    __ load_signed_short(rax, field_address);
    __ jmp(xreturn_path);

    __ bind(notShort);
    __ cmpl(rdx, ctos);
    __ jcc(Assembler::notEqual, notChar);
    __ load_unsigned_short(rax, field_address);
    __ jmp(xreturn_path);

    __ bind(notChar);
#ifdef ASSERT
    Label okay;
    __ cmpl(rdx, atos);
    __ jcc(Assembler::equal, okay);
    __ cmpl(rdx, itos);
    __ jcc(Assembler::equal, okay);
    __ stop("what type is this?");
    __ bind(okay);
#endif // ASSERT
    // All the rest are 32-bit word sized.
    // This is ok for now, since fast accessors should be going away.
    __ movptr(rax, field_address);

    __ bind(xreturn_path);

    // _ireturn/_areturn
    __ pop(rdi);       // get return address
    __ mov(rsp, rsi);  // set sp to sender sp
    __ jmp(rdi);

    // generate a vanilla interpreter entry as the slow path
    __ bind(slow_path);

    (void) generate_normal_entry(false);
    return entry_point;
  }
  return NULL;

}

// Method entry for java.lang.ref.Reference.get.
address InterpreterGenerator::generate_Reference_get_entry(void) {
#ifndef SERIALGC
  // Code: _aload_0, _getfield, _areturn
  // parameter size = 1
  //
  // The code that gets generated by this routine is split into 2 parts:
  //    1. The "intrinsified" code for G1 (or any SATB based GC),
  //    2. The slow path - which is an expansion of the regular method entry.
  //
  // Notes:
  // * In the G1 code we do not check whether we need to block for
  //   a safepoint. If G1 is enabled then we must execute the specialized
  //   code for Reference.get (except when the Reference object is null),
  //   so that we can log the value in the referent field with an SATB
  //   update buffer.
  //   If the code for the getfield template is modified so that the
  //   G1 pre-barrier code is executed when the current method is
  //   Reference.get() then going through the normal method entry
  //   will be fine.
  // * The G1 code below can, however, check the receiver object (the instance
  //   of java.lang.Reference) and jump to the slow path if null. If the
  //   Reference object is null then we obviously cannot fetch the referent
  //   and so we don't need to call the G1 pre-barrier. Thus we can use the
  //   regular method entry code to generate the NPE.
  //
  // This code is based on generate_accessor_entry.

  // rbx: methodOop
  // rcx: receiver (preserve for slow entry into asm interpreter)

  // rsi: senderSP must be preserved for slow path; set SP to it on the fast path

  address entry = __ pc();

  const int referent_offset = java_lang_ref_Reference::referent_offset;
  guarantee(referent_offset > 0, "referent offset not initialized");

  if (UseG1GC) {
    Label slow_path;

    // Check if local 0 != NULL
    // If the receiver is null then it is OK to jump to the slow path.
    __ movptr(rax, Address(rsp, wordSize));
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, slow_path);

    // rax: local 0 (must be preserved across the G1 barrier call)
    //
    // rbx: method (at this point it's scratch)
    // rcx: receiver (at this point it's scratch)
    // rdx: scratch
    // rdi: scratch
    //
    // rsi: sender sp

    // Preserve the sender sp in case the pre-barrier
    // calls the runtime
    __ push(rsi);

    // Load the value of the referent field.
    const Address field_address(rax, referent_offset);
    __ movptr(rax, field_address);

    // Generate the G1 pre-barrier code to log the value of
    // the referent field in an SATB buffer.
    __ get_thread(rcx);
    __ g1_write_barrier_pre(noreg /* obj */,
                            rax /* pre_val */,
                            rcx /* thread */,
                            rbx /* tmp */,
                            true /* tosca_save */,
                            true /* expand_call */);

    // _areturn
    __ pop(rsi);       // get sender sp
    __ pop(rdi);       // get return address
    __ mov(rsp, rsi);  // set sp to sender sp
    __ jmp(rdi);

    __ bind(slow_path);
    (void) generate_normal_entry(false);

    return entry;
  }
#endif // SERIALGC

  // If G1 is not enabled then attempt to go through the accessor entry point;
  // Reference.get is an accessor
  return generate_accessor_entry();
}

//
// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the native method
// than the typical interpreter frame setup.
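// (Two extra zero-initialized slots are reserved below the fixed frame: one for
// the result handler and one oop temp for a mirror or JNI oop result; see the
// pushes below.)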
//

address InterpreterGenerator::generate_native_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls;

  // rbx: methodOop
  // rsi: sender sp
  // rsi: previous interpreter state (C++ interpreter) must be preserved
  address entry_point = __ pc();


  const Address size_of_parameters(rbx, methodOopDesc::size_of_parameters_offset());
  const Address invocation_counter(rbx, methodOopDesc::invocation_counter_offset() + InvocationCounter::counter_offset());
  const Address access_flags      (rbx, methodOopDesc::access_flags_offset());

  // get parameter size (always needed)
  __ load_unsigned_short(rcx, size_of_parameters);

  // native calls don't need the stack size check since they have no expression stack;
  // the arguments are already on the stack and we only add a handful of words
  // to the stack

  // rbx: methodOop
  // rcx: size of parameters
  // rsi: sender sp

  __ pop(rax);                                       // get return address
  // for natives the size of locals is zero

  // compute beginning of parameters (rdi)
  __ lea(rdi, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));


  // add 2 zero-initialized slots for native calls
  // NULL result handler
  __ push((int32_t)NULL_WORD);
  // NULL oop temp (mirror or jni oop result)
  __ push((int32_t)NULL_WORD);

  if (inc_counter) __ movl(rcx, invocation_counter);  // (pre-)fetch invocation count
  // initialize fixed part of activation frame

  generate_fixed_frame(true);

  // make sure method is native & not abstract
#ifdef ASSERT
  __ movl(rax, access_flags);
  {
    Label L;
    __ testl(rax, JVM_ACC_NATIVE);
    __ jcc(Assembler::notZero, L);
    __ stop("tried to execute non-native method as native");
    __ bind(L);
  }
  { Label L;
    __ testl(rax, JVM_ACC_ABSTRACT);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // Since at this point in the method invocation the exception handler
  // would try to exit the monitor of synchronized methods which has not
  // been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation will
  // check this flag.

  __ get_thread(rax);
  const Address do_not_unlock_if_synchronized(rax,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ movbool(do_not_unlock_if_synchronized, true);

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  bang_stack_shadow_pages(true);

  // reset the _do_not_unlock_if_synchronized flag
  __ get_thread(rax);
  __ movbool(do_not_unlock_if_synchronized, false);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so the method is not locked if the counters overflow.
  //
  if (synchronized) {
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    { Label L;
      __ movl(rax, access_flags);
      __ testl(rax, JVM_ACC_SYNCHRONIZED);
      __ jcc(Assembler::zero, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  { Label L;
    const Address monitor_block_top(rbp,
                frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ movptr(rax, monitor_block_top);
    __ cmpptr(rax, rsp);
    __ jcc(Assembler::equal, L);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif

  // jvmti/dtrace support
  __ notify_method_entry();

  // work registers
  const Register method = rbx;
  const Register thread = rdi;
  const Register t      = rcx;

  // allocate space for parameters
  __ get_method(method);
  __ verify_oop(method);
  __ load_unsigned_short(t, Address(method, methodOopDesc::size_of_parameters_offset()));
  __ shlptr(t, Interpreter::logStackElementSize);
  __ addptr(t, 2*wordSize);                 // allocate two more slots for JNIEnv and possible mirror
  __ subptr(rsp, t);
  __ andptr(rsp, -(StackAlignmentInBytes)); // gcc needs 16-byte aligned stacks to do XMM intrinsics

  // get signature handler
  { Label L;
    __ movptr(t, Address(method, methodOopDesc::signature_handler_offset()));
    __ testptr(t, t);
    __ jcc(Assembler::notZero, L);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), method);
    __ get_method(method);
    __ movptr(t, Address(method, methodOopDesc::signature_handler_offset()));
    __ bind(L);
  }

  // call signature handler
  assert(InterpreterRuntime::SignatureHandlerGenerator::from() == rdi, "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::to  () == rsp, "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == t  , "adjust this code");
  // The generated handlers do not touch RBX (the method oop).
  // However, large signatures cannot be cached and are generated
  // each time here. The slow-path generator will blow RBX
  // sometime, so we must reload it after the call.
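  // The handler copies the Java arguments from the locals area (rdi) into the
  // outgoing C argument area at rsp, as the asserts above document.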
  __ call(t);
  __ get_method(method);        // slow path call blows RBX on DevStudio 5.0

  // result handler is in rax
  // set result handler
  __ movptr(Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize), rax);

  // pass mirror handle if static call
  { Label L;
    const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();
    __ movl(t, Address(method, methodOopDesc::access_flags_offset()));
    __ testl(t, JVM_ACC_STATIC);
    __ jcc(Assembler::zero, L);
    // get mirror
    __ movptr(t, Address(method, methodOopDesc::constants_offset()));
    __ movptr(t, Address(t, constantPoolOopDesc::pool_holder_offset_in_bytes()));
    __ movptr(t, Address(t, mirror_offset));
    // copy mirror into activation frame
    __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize), t);
    // pass handle to mirror
    __ lea(t, Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
    __ movptr(Address(rsp, wordSize), t);
    __ bind(L);
  }

  // get native function entry point
  { Label L;
    __ movptr(rax, Address(method, methodOopDesc::native_function_offset()));
    ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
    __ cmpptr(rax, unsatisfied.addr());
    __ jcc(Assembler::notEqual, L);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), method);
    __ get_method(method);
    __ verify_oop(method);
    __ movptr(rax, Address(method, methodOopDesc::native_function_offset()));
    __ bind(L);
  }

  // pass JNIEnv
  __ get_thread(thread);
  __ lea(t, Address(thread, JavaThread::jni_environment_offset()));
  __ movptr(Address(rsp, 0), t);

  // set_last_Java_frame_before_call
  // It is enough that the pc() points into the right code segment.
  // It does not have to be the correct return pc.
  __ set_last_Java_frame(thread, noreg, rbp, __ pc());

  // change thread state
#ifdef ASSERT
  { Label L;
    __ movl(t, Address(thread, JavaThread::thread_state_offset()));
    __ cmpl(t, _thread_in_Java);
    __ jcc(Assembler::equal, L);
    __ stop("Wrong thread state in native stub");
    __ bind(L);
  }
#endif

  // Change state to native
  __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native);
  __ call(rax);

  // result potentially in rdx:rax or ST0

  // Either restore the MXCSR register after returning from the JNI call
  // or verify that it wasn't changed.
  if (VM_Version::supports_sse()) {
    if (RestoreMXCSROnJNICalls) {
      __ ldmxcsr(ExternalAddress(StubRoutines::addr_mxcsr_std()));
    }
    else if (CheckJNICalls) {
      __ call(RuntimeAddress(StubRoutines::x86::verify_mxcsr_entry()));
    }
  }

  // Either restore the x87 floating point control word after returning
  // from the JNI call or verify that it wasn't changed.
  if (CheckJNICalls) {
    __ call(RuntimeAddress(StubRoutines::x86::verify_fpu_cntrl_wrd_entry()));
  }

  // save potential result in ST(0) & rdx:rax
  // (if result handler is the T_FLOAT or T_DOUBLE handler, result must be in ST0 -
  // the check is necessary to avoid potential Intel FPU overflow problems by saving/restoring 'empty' FPU registers)
  // It is safe to do this push because state is _thread_in_native and the return address will be found
  // via _last_native_pc and not via _last_java_sp

  // NOTE: the order of these pushes is known to frame::interpreter_frame_result.
  // If the order changes or anything else is added to the stack the code in
  // interpreter_frame_result will have to be changed.

  { Label L;
    Label push_double;
    ExternalAddress float_handler(AbstractInterpreter::result_handler(T_FLOAT));
    ExternalAddress double_handler(AbstractInterpreter::result_handler(T_DOUBLE));
    __ cmpptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset + 1)*wordSize),
              float_handler.addr());
    __ jcc(Assembler::equal, push_double);
    __ cmpptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset + 1)*wordSize),
              double_handler.addr());
    __ jcc(Assembler::notEqual, L);
    __ bind(push_double);
    __ push(dtos);
    __ bind(L);
  }
  __ push(ltos);

  // change thread state
  __ get_thread(thread);
  __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native_trans);
  if (os::is_MP()) {
    if (UseMembar) {
      // Force this write out before the read below
      __ membar(Assembler::Membar_mask_bits(
           Assembler::LoadLoad | Assembler::LoadStore |
           Assembler::StoreLoad | Assembler::StoreStore));
    } else {
      // Write serialization page so VM thread can do a pseudo remote membar.
      // We use the current thread pointer to calculate a thread specific
      // offset to write to within the page. This minimizes bus traffic
      // due to cache line collision.
      __ serialize_memory(thread, rcx);
    }
  }

  if (AlwaysRestoreFPU) {
    // Make sure the control word is correct.
    __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
  }

  // check for safepoint operation in progress and/or pending suspend requests
  { Label Continue;

    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
             SafepointSynchronize::_not_synchronized);

    Label L;
    __ jcc(Assembler::notEqual, L);
    __ cmpl(Address(thread, JavaThread::suspend_flags_offset()), 0);
    __ jcc(Assembler::equal, Continue);
    __ bind(L);

    // Don't use call_VM as it will see a possible pending exception and forward it,
    // and never return here, preventing us from clearing _last_native_pc down below.
    // Also, we can't use call_VM_leaf either as it will check to see if rsi & rdi are
    // preserved and correspond to the bcp/locals pointers. So we do a runtime call
    // by hand.
    //
    __ push(thread);
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
                                            JavaThread::check_special_condition_for_native_trans)));
    __ increment(rsp, wordSize);
    __ get_thread(thread);

    __ bind(Continue);
  }

  // change thread state
  __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_Java);

  __ reset_last_Java_frame(thread, true, true);

  // reset handle block
  __ movptr(t, Address(thread, JavaThread::active_handles_offset()));
  __ movptr(Address(t, JNIHandleBlock::top_offset_in_bytes()), NULL_WORD);

  // If result was an oop then unbox and save it in the frame
  { Label L;
    Label no_oop, store_result;
    ExternalAddress handler(AbstractInterpreter::result_handler(T_OBJECT));
    __ cmpptr(Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize),
              handler.addr());
    __ jcc(Assembler::notEqual, no_oop);
    __ cmpptr(Address(rsp, 0), (int32_t)NULL_WORD);
    __ pop(ltos);
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, store_result);
    // unbox
    __ movptr(rax, Address(rax, 0));
    __ bind(store_result);
    __ movptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset)*wordSize), rax);
    // keep stack depth as expected by pushing oop which will eventually be discarded
    __ push(ltos);
    __ bind(no_oop);
  }

  {
    Label no_reguard;
    __ cmpl(Address(thread, JavaThread::stack_guard_state_offset()), JavaThread::stack_guard_yellow_disabled);
    __ jcc(Assembler::notEqual, no_reguard);

    __ pusha();
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
    __ popa();

    __ bind(no_reguard);
  }

  // restore rsi to have a legal interpreter frame,
  // i.e., bci == 0 <=> rsi == code_base()
  // Can't call_VM until bcp is within a reasonable range.
  __ get_method(method);      // method is junk from thread_in_native to now.
  __ verify_oop(method);
  __ movptr(rsi, Address(method, methodOopDesc::const_offset()));    // get constMethodOop
  __ lea(rsi, Address(rsi, constMethodOopDesc::codes_offset()));     // get codebase

  // handle exceptions (exception handling will handle unlocking!)
  { Label L;
    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::zero, L);
    // Note: At some point we may want to unify this with the code used in call_VM_base();
    //       i.e., we should use the StubRoutines::forward_exception code. For now this
    //       doesn't work here because the rsp is not correctly set at this point.
    __ MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  // do unlocking if necessary
  { Label L;
    __ movl(t, Address(method, methodOopDesc::access_flags_offset()));
    __ testl(t, JVM_ACC_SYNCHRONIZED);
    __ jcc(Assembler::zero, L);
    // the code below should be shared with the interpreter macro assembler implementation
    { Label unlock;
      // BasicObjectLock will be first in list, since this is a synchronized method. However, we need
      // to check that the object has not been unlocked by an explicit monitorexit bytecode.
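      // With exactly one monitor, its BasicObjectLock starts
      // sizeof(BasicObjectLock) bytes below the frame's initial sp.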
      const Address monitor(rbp, frame::interpreter_frame_initial_sp_offset * wordSize - (int)sizeof(BasicObjectLock));

      __ lea(rdx, monitor);                   // address of first monitor

      __ movptr(t, Address(rdx, BasicObjectLock::obj_offset_in_bytes()));
      __ testptr(t, t);
      __ jcc(Assembler::notZero, unlock);

      // Entry already unlocked, need to throw exception
      __ MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
      __ should_not_reach_here();

      __ bind(unlock);
      __ unlock_object(rdx);
    }
    __ bind(L);
  }

  // jvmti/dtrace support
  // Note: This must happen _after_ handling/throwing any exceptions since
  //       the exception handler code notifies the runtime of method exits
  //       too. If this happens before, method entry/exit notifications are
  //       not properly paired (was bug - gri 11/22/99).
  __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);

  // restore potential result in rdx:rax, call result handler to restore potential result in ST0 & handle result
  __ pop(ltos);
  __ movptr(t, Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize));
  __ call(t);

  // remove activation
  __ movptr(t, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp
  __ leave();                                // remove frame anchor
  __ pop(rdi);                               // get return address
  __ mov(rsp, t);                            // set sp to sender sp
  __ jmp(rdi);

  if (inc_counter) {
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(&continue_after_compile);
  }

  return entry_point;
}

//
// Generic interpreted method entry to (asm) interpreter
//
address InterpreterGenerator::generate_normal_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls;

  // rbx: methodOop
  // rsi: sender sp
  address entry_point = __ pc();


  const Address size_of_parameters(rbx, methodOopDesc::size_of_parameters_offset());
  const Address size_of_locals    (rbx, methodOopDesc::size_of_locals_offset());
  const Address invocation_counter(rbx, methodOopDesc::invocation_counter_offset() + InvocationCounter::counter_offset());
  const Address access_flags      (rbx, methodOopDesc::access_flags_offset());

  // get parameter size (always needed)
  __ load_unsigned_short(rcx, size_of_parameters);

  // rbx: methodOop
  // rcx: size of parameters

  // rsi: sender_sp (could differ from sp+wordSize if we were called via c2i)

  __ load_unsigned_short(rdx, size_of_locals);  // get size of locals in words
  __ subl(rdx, rcx);                            // rdx = no. of additional locals

  // see if we've got enough room on the stack for locals plus overhead.
  generate_stack_overflow_check();

  // get return address
  __ pop(rax);

  // compute beginning of parameters (rdi)
  __ lea(rdi, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));

  // rdx - # of additional locals
  // allocate space for locals
  // explicitly initialize locals
  {
    Label exit, loop;
    __ testl(rdx, rdx);
    __ jcc(Assembler::lessEqual, exit);  // do nothing if rdx <= 0
    __ bind(loop);
    __ push((int32_t)NULL_WORD);         // initialize local variables
    __ decrement(rdx);                   // until everything initialized
    __ jcc(Assembler::greater, loop);
    __ bind(exit);
  }

  if (inc_counter) __ movl(rcx, invocation_counter);  // (pre-)fetch invocation count
  // initialize fixed part of activation frame
  generate_fixed_frame(false);

  // make sure method is not native & not abstract
#ifdef ASSERT
  __ movl(rax, access_flags);
  {
    Label L;
    __ testl(rax, JVM_ACC_NATIVE);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute native method as non-native");
    __ bind(L);
  }
  { Label L;
    __ testl(rax, JVM_ACC_ABSTRACT);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // Since at this point in the method invocation the exception handler
  // would try to exit the monitor of synchronized methods which has not
  // been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation will
  // check this flag.

  __ get_thread(rax);
  const Address do_not_unlock_if_synchronized(rax,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ movbool(do_not_unlock_if_synchronized, true);

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  Label profile_method;
  Label profile_method_continue;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, &profile_method, &profile_method_continue);
    if (ProfileInterpreter) {
      __ bind(profile_method_continue);
    }
  }
  Label continue_after_compile;
  __ bind(continue_after_compile);

  bang_stack_shadow_pages(false);

  // reset the _do_not_unlock_if_synchronized flag
  __ get_thread(rax);
  __ movbool(do_not_unlock_if_synchronized, false);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so the method is not locked if the counters overflow.
  //
  if (synchronized) {
    // Allocate monitor and lock method
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    { Label L;
      __ movl(rax, access_flags);
      __ testl(rax, JVM_ACC_SYNCHRONIZED);
      __ jcc(Assembler::zero, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  { Label L;
    const Address monitor_block_top(rbp,
                frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ movptr(rax, monitor_block_top);
    __ cmpptr(rax, rsp);
    __ jcc(Assembler::equal, L);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif

  // jvmti support
  __ notify_method_entry();

  __ dispatch_next(vtos);

  // invocation counter overflow
  if (inc_counter) {
    if (ProfileInterpreter) {
      // We have decided to profile this method in the interpreter
      __ bind(profile_method);
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
      __ set_method_data_pointer_for_bcp();
      __ get_method(rbx);
      __ jmp(profile_method_continue);
    }
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(&continue_after_compile);
  }

  return entry_point;
}

//------------------------------------------------------------------------------------------------------------------------
// Entry points
//
// Here we generate the various kinds of entries into the interpreter.
// The two main entry types are generic bytecode methods and native call methods.
// These both come in synchronized and non-synchronized versions but the
// frame layout they create is very similar. The other method entry
// types are really just special-purpose entries that combine entry
// and interpretation all in one. These are for trivial methods like
// accessor, empty, or special math methods.
//
// When control flow reaches any of the entry types for the interpreter
// the following holds ->
//
// Arguments:
//
// rbx: methodOop
// rcx: receiver
//
//
// Stack layout immediately at entry
//
// [ return address     ] <--- rsp
// [ parameter n        ]
//   ...
// [ parameter 1        ]
// [ expression stack   ] (caller's java expression stack)

// Assuming that we don't go to one of the trivial specialized
// entries the stack will look like below when we are ready to execute
// the first bytecode (or call the native routine). The register usage
// will be as the template based interpreter expects (see interpreter_x86.hpp).
//
// local variables follow incoming parameters immediately; i.e.
// the return address is moved to the end of the locals.
//
// [ monitor entry      ] <--- rsp
//   ...
// [ monitor entry      ]
// [ expr. stack bottom ]
// [ saved rsi          ]
// [ current rdi        ]
// [ methodOop          ]
// [ saved rbp          ] <--- rbp
// [ return address     ]
// [ local variable m   ]
//   ...
// [ local variable 1   ]
// [ parameter n        ]
//   ...
// [ parameter 1        ] <--- rdi

address AbstractInterpreterGenerator::generate_method_entry(AbstractInterpreter::MethodKind kind) {
  // determine code generation flags
  bool synchronized = false;
  address entry_point = NULL;

  switch (kind) {
    case Interpreter::zerolocals             :                                                                               break;
    case Interpreter::zerolocals_synchronized: synchronized = true;                                                          break;
    case Interpreter::native                 : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(false);   break;
    case Interpreter::native_synchronized    : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(true);    break;
    case Interpreter::empty                  : entry_point = ((InterpreterGenerator*)this)->generate_empty_entry();         break;
    case Interpreter::accessor               : entry_point = ((InterpreterGenerator*)this)->generate_accessor_entry();      break;
    case Interpreter::abstract               : entry_point = ((InterpreterGenerator*)this)->generate_abstract_entry();      break;
    case Interpreter::method_handle          : entry_point = ((InterpreterGenerator*)this)->generate_method_handle_entry(); break;

    case Interpreter::java_lang_math_sin     : // fall thru
    case Interpreter::java_lang_math_cos     : // fall thru
    case Interpreter::java_lang_math_tan     : // fall thru
    case Interpreter::java_lang_math_abs     : // fall thru
    case Interpreter::java_lang_math_log     : // fall thru
    case Interpreter::java_lang_math_log10   : // fall thru
    case Interpreter::java_lang_math_sqrt    : entry_point = ((InterpreterGenerator*)this)->generate_math_entry(kind);      break;
    case Interpreter::java_lang_ref_reference_get
                                             : entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
    default                                  : ShouldNotReachHere();                                                        break;
  }

  if (entry_point) return entry_point;

  return ((InterpreterGenerator*)this)->generate_normal_entry(synchronized);

}

// These should never be compiled since the interpreter will prefer
// the compiled version to the intrinsic version.
bool AbstractInterpreter::can_be_compiled(methodHandle m) {
  switch (method_kind(m)) {
    case Interpreter::java_lang_math_sin     : // fall thru
    case Interpreter::java_lang_math_cos     : // fall thru
    case Interpreter::java_lang_math_tan     : // fall thru
    case Interpreter::java_lang_math_abs     : // fall thru
    case Interpreter::java_lang_math_log     : // fall thru
    case Interpreter::java_lang_math_log10   : // fall thru
    case Interpreter::java_lang_math_sqrt    :
      return false;
    default:
      return true;
  }
}

// How much stack a method activation needs in words.
int AbstractInterpreter::size_top_interpreter_activation(methodOop method) {

  const int stub_code = 4;  // see generate_call_stub
  // Save space for one monitor to get into the interpreted method in case
  // the method is synchronized
  int monitor_size = method->is_synchronized() ?
                             1*frame::interpreter_frame_monitor_size() : 0;

  // total overhead size: entry_size + (saved rbp thru expr stack bottom).
// asm based interpreter deoptimization helpers

int AbstractInterpreter::layout_activation(methodOop method,
                                           int tempcount,
                                           int popframe_extra_args,
                                           int moncount,
                                           int caller_actual_parameters,
                                           int callee_param_count,
                                           int callee_locals,
                                           frame* caller,
                                           frame* interpreter_frame,
                                           bool is_top_frame) {
  // Note: This calculation must exactly parallel the frame setup
  // in AbstractInterpreterGenerator::generate_method_entry.
  // If interpreter_frame!=NULL, set up the method, locals, and monitors.
  // The frame interpreter_frame, if not NULL, is guaranteed to be the right size,
  // as determined by a previous call to this method.
  // It is also guaranteed to be walkable even though it is in a skeletal state.
  // NOTE: return size is in words not bytes

  // fixed size of an interpreter frame:
  int max_locals = method->max_locals() * Interpreter::stackElementWords;
  int extra_locals = (method->max_locals() - method->size_of_parameters()) *
                     Interpreter::stackElementWords;

  int overhead = frame::sender_sp_offset - frame::interpreter_frame_initial_sp_offset;

  // Our locals were accounted for by the caller (or last_frame_adjust on the transition).
  // Since the callee parameters already account for the callee's params we only need to
  // account for the extra locals.

  int size = overhead +
             ((callee_locals - callee_param_count)*Interpreter::stackElementWords) +
             (moncount*frame::interpreter_frame_monitor_size()) +
             tempcount*Interpreter::stackElementWords + popframe_extra_args;

  if (interpreter_frame != NULL) {
#ifdef ASSERT
    if (!EnableInvokeDynamic)
      // @@@ FIXME: Should we correct interpreter_frame_sender_sp in the calling sequences?
      // Probably, since deoptimization doesn't work yet.
      assert(caller->unextended_sp() == interpreter_frame->interpreter_frame_sender_sp(), "Frame not properly walkable");
    assert(caller->sp() == interpreter_frame->sender_sp(), "Frame not properly walkable(2)");
#endif

    interpreter_frame->interpreter_frame_set_method(method);
    // NOTE the difference in using sender_sp and interpreter_frame_sender_sp
    // interpreter_frame_sender_sp is the original sp of the caller (the unextended_sp)
    // and sender_sp is fp+8
    intptr_t* locals = interpreter_frame->sender_sp() + max_locals - 1;

    interpreter_frame->interpreter_frame_set_locals(locals);
    BasicObjectLock* montop = interpreter_frame->interpreter_frame_monitor_begin();
    BasicObjectLock* monbot = montop - moncount;
    interpreter_frame->interpreter_frame_set_monitor_end(monbot);

    // Set last_sp
    intptr_t* rsp = (intptr_t*) monbot -
                    tempcount*Interpreter::stackElementWords -
                    popframe_extra_args;
    interpreter_frame->interpreter_frame_set_last_sp(rsp);

    // All frames but the initial (oldest) interpreter frame we fill in have a
    // value for sender_sp that allows walking the stack but isn't
    // truly correct. Correct the value here.

    if (extra_locals != 0 &&
        interpreter_frame->sender_sp() == interpreter_frame->interpreter_frame_sender_sp() ) {
      interpreter_frame->set_interpreter_frame_sender_sp(caller->sp() + extra_locals);
    }
    *interpreter_frame->interpreter_frame_cache_addr() =
      method->constants()->cache();
  }
  return size;
}
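// Worked example (commentary only; the counts are hypothetical): laying out
// a deoptimized activation with moncount == 1, tempcount == 2 and
// popframe_extra_args == 0 yields
//
//   size = overhead
//        + (callee_locals - callee_param_count) * Interpreter::stackElementWords
//        + 1 * frame::interpreter_frame_monitor_size()
//        + 2 * Interpreter::stackElementWords;
//
// Only the (callee_locals - callee_param_count) term adds space for locals,
// since the parameters themselves were already accounted for by the caller.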

//------------------------------------------------------------------------------------------------------------------------
// Exceptions

void TemplateInterpreterGenerator::generate_throw_exception() {
  // Entry point in previous activation (i.e., if the caller was interpreted)
  Interpreter::_rethrow_exception_entry = __ pc();
  const Register thread = rcx;

  // Restore sp to interpreter_frame_last_sp even though we are going
  // to empty the expression stack for the exception processing.
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
  // rax,: exception
  // rdx: return address/pc that threw exception
  __ restore_bcp();                              // rsi points to call/send
  __ restore_locals();

  // Entry point for exceptions thrown within interpreter code
  Interpreter::_throw_exception_entry = __ pc();
  // expression stack is undefined here
  // rax,: exception
  // rsi: exception bcp
  __ verify_oop(rax);

  // expression stack must be empty before entering the VM in case of an exception
  __ empty_expression_stack();
  __ empty_FPU_stack();
  // find exception handler address and preserve exception oop
  __ call_VM(rdx, CAST_FROM_FN_PTR(address, InterpreterRuntime::exception_handler_for_exception), rax);
  // rax,: exception handler entry point
  // rdx: preserved exception oop
  // rsi: bcp for exception handler
  __ push_ptr(rdx);                              // push exception which is now the only value on the stack
  __ jmp(rax);                                   // jump to exception handler (may be _remove_activation_entry!)

  // If the exception is not handled in the current frame the frame is removed and
  // the exception is rethrown (i.e. exception continuation is _rethrow_exception).
  //
  // Note: At this point the bci is still the bci for the instruction which caused
  //       the exception and the expression stack is empty. Thus, for any VM calls
  //       at this point, GC will find a legal oop map (with empty expression stack).

  // In current activation
  // tos: exception
  // rsi: exception bcp
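  // Overall flow (a sketch of the control transfer, not generated code): if
  // exception_handler_for_exception finds no handler in the current method,
  // the "handler entry point" it returns is _remove_activation_entry below.
  // That code pops this activation and asks
  // SharedRuntime::exception_handler_for_return_address for the caller's
  // continuation; for an interpreted caller that continuation is
  // _rethrow_exception_entry again, so the stack unwinds one interpreter
  // frame per iteration until a real handler is found.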
  //
  // JVMTI PopFrame support
  //

  Interpreter::_remove_activation_preserving_args_entry = __ pc();
  __ empty_expression_stack();
  __ empty_FPU_stack();
  // Set the popframe_processing bit in pending_popframe_condition indicating that we are
  // currently handling popframe, so that call_VMs that may happen later do not trigger new
  // popframe handling cycles.
  __ get_thread(thread);
  __ movl(rdx, Address(thread, JavaThread::popframe_condition_offset()));
  __ orl(rdx, JavaThread::popframe_processing_bit);
  __ movl(Address(thread, JavaThread::popframe_condition_offset()), rdx);

  {
    // Check to see whether we are returning to a deoptimized frame.
    // (The PopFrame call ensures that the caller of the popped frame is
    // either interpreted or compiled and deoptimizes it if compiled.)
    // In this case, we can't call dispatch_next() after the frame is
    // popped, but instead must save the incoming arguments and restore
    // them after deoptimization has occurred.
    //
    // Note that we don't compare the return PC against the
    // deoptimization blob's unpack entry because of the presence of
    // adapter frames in C2.
    Label caller_not_deoptimized;
    __ movptr(rdx, Address(rbp, frame::return_addr_offset * wordSize));
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), rdx);
    __ testl(rax, rax);
    __ jcc(Assembler::notZero, caller_not_deoptimized);

    // Compute size of arguments for saving when returning to deoptimized caller
    __ get_method(rax);
    __ verify_oop(rax);
    __ load_unsigned_short(rax, Address(rax, in_bytes(methodOopDesc::size_of_parameters_offset())));
    __ shlptr(rax, Interpreter::logStackElementSize);
    __ restore_locals();
    __ subptr(rdi, rax);
    __ addptr(rdi, wordSize);
    // Save these arguments
    __ get_thread(thread);
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), thread, rax, rdi);

    __ remove_activation(vtos, rdx,
                         /* throw_monitor_exception */ false,
                         /* install_monitor_exception */ false,
                         /* notify_jvmdi */ false);

    // Inform deoptimization that it is responsible for restoring these arguments
    __ get_thread(thread);
    __ movl(Address(thread, JavaThread::popframe_condition_offset()), JavaThread::popframe_force_deopt_reexecution_bit);

    // Continue in deoptimization handler
    __ jmp(rdx);

    __ bind(caller_not_deoptimized);
  }

  __ remove_activation(vtos, rdx,
                       /* throw_monitor_exception */ false,
                       /* install_monitor_exception */ false,
                       /* notify_jvmdi */ false);

  // Finish with popframe handling
  // A previous I2C followed by a deoptimization might have moved the
  // outgoing arguments further up the stack. PopFrame expects the
  // mutations to those outgoing arguments to be preserved and other
  // constraints basically require this frame to look exactly as
  // though it had previously invoked an interpreted activation with
  // no space between the top of the expression stack (current
  // last_sp) and the top of stack. Rather than force deopt to
  // maintain this kind of invariant all the time we call a small
  // fixup routine to move the mutated arguments onto the top of our
  // expression stack if necessary.
  __ mov(rax, rsp);
  __ movptr(rbx, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ get_thread(thread);
  // PC must point into interpreter here
  __ set_last_Java_frame(thread, noreg, rbp, __ pc());
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), thread, rax, rbx);
  __ get_thread(thread);
  __ reset_last_Java_frame(thread, true, true);
  // Restore the last_sp and null it out
  __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);

  __ restore_bcp();
  __ restore_locals();
  // The method data pointer was incremented already during
  // call profiling. We have to restore the mdp for the current bcp.
  if (ProfileInterpreter) {
    __ set_method_data_pointer_for_bcp();
  }

  // Clear the popframe condition flag
  __ get_thread(thread);
  __ movl(Address(thread, JavaThread::popframe_condition_offset()), JavaThread::popframe_inactive);

  __ dispatch_next(vtos);
  // end of PopFrame support
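  // Commentary (sketch): JavaThread's popframe condition acts as a small
  // state machine over the bits used above -- popframe_processing_bit is
  // set on entry so that nested call_VMs do not start a new popframe cycle,
  // popframe_force_deopt_reexecution_bit tells deoptimization that it must
  // restore the preserved arguments, and popframe_inactive clears the
  // request once the frame has been popped and execution resumes.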
  Interpreter::_remove_activation_entry = __ pc();

  // preserve exception over this code sequence
  __ pop_ptr(rax);
  __ get_thread(thread);
  __ movptr(Address(thread, JavaThread::vm_result_offset()), rax);
  // remove the activation (without doing throws on illegalMonitorExceptions)
  __ remove_activation(vtos, rdx, false, true, false);
  // restore exception
  __ get_thread(thread);
  __ movptr(rax, Address(thread, JavaThread::vm_result_offset()));
  __ movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
  __ verify_oop(rax);

  // In between activations - previous activation type unknown yet
  // compute continuation point - the continuation point expects
  // the following registers set up:
  //
  // rax: exception
  // rdx: return address/pc that threw exception
  // rsp: expression stack of caller
  // rbp: rbp, of caller
  __ push(rax);                                  // save exception
  __ push(rdx);                                  // save return address
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), thread, rdx);
  __ mov(rbx, rax);                              // save exception handler
  __ pop(rdx);                                   // restore return address
  __ pop(rax);                                   // restore exception
  // Note that an "issuing PC" is actually the next PC after the call
  __ jmp(rbx);                                   // jump to exception handler of caller
}


//
// JVMTI ForceEarlyReturn support
//
address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
  address entry = __ pc();
  const Register thread = rcx;

  __ restore_bcp();
  __ restore_locals();
  __ empty_expression_stack();
  __ empty_FPU_stack();
  __ load_earlyret_value(state);

  __ get_thread(thread);
  __ movptr(rcx, Address(thread, JavaThread::jvmti_thread_state_offset()));
  const Address cond_addr(rcx, JvmtiThreadState::earlyret_state_offset());

  // Clear the earlyret state
  __ movl(cond_addr, JvmtiThreadState::earlyret_inactive);

  __ remove_activation(state, rsi,
                       false, /* throw_monitor_exception */
                       false, /* install_monitor_exception */
                       true); /* notify_jvmdi */
  __ jmp(rsi);
  return entry;
} // end of ForceEarlyReturn support


//------------------------------------------------------------------------------------------------------------------------
// Helper for vtos entry point generation

void TemplateInterpreterGenerator::set_vtos_entry_points (Template* t,
                                                          address& bep, address& cep, address& sep,
                                                          address& aep, address& iep, address& lep,
                                                          address& fep, address& dep, address& vep) {
  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
  Label L;
  fep = __ pc(); __ push(ftos); __ jmp(L);
  dep = __ pc(); __ push(dtos); __ jmp(L);
  lep = __ pc(); __ push(ltos); __ jmp(L);
  aep = __ pc(); __ push(atos); __ jmp(L);
  bep = cep = sep =             // fall through
  iep = __ pc(); __ push(itos); // fall through
  vep = __ pc(); __ bind(L);    // fall through
  generate_and_dispatch(t);
}

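// Illustrative expansion (commentary only, not generated code): a caller
// that arrives with a float on the tosca enters at fep, which spills ftos
// to the expression stack and jumps over the other prologues to the common
// label; a caller arriving with nothing (vtos) enters at vep and falls
// straight through. For example, entry at iep executes roughly:
//
//   __ push(itos);            // spill rax to the expression stack
//   // fall through to vep, i.e. bind(L)
//   generate_and_dispatch(t);
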
//------------------------------------------------------------------------------------------------------------------------
// Generation of individual instructions

// helpers for generate_and_dispatch



InterpreterGenerator::InterpreterGenerator(StubQueue* code)
 : TemplateInterpreterGenerator(code) {
   generate_all(); // down here so it can be "virtual"
}

//------------------------------------------------------------------------------------------------------------------------

// Non-product code
#ifndef PRODUCT
address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
  address entry = __ pc();

  // prepare expression stack
  __ pop(rcx);          // pop return address so expression stack is 'pure'
  __ push(state);       // save tosca

  // pass tosca registers as arguments & call tracer
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode), rcx, rax, rdx);
  __ mov(rcx, rax);     // make sure return address is not destroyed by pop(state)
  __ pop(state);        // restore tosca

  // return
  __ jmp(rcx);

  return entry;
}


void TemplateInterpreterGenerator::count_bytecode() {
  __ incrementl(ExternalAddress((address) &BytecodeCounter::_counter_value));
}


void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
  __ incrementl(ExternalAddress((address) &BytecodeHistogram::_counters[t->bytecode()]));
}


void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
  // maintain a rolling index: fold the current bytecode into the previous
  // one, store it back, and bump the counter for the resulting pair
  __ mov32(rbx, ExternalAddress((address) &BytecodePairHistogram::_index));
  __ shrl(rbx, BytecodePairHistogram::log2_number_of_codes);
  __ orl(rbx, ((int)t->bytecode()) << BytecodePairHistogram::log2_number_of_codes);
  __ mov32(ExternalAddress((address) &BytecodePairHistogram::_index), rbx);
  ExternalAddress table((address) BytecodePairHistogram::_counters);
  Address index(noreg, rbx, Address::times_4);
  __ incrementl(ArrayAddress(table, index));
}


void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
  // Call a little run-time stub to avoid blow-up for each bytecode.
  // The run-time stub saves the right registers, depending on
  // the tosca in-state for the given template.
  assert(Interpreter::trace_code(t->tos_in()) != NULL,
         "entry must have been generated");
  __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
}


void TemplateInterpreterGenerator::stop_interpreter_at() {
  Label L;
  __ cmp32(ExternalAddress((address) &BytecodeCounter::_counter_value),
           StopInterpreterAt);
  __ jcc(Assembler::notEqual, L);
  __ int3();
  __ bind(L);
}
#endif // !PRODUCT
#endif // CC_INTERP