/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterGenerator.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateTable.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

#define __ _masm->


#ifndef CC_INTERP
const int method_offset = frame::interpreter_frame_method_offset * wordSize;
const int bci_offset    = frame::interpreter_frame_bcx_offset    * wordSize;
const int locals_offset = frame::interpreter_frame_locals_offset * wordSize;

//------------------------------------------------------------------------------------------------------------------------

address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();

  // Note: There should be a minimal interpreter frame set up when stack
  // overflow occurs since we check explicitly for it now.
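  // (The assert that follows checks this: monitor_block_top, computed from
  //  rbp, must be at or above rsp, which can only hold once the fixed frame
  //  has been laid down.)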
  //
#ifdef ASSERT
  { Label L;
    __ lea(rax, Address(rbp,
                frame::interpreter_frame_monitor_block_top_offset * wordSize));
    __ cmpptr(rax, rsp);  // rax = maximal rsp for current rbp
                          // (stack grows negative)
    __ jcc(Assembler::aboveEqual, L); // check if frame is complete
    __ stop ("interpreter frame not set up");
    __ bind(L);
  }
#endif // ASSERT
  // Restore bcp under the assumption that the current frame is still
  // interpreted
  __ restore_bcp();

  // expression stack must be empty before entering the VM if an exception
  // happened
  __ empty_expression_stack();
  __ empty_FPU_stack();
  // throw exception
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError));
  return entry;
}

address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(const char* name) {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();
  __ empty_FPU_stack();
  // setup parameters
  // ??? convention: expect aberrant index in register rbx
  __ lea(rax, ExternalAddress((address)name));
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException), rax, rbx);
  return entry;
}

address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();
  // object is at TOS
  __ pop(rax);
  // expression stack must be empty before entering the VM if an exception
  // happened
  __ empty_expression_stack();
  __ empty_FPU_stack();
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::throw_ClassCastException),
             rax);
  return entry;
}

address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) {
  assert(!pass_oop || message == NULL, "either oop or message but not both");
  address entry = __ pc();
  if (pass_oop) {
    // object is at TOS
    __ pop(rbx);
  }
  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();
  __ empty_FPU_stack();
  // setup parameters
  __ lea(rax, ExternalAddress((address)name));
  if (pass_oop) {
    __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_klass_exception), rax, rbx);
  } else {
    if (message != NULL) {
      __ lea(rbx, ExternalAddress((address)message));
    } else {
      __ movptr(rbx, NULL_WORD);
    }
    __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), rax, rbx);
  }
  // throw exception
  __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
  return entry;
}


address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
  address entry = __ pc();
  // NULL last_sp until next java call
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
  __ dispatch_next(state);
  return entry;
}


address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step) {
  TosState incoming_state = state;
  address entry = __ pc();

#ifdef COMPILER2
  // The FPU stack is clean if UseSSE >= 2 but must be cleaned in other cases
  if ((incoming_state == ftos && UseSSE < 1) || (incoming_state == dtos && UseSSE < 2)) {
    for (int i = 1; i < 8; i++) {
      __ ffree(i);
    }
  } else if (UseSSE < 2) {
    __ empty_FPU_stack();
  }
#endif
  if ((incoming_state == ftos && UseSSE < 1) || (incoming_state == dtos && UseSSE < 2)) {
    __ MacroAssembler::verify_FPU(1, "generate_return_entry_for compiled");
  } else {
    __ MacroAssembler::verify_FPU(0, "generate_return_entry_for compiled");
  }

  // In SSE mode, interpreter returns FP results in xmm0 but they need
  // to end up back on the FPU so it can operate on them.
  if (incoming_state == ftos && UseSSE >= 1) {
    __ subptr(rsp, wordSize);
    __ movflt(Address(rsp, 0), xmm0);
    __ fld_s(Address(rsp, 0));
    __ addptr(rsp, wordSize);
  } else if (incoming_state == dtos && UseSSE >= 2) {
    __ subptr(rsp, 2*wordSize);
    __ movdbl(Address(rsp, 0), xmm0);
    __ fld_d(Address(rsp, 0));
    __ addptr(rsp, 2*wordSize);
  }

  __ MacroAssembler::verify_FPU(state == ftos || state == dtos ? 1 : 0, "generate_return_entry_for in interpreter");

  // Restore stack bottom in case i2c adjusted stack
  __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  // and NULL it as marker that rsp is now tos until next java call
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);

  __ restore_bcp();
  __ restore_locals();

  Label L_got_cache, L_giant_index;
  if (EnableInvokeDynamic) {
    __ cmpb(Address(rsi, 0), Bytecodes::_invokedynamic);
    __ jcc(Assembler::equal, L_giant_index);
  }
  __ get_cache_and_index_at_bcp(rbx, rcx, 1, sizeof(u2));
  __ bind(L_got_cache);
  __ movl(rbx, Address(rbx, rcx,
                       Address::times_ptr, ConstantPoolCache::base_offset() +
                       ConstantPoolCacheEntry::flags_offset()));
  __ andptr(rbx, 0xFF);
  __ lea(rsp, Address(rsp, rbx, Interpreter::stackElementScale()));
  __ dispatch_next(state, step);

  // out of the main line of code...
  if (EnableInvokeDynamic) {
    __ bind(L_giant_index);
    __ get_cache_and_index_at_bcp(rbx, rcx, 1, sizeof(u4));
    __ jmp(L_got_cache);
  }

  return entry;
}


address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step) {
  address entry = __ pc();

  // In SSE mode, FP results are in xmm0
  if (state == ftos && UseSSE > 0) {
    __ subptr(rsp, wordSize);
    __ movflt(Address(rsp, 0), xmm0);
    __ fld_s(Address(rsp, 0));
    __ addptr(rsp, wordSize);
  } else if (state == dtos && UseSSE >= 2) {
    __ subptr(rsp, 2*wordSize);
    __ movdbl(Address(rsp, 0), xmm0);
    __ fld_d(Address(rsp, 0));
    __ addptr(rsp, 2*wordSize);
  }

  __ MacroAssembler::verify_FPU(state == ftos || state == dtos ? 1 : 0, "generate_deopt_entry_for in interpreter");

  // The stack is not extended by deopt but we must NULL last_sp as this
  // entry is like a "return".
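  // (A stale last_sp would make the frame look as if a Java call were still
  //  in progress; the dispatch below expects rsp to be tos again.)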
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
  __ restore_bcp();
  __ restore_locals();
  // handle exceptions
  { Label L;
    const Register thread = rcx;
    __ get_thread(thread);
    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::zero, L);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }
  __ dispatch_next(state, step);
  return entry;
}


int AbstractInterpreter::BasicType_as_index(BasicType type) {
  int i = 0;
  switch (type) {
    case T_BOOLEAN: i = 0; break;
    case T_CHAR   : i = 1; break;
    case T_BYTE   : i = 2; break;
    case T_SHORT  : i = 3; break;
    case T_INT    : // fall through
    case T_LONG   : // fall through
    case T_VOID   : i = 4; break;
    case T_FLOAT  : i = 5; break;  // have to treat float and double separately for SSE
    case T_DOUBLE : i = 6; break;
    case T_OBJECT : // fall through
    case T_ARRAY  : i = 7; break;
    default       : ShouldNotReachHere();
  }
  assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers, "index out of bounds");
  return i;
}


address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type) {
  address entry = __ pc();
  switch (type) {
    case T_BOOLEAN: __ c2bool(rax);            break;
    case T_CHAR   : __ andptr(rax, 0xFFFF);    break;
    case T_BYTE   : __ sign_extend_byte (rax); break;
    case T_SHORT  : __ sign_extend_short(rax); break;
    case T_INT    : /* nothing to do */        break;
    case T_DOUBLE :
    case T_FLOAT  :
      { const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp();
        __ pop(t);                            // remove return address first
        // Must return a result for interpreter or compiler. In SSE
        // mode, results are returned in xmm0 and the FPU stack must
        // be empty.
        if (type == T_FLOAT && UseSSE >= 1) {
          // Load ST0
          __ fld_d(Address(rsp, 0));
          // Store as float and empty fpu stack
          __ fstp_s(Address(rsp, 0));
          // and reload
          __ movflt(xmm0, Address(rsp, 0));
        } else if (type == T_DOUBLE && UseSSE >= 2 ) {
          __ movdbl(xmm0, Address(rsp, 0));
        } else {
          // restore ST0
          __ fld_d(Address(rsp, 0));
        }
        // and pop the temp
        __ addptr(rsp, 2 * wordSize);
        __ push(t);                           // restore return address
      }
      break;
    case T_OBJECT :
      // retrieve result from frame
      __ movptr(rax, Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize));
      // and verify it
      __ verify_oop(rax);
      break;
    default       : ShouldNotReachHere();
  }
  __ ret(0);                                  // return from result handler
  return entry;
}

address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state, address runtime_entry) {
  address entry = __ pc();
  __ push(state);
  __ call_VM(noreg, runtime_entry);
  __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
  return entry;
}


// Helpers for commoning out cases in the various types of method entries.
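//
// Overview of the two counting schemes used below: with TieredCompilation,
// increment_mask_and_jump bumps the counter (in the MDO if one exists,
// otherwise in MethodCounters) and branches to the overflow label whenever
// the masked count wraps to zero, i.e. roughly every
// 2^Tier0InvokeNotifyFreqLog invocations. Without tiering, the invocation
// and backedge counts are summed and compared against
// InterpreterProfileLimit / InterpreterInvocationLimit.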
336 // 337 338 // increment invocation count & check for overflow 339 // 340 // Note: checking for negative value instead of overflow 341 // so we have a 'sticky' overflow test 342 // 343 // rbx,: method 344 // rcx: invocation counter 345 // 346 void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) { 347 Label done; 348 // Note: In tiered we increment either counters in MethodCounters* or in MDO 349 // depending if we're profiling or not. 350 if (TieredCompilation) { 351 int increment = InvocationCounter::count_increment; 352 int mask = ((1 << Tier0InvokeNotifyFreqLog) - 1) << InvocationCounter::count_shift; 353 Label no_mdo; 354 if (ProfileInterpreter) { 355 // Are we profiling? 356 __ movptr(rax, Address(rbx, Method::method_data_offset())); 357 __ testptr(rax, rax); 358 __ jccb(Assembler::zero, no_mdo); 359 // Increment counter in the MDO 360 const Address mdo_invocation_counter(rax, in_bytes(MethodData::invocation_counter_offset()) + 361 in_bytes(InvocationCounter::counter_offset())); 362 __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rcx, false, Assembler::zero, overflow); 363 __ jmp(done); 364 } 365 __ bind(no_mdo); 366 // Increment counter in MethodCounters 367 const Address invocation_counter(rax, 368 MethodCounters::invocation_counter_offset() + 369 InvocationCounter::counter_offset()); 370 371 __ get_method_counters(rbx, rax, done); 372 __ increment_mask_and_jump(invocation_counter, increment, mask, 373 rcx, false, Assembler::zero, overflow); 374 __ bind(done); 375 } else { 376 const Address backedge_counter (rax, 377 MethodCounters::backedge_counter_offset() + 378 InvocationCounter::counter_offset()); 379 const Address invocation_counter(rax, 380 MethodCounters::invocation_counter_offset() + 381 InvocationCounter::counter_offset()); 382 383 __ get_method_counters(rbx, rax, done); 384 385 if (ProfileInterpreter) { 386 __ incrementl(Address(rax, 387 MethodCounters::interpreter_invocation_counter_offset())); 388 } 389 390 // Update standard invocation counters 391 __ movl(rcx, invocation_counter); 392 __ incrementl(rcx, InvocationCounter::count_increment); 393 __ movl(invocation_counter, rcx); // save invocation count 394 395 __ movl(rax, backedge_counter); // load backedge counter 396 __ andl(rax, InvocationCounter::count_mask_value); // mask out the status bits 397 398 __ addl(rcx, rax); // add both counters 399 400 // profile_method is non-null only for interpreted method so 401 // profile_method != NULL == !native_call 402 // BytecodeInterpreter only calls for native so code is elided. 

    if (ProfileInterpreter && profile_method != NULL) {
      // Test to see if we should create a method data oop
      __ cmp32(rcx,
               ExternalAddress((address)&InvocationCounter::InterpreterProfileLimit));
      __ jcc(Assembler::less, *profile_method_continue);

      // if no method data exists, go to profile_method
      __ test_method_data_pointer(rax, *profile_method);
    }

    __ cmp32(rcx,
             ExternalAddress((address)&InvocationCounter::InterpreterInvocationLimit));
    __ jcc(Assembler::aboveEqual, *overflow);
    __ bind(done);
  }
}

void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {

  // Asm interpreter on entry
  // rdi - locals
  // rsi - bcp
  // rbx - method
  // rdx - cpool
  // rbp - interpreter frame

  // C++ interpreter on entry
  // rsi - new interpreter state pointer
  // rbp - interpreter frame pointer
  // rbx - method

  // On return (i.e. jump to entry_point) [ back to invocation of interpreter ]
  // rbx - method
  // rcx - rcvr (assuming there is one)
  // top of stack: return address of interpreter caller
  // rsp - sender_sp

  // C++ interpreter only
  // rsi - previous interpreter state pointer

  // InterpreterRuntime::frequency_counter_overflow takes one argument
  // indicating if the counter overflow occurs at a backwards branch (non-NULL bcp).
  // The call returns the address of the verified entry point for the method or NULL
  // if the compilation did not complete (either went background or bailed out).
  __ movptr(rax, (intptr_t)false);
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), rax);

  __ movptr(rbx, Address(rbp, method_offset));  // restore Method*

  // Preserve invariant that rsi/rdi contain bcp/locals of sender frame
  // and jump to the interpreted entry.
  __ jmp(*do_continue, relocInfo::none);

}

void InterpreterGenerator::generate_stack_overflow_check(void) {
  // see if we've got enough room on the stack for locals plus overhead.
  // the expression stack grows down incrementally, so the normal guard
  // page mechanism will work for that.
  //
  // Registers live on entry:
  //
  // Asm interpreter
  // rdx: number of additional locals this frame needs (what we must check)
  // rbx: Method*

  // destroyed on exit
  // rax

  // NOTE: the additional locals are always pushed as well (this wasn't
  // obvious in generate_method_entry), so the guard should work for them too.
  //

  // monitor entry size: see picture of stack set up in generate_method_entry and frame_x86.hpp
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

  // total overhead size: entry_size + (saved rbp thru expr stack bottom).
  // be sure to change this if you add/subtract anything to/from the overhead area
  const int overhead_size = -(frame::interpreter_frame_initial_sp_offset*wordSize) + entry_size;

  const int page_size = os::vm_page_size();

  Label after_frame_check;

  // see if the frame is greater than one page in size. If so,
  // then we need to verify there is enough stack space remaining
  // for the additional locals.
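  // For illustration only (values are platform- and flag-dependent): with a
  // 4096-byte page and 4-byte stack elements on x86_32, the fast path below
  // is taken whenever the additional locals fit within
  // (4096 - overhead_size) / 4 stack elements.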
  __ cmpl(rdx, (page_size - overhead_size)/Interpreter::stackElementSize);
  __ jcc(Assembler::belowEqual, after_frame_check);

  // compute rsp as if this were going to be the last frame on
  // the stack before the red zone

  Label after_frame_check_pop;

  __ push(rsi);

  const Register thread = rsi;

  __ get_thread(thread);

  const Address stack_base(thread, Thread::stack_base_offset());
  const Address stack_size(thread, Thread::stack_size_offset());

  // locals + overhead, in bytes
  __ lea(rax, Address(noreg, rdx, Interpreter::stackElementScale(), overhead_size));

#ifdef ASSERT
  Label stack_base_okay, stack_size_okay;
  // verify that thread stack base is non-zero
  __ cmpptr(stack_base, (int32_t)NULL_WORD);
  __ jcc(Assembler::notEqual, stack_base_okay);
  __ stop("stack base is zero");
  __ bind(stack_base_okay);
  // verify that thread stack size is non-zero
  __ cmpptr(stack_size, 0);
  __ jcc(Assembler::notEqual, stack_size_okay);
  __ stop("stack size is zero");
  __ bind(stack_size_okay);
#endif

  // Add stack base to locals and subtract stack size
  __ addptr(rax, stack_base);
  __ subptr(rax, stack_size);

  // Use the maximum number of pages we might bang.
  const int max_pages = StackShadowPages > (StackRedPages+StackYellowPages) ? StackShadowPages :
                                                                              (StackRedPages+StackYellowPages);
  __ addptr(rax, max_pages * page_size);

  // check against the current stack bottom
  __ cmpptr(rsp, rax);
  __ jcc(Assembler::above, after_frame_check_pop);

  __ pop(rsi);  // get saved bcp / (c++ prev state).

  // Restore sender's sp as SP. This is necessary if the sender's
  // frame is an extended compiled frame (see gen_c2i_adapter())
  // and safer anyway in case of JSR292 adaptations.

  __ pop(rax);  // return address must be moved if SP is changed
  __ mov(rsp, rsi);
  __ push(rax);

  // Note: the restored frame is not necessarily interpreted.
  // Use the shared runtime version of the StackOverflowError.
  assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
  __ jump(ExternalAddress(StubRoutines::throw_StackOverflowError_entry()));
  // all done with frame size check
  __ bind(after_frame_check_pop);
  __ pop(rsi);

  __ bind(after_frame_check);
}

// Allocate monitor and lock method (asm interpreter)
// rbx - Method*
//
void InterpreterGenerator::lock_method(void) {
  // synchronize method
  const Address access_flags      (rbx, Method::access_flags_offset());
  const Address monitor_block_top (rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

#ifdef ASSERT
  { Label L;
    __ movl(rax, access_flags);
    __ testl(rax, JVM_ACC_SYNCHRONIZED);
    __ jcc(Assembler::notZero, L);
    __ stop("method doesn't need synchronization");
    __ bind(L);
  }
#endif // ASSERT
  // get synchronization object
  { Label done;
    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
    __ movl(rax, access_flags);
    __ testl(rax, JVM_ACC_STATIC);
    __ movptr(rax, Address(rdi, Interpreter::local_offset_in_bytes(0)));  // get receiver (assume this is frequent case)
    __ jcc(Assembler::zero, done);
    __ movptr(rax, Address(rbx, Method::const_offset()));
    __ movptr(rax, Address(rax, ConstMethod::constants_offset()));
    __ movptr(rax, Address(rax, ConstantPool::pool_holder_offset_in_bytes()));
    __ movptr(rax, Address(rax, mirror_offset));
    __ bind(done);
  }
  // add space for monitor & lock
  __ subptr(rsp, entry_size);                                           // add space for a monitor entry
  __ movptr(monitor_block_top, rsp);                                    // set new monitor block top
  __ movptr(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax); // store object
  __ mov(rdx, rsp);                                                     // object address
  __ lock_object(rdx);
}

//
// Generate a fixed interpreter frame. This is identical setup for interpreted methods
// and for native methods, hence the shared code.
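//
// Summarizing the pushes below, the resulting frame looks like this
// (high to low addresses; see also frame_x86.hpp):
//
//   [ return address      ]
//   [ saved rbp           ] <--- rbp
//   [ sender sp           ]
//   [ last_sp (NULL)      ]
//   [ Method*             ]
//   [ mdp or 0            ]
//   [ constant pool cache ]
//   [ locals pointer      ]
//   [ bcp (0 for natives) ]
//   [ expr stack bottom   ] <--- rsp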

void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  // initialize fixed part of activation frame
  __ push(rax);                                         // save return address
  __ enter();                                           // save old & set new rbp


  __ push(rsi);                                         // set sender sp
  __ push((int32_t)NULL_WORD);                          // leave last_sp as null
  __ movptr(rsi, Address(rbx, Method::const_offset())); // get ConstMethod*
  __ lea(rsi, Address(rsi, ConstMethod::codes_offset())); // get codebase
  __ push(rbx);                                         // save Method*
  if (ProfileInterpreter) {
    Label method_data_continue;
    __ movptr(rdx, Address(rbx, in_bytes(Method::method_data_offset())));
    __ testptr(rdx, rdx);
    __ jcc(Assembler::zero, method_data_continue);
    __ addptr(rdx, in_bytes(MethodData::data_offset()));
    __ bind(method_data_continue);
    __ push(rdx);                                       // set the mdp (method data pointer)
  } else {
    __ push(0);
  }

  __ movptr(rdx, Address(rbx, Method::const_offset()));
  __ movptr(rdx, Address(rdx, ConstMethod::constants_offset()));
  __ movptr(rdx, Address(rdx, ConstantPool::cache_offset_in_bytes()));
  __ push(rdx);                                         // set constant pool cache
  __ push(rdi);                                         // set locals pointer
  if (native_call) {
    __ push(0);                                         // no bcp
  } else {
    __ push(rsi);                                       // set bcp
  }
  __ push(0);                                           // reserve word for pointer to expression stack bottom
  __ movptr(Address(rsp, 0), rsp);                      // set expression stack bottom
}

// End of helpers

//
// Various method entries
//------------------------------------------------------------------------------------------------------------------------
//
//

// Call an accessor method (assuming it is resolved; otherwise drop into vanilla (slow path) entry)

address InterpreterGenerator::generate_accessor_entry(void) {

  // rbx: Method*
  // rcx: receiver (preserve for slow entry into asm interpreter)

  // rsi: senderSP must be preserved for slow path, set SP to it on fast path

  address entry_point = __ pc();
  Label xreturn_path;

  // do fastpath for resolved accessor methods
  if (UseFastAccessorMethods) {
    Label slow_path;
    // If we need a safepoint check, generate full interpreter entry.
    ExternalAddress state(SafepointSynchronize::address_of_state());
    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
             SafepointSynchronize::_not_synchronized);

    __ jcc(Assembler::notEqual, slow_path);
    // ASM/C++ Interpreter
    // Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites thereof; parameter size = 1
    // Note: We can only use this code if the getfield has been resolved
    //       and if we don't have a null-pointer exception => check for
    //       these conditions first and use slow path if necessary.
    // rbx: method
    // rcx: receiver
    __ movptr(rax, Address(rsp, wordSize));

    // check if local 0 != NULL and read field
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, slow_path);

    // read first instruction word and extract bytecode @ 1 and index @ 2
    __ movptr(rdx, Address(rbx, Method::const_offset()));
    __ movptr(rdi, Address(rdx, ConstMethod::constants_offset()));
    __ movl(rdx, Address(rdx, ConstMethod::codes_offset()));
    // Shift codes right to get the index on the right.
    // The bytecode fetched looks like <index><0xb4><0x2a>
    __ shrl(rdx, 2*BitsPerByte);
    __ shll(rdx, exact_log2(in_words(ConstantPoolCacheEntry::size())));
    __ movptr(rdi, Address(rdi, ConstantPool::cache_offset_in_bytes()));

    // rax: local 0
    // rbx: method
    // rcx: receiver - do not destroy since it is needed for slow path!
    // rcx: scratch
    // rdx: constant pool cache index
    // rdi: constant pool cache
    // rsi: sender sp

    // check if getfield has been resolved and read constant pool cache entry
    // check the validity of the cache entry by testing whether _indices field
    // contains Bytecode::_getfield in b1 byte.
    assert(in_words(ConstantPoolCacheEntry::size()) == 4, "adjust shift below");
    __ movl(rcx,
            Address(rdi,
                    rdx,
                    Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()));
    __ shrl(rcx, 2*BitsPerByte);
    __ andl(rcx, 0xFF);
    __ cmpl(rcx, Bytecodes::_getfield);
    __ jcc(Assembler::notEqual, slow_path);

    // Note: constant pool entry is not valid before bytecode is resolved
    __ movptr(rcx,
              Address(rdi,
                      rdx,
                      Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset()));
    __ movl(rdx,
            Address(rdi,
                    rdx,
                    Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));

    Label notByte, notShort, notChar;
    const Address field_address (rax, rcx, Address::times_1);

    // Need to differentiate between igetfield, agetfield, bgetfield etc.
    // because they are different sizes.
    // Use the type from the constant pool cache
    __ shrl(rdx, ConstantPoolCacheEntry::tos_state_shift);
    // Make sure we don't need to mask rdx after the above shift
    ConstantPoolCacheEntry::verify_tos_state_shift();
    __ cmpl(rdx, btos);
    __ jcc(Assembler::notEqual, notByte);
    __ load_signed_byte(rax, field_address);
    __ jmp(xreturn_path);

    __ bind(notByte);
    __ cmpl(rdx, stos);
    __ jcc(Assembler::notEqual, notShort);
    __ load_signed_short(rax, field_address);
    __ jmp(xreturn_path);

    __ bind(notShort);
    __ cmpl(rdx, ctos);
    __ jcc(Assembler::notEqual, notChar);
    __ load_unsigned_short(rax, field_address);
    __ jmp(xreturn_path);

    __ bind(notChar);
#ifdef ASSERT
    Label okay;
    __ cmpl(rdx, atos);
    __ jcc(Assembler::equal, okay);
    __ cmpl(rdx, itos);
    __ jcc(Assembler::equal, okay);
    __ stop("what type is this?");
    __ bind(okay);
#endif // ASSERT
    // All the rest are a 32-bit word size.
    // This is OK for now, since fast accessors should be going away.
    __ movptr(rax, field_address);

    __ bind(xreturn_path);

    // _ireturn/_areturn
    __ pop(rdi);                // get return address
    __ mov(rsp, rsi);           // set sp to sender sp
    __ jmp(rdi);

    // generate a vanilla interpreter entry as the slow path
    __ bind(slow_path);

    (void) generate_normal_entry(false);
    return entry_point;
  }
  return NULL;

}

// Method entry for java.lang.ref.Reference.get.
address InterpreterGenerator::generate_Reference_get_entry(void) {
#if INCLUDE_ALL_GCS
  // Code: _aload_0, _getfield, _areturn
  // parameter size = 1
  //
  // The code that gets generated by this routine is split into 2 parts:
  //    1. The "intrinsified" code for G1 (or any SATB based GC),
  //    2. The slow path - which is an expansion of the regular method entry.
788 // 789 // Notes:- 790 // * In the G1 code we do not check whether we need to block for 791 // a safepoint. If G1 is enabled then we must execute the specialized 792 // code for Reference.get (except when the Reference object is null) 793 // so that we can log the value in the referent field with an SATB 794 // update buffer. 795 // If the code for the getfield template is modified so that the 796 // G1 pre-barrier code is executed when the current method is 797 // Reference.get() then going through the normal method entry 798 // will be fine. 799 // * The G1 code below can, however, check the receiver object (the instance 800 // of java.lang.Reference) and jump to the slow path if null. If the 801 // Reference object is null then we obviously cannot fetch the referent 802 // and so we don't need to call the G1 pre-barrier. Thus we can use the 803 // regular method entry code to generate the NPE. 804 // 805 // This code is based on generate_accessor_enty. 806 807 // rbx,: Method* 808 // rcx: receiver (preserve for slow entry into asm interpreter) 809 810 // rsi: senderSP must preserved for slow path, set SP to it on fast path 811 812 address entry = __ pc(); 813 814 const int referent_offset = java_lang_ref_Reference::referent_offset; 815 guarantee(referent_offset > 0, "referent offset not initialized"); 816 817 if (UseG1GC) { 818 Label slow_path; 819 820 // Check if local 0 != NULL 821 // If the receiver is null then it is OK to jump to the slow path. 822 __ movptr(rax, Address(rsp, wordSize)); 823 __ testptr(rax, rax); 824 __ jcc(Assembler::zero, slow_path); 825 826 // rax: local 0 (must be preserved across the G1 barrier call) 827 // 828 // rbx: method (at this point it's scratch) 829 // rcx: receiver (at this point it's scratch) 830 // rdx: scratch 831 // rdi: scratch 832 // 833 // rsi: sender sp 834 835 // Preserve the sender sp in case the pre-barrier 836 // calls the runtime 837 __ push(rsi); 838 839 // Load the value of the referent field. 840 const Address field_address(rax, referent_offset); 841 __ movptr(rax, field_address); 842 843 // Generate the G1 pre-barrier code to log the value of 844 // the referent field in an SATB buffer. 845 __ get_thread(rcx); 846 __ g1_write_barrier_pre(noreg /* obj */, 847 rax /* pre_val */, 848 rcx /* thread */, 849 rbx /* tmp */, 850 true /* tosca_save */, 851 true /* expand_call */); 852 853 // _areturn 854 __ pop(rsi); // get sender sp 855 __ pop(rdi); // get return address 856 __ mov(rsp, rsi); // set sp to sender sp 857 __ jmp(rdi); 858 859 __ bind(slow_path); 860 (void) generate_normal_entry(false); 861 862 return entry; 863 } 864 #endif // INCLUDE_ALL_GCS 865 866 // If G1 is not enabled then attempt to go through the accessor entry point 867 // Reference.get is an accessor 868 return generate_accessor_entry(); 869 } 870 871 /** 872 * Method entry for static native methods: 873 * int java.util.zip.CRC32.update(int crc, int b) 874 */ 875 address InterpreterGenerator::generate_CRC32_update_entry() { 876 if (UseCRC32Intrinsics) { 877 address entry = __ pc(); 878 879 // rbx,: Method* 880 // rsi: senderSP must preserved for slow path, set SP to it on fast path 881 // rdx: scratch 882 // rdi: scratch 883 884 Label slow_path; 885 // If we need a safepoint check, generate full interpreter entry. 
    ExternalAddress state(SafepointSynchronize::address_of_state());
    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
             SafepointSynchronize::_not_synchronized);
    __ jcc(Assembler::notEqual, slow_path);

    // We don't generate local frame and don't align stack because
    // we call stub code and there is no safepoint on this path.

    // Load parameters
    const Register crc = rax;  // crc
    const Register val = rdx;  // source java byte value
    const Register tbl = rdi;  // scratch

    // Arguments are reversed on java expression stack
    __ movl(val, Address(rsp,   wordSize)); // byte value
    __ movl(crc, Address(rsp, 2*wordSize)); // Initial CRC

    __ lea(tbl, ExternalAddress(StubRoutines::crc_table_addr()));
    __ notl(crc); // ~crc
    __ update_byte_crc32(crc, val, tbl);
    __ notl(crc); // ~crc
    // result in rax

    // _areturn
    __ pop(rdi);                // get return address
    __ mov(rsp, rsi);           // set sp to sender sp
    __ jmp(rdi);

    // generate a vanilla native entry as the slow path
    __ bind(slow_path);

    (void) generate_native_entry(false);

    return entry;
  }
  return generate_native_entry(false);
}

/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
 *   int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
 */
address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
  if (UseCRC32Intrinsics) {
    address entry = __ pc();

    // rbx: Method*
    // rsi: senderSP must be preserved for slow path, set SP to it on fast path
    // rdx: scratch
    // rdi: scratch

    Label slow_path;
    // If we need a safepoint check, generate full interpreter entry.
    ExternalAddress state(SafepointSynchronize::address_of_state());
    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
             SafepointSynchronize::_not_synchronized);
    __ jcc(Assembler::notEqual, slow_path);

    // We don't generate local frame and don't align stack because
    // we call stub code and there is no safepoint on this path.
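    // For reference, the incoming expression stack layout in words above rsp,
    // matching the offsets used below (a long occupies two slots on x86_32):
    //   updateBytes:      [len][off][b (byte[])][crc]
    //   updateByteBuffer: [len][off][buf_lo][buf_hi][crc]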

    // Load parameters
    const Register crc = rax;  // crc
    const Register buf = rdx;  // source java byte array address
    const Register len = rdi;  // length

    // Arguments are reversed on java expression stack
    __ movl(len, Address(rsp, wordSize)); // Length
    // Calculate address of start element
    if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) {
      __ movptr(buf, Address(rsp, 3*wordSize)); // long buf
      __ addptr(buf, Address(rsp, 2*wordSize)); // + offset
      __ movl(crc,   Address(rsp, 5*wordSize)); // Initial CRC
    } else {
      __ movptr(buf, Address(rsp, 3*wordSize)); // byte[] array
      __ addptr(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size
      __ addptr(buf, Address(rsp, 2*wordSize)); // + offset
      __ movl(crc,   Address(rsp, 4*wordSize)); // Initial CRC
    }

    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32()), crc, buf, len);
    // result in rax

    // _areturn
    __ pop(rdi);                // get return address
    __ mov(rsp, rsi);           // set sp to sender sp
    __ jmp(rdi);

    // generate a vanilla native entry as the slow path
    __ bind(slow_path);

    (void) generate_native_entry(false);

    return entry;
  }
  return generate_native_entry(false);
}

//
// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the native method
// than the typical interpreter frame setup.
//

address InterpreterGenerator::generate_native_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls;

  // rbx: Method*
  // rsi: sender sp
  // rsi: previous interpreter state (C++ interpreter) must preserve
  address entry_point = __ pc();

  const Address constMethod       (rbx, Method::const_offset());
  const Address access_flags      (rbx, Method::access_flags_offset());
  const Address size_of_parameters(rcx, ConstMethod::size_of_parameters_offset());

  // get parameter size (always needed)
  __ movptr(rcx, constMethod);
  __ load_unsigned_short(rcx, size_of_parameters);

  // native calls don't need the stack size check since they have no expression stack
  // and the arguments are already on the stack and we only add a handful of words
  // to the stack

  // rbx: Method*
  // rcx: size of parameters
  // rsi: sender sp

  __ pop(rax);                  // get return address
  // for natives the size of locals is zero

  // compute beginning of parameters (rdi)
  __ lea(rdi, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));


  // add 2 zero-initialized slots for native calls
  // NULL result handler
  __ push((int32_t)NULL_WORD);
  // NULL oop temp (mirror or jni oop result)
  __ push((int32_t)NULL_WORD);

  // initialize fixed part of activation frame
  generate_fixed_frame(true);

  // make sure method is native & not abstract
#ifdef ASSERT
  __ movl(rax, access_flags);
  {
    Label L;
    __ testl(rax, JVM_ACC_NATIVE);
    __ jcc(Assembler::notZero, L);
    __ stop("tried to execute non-native method as native");
    __ bind(L);
  }
  { Label L;
    __ testl(rax, JVM_ACC_ABSTRACT);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // Since at this point in the method invocation the exception handler
  // would try to exit the monitor of synchronized methods which hasn't
  // been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation will
  // check this flag.

  __ get_thread(rax);
  const Address do_not_unlock_if_synchronized(rax,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ movbool(do_not_unlock_if_synchronized, true);

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  bang_stack_shadow_pages(true);

  // reset the _do_not_unlock_if_synchronized flag
  __ get_thread(rax);
  __ movbool(do_not_unlock_if_synchronized, false);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so method is not locked if overflows.
  //
  if (synchronized) {
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    { Label L;
      __ movl(rax, access_flags);
      __ testl(rax, JVM_ACC_SYNCHRONIZED);
      __ jcc(Assembler::zero, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  { Label L;
    const Address monitor_block_top (rbp,
                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ movptr(rax, monitor_block_top);
    __ cmpptr(rax, rsp);
    __ jcc(Assembler::equal, L);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif

  // jvmti/dtrace support
  __ notify_method_entry();

  // work registers
  const Register method = rbx;
  const Register thread = rdi;
  const Register t      = rcx;

  // allocate space for parameters
  __ get_method(method);
  __ movptr(t, Address(method, Method::const_offset()));
  __ load_unsigned_short(t, Address(t, ConstMethod::size_of_parameters_offset()));

  __ shlptr(t, Interpreter::logStackElementSize);
  __ addptr(t, 2*wordSize);     // allocate two more slots for JNIEnv and possible mirror
  __ subptr(rsp, t);
  __ andptr(rsp, -(StackAlignmentInBytes)); // gcc needs 16 byte aligned stacks to do XMM intrinsics

  // get signature handler
  { Label L;
    __ movptr(t, Address(method, Method::signature_handler_offset()));
    __ testptr(t, t);
    __ jcc(Assembler::notZero, L);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), method);
    __ get_method(method);
    __ movptr(t, Address(method, Method::signature_handler_offset()));
    __ bind(L);
  }

  // call signature handler
  assert(InterpreterRuntime::SignatureHandlerGenerator::from() == rdi, "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::to  () == rsp, "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == t  , "adjust this code");
  // The generated handlers do not touch RBX (the method oop).
  // However, large signatures cannot be cached and are generated
  // each time here. The slow-path generator will blow RBX
  // sometime, so we must reload it after the call.
  __ call(t);
  __ get_method(method);        // slow path call blows RBX on DevStudio 5.0

  // result handler is in rax
  // set result handler
  __ movptr(Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize), rax);

  // pass mirror handle if static call
  { Label L;
    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
    __ movl(t, Address(method, Method::access_flags_offset()));
    __ testl(t, JVM_ACC_STATIC);
    __ jcc(Assembler::zero, L);
    // get mirror
    __ movptr(t, Address(method, Method::const_offset()));
    __ movptr(t, Address(t, ConstMethod::constants_offset()));
    __ movptr(t, Address(t, ConstantPool::pool_holder_offset_in_bytes()));
    __ movptr(t, Address(t, mirror_offset));
    // copy mirror into activation frame
    __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize), t);
    // pass handle to mirror
    __ lea(t, Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
    __ movptr(Address(rsp, wordSize), t);
    __ bind(L);
  }

  // get native function entry point
  { Label L;
    __ movptr(rax, Address(method, Method::native_function_offset()));
    ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
    __ cmpptr(rax, unsatisfied.addr());
    __ jcc(Assembler::notEqual, L);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), method);
    __ get_method(method);
    __ movptr(rax, Address(method, Method::native_function_offset()));
    __ bind(L);
  }

  // pass JNIEnv
  __ get_thread(thread);
  __ lea(t, Address(thread, JavaThread::jni_environment_offset()));
  __ movptr(Address(rsp, 0), t);

  // set_last_Java_frame_before_call
  // It is enough that the pc()
  // points into the right code segment. It does not have to be the correct return pc.
  __ set_last_Java_frame(thread, noreg, rbp, __ pc());

  // change thread state
#ifdef ASSERT
  { Label L;
    __ movl(t, Address(thread, JavaThread::thread_state_offset()));
    __ cmpl(t, _thread_in_Java);
    __ jcc(Assembler::equal, L);
    __ stop("Wrong thread state in native stub");
    __ bind(L);
  }
#endif

  // Change state to native
  __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native);
  __ call(rax);

  // result potentially in rdx:rax or ST0

  // Verify or restore cpu control state after JNI call
  __ restore_cpu_control_state_after_jni();

  // save potential result in ST(0) & rdx:rax
  // (if result handler is the T_FLOAT or T_DOUBLE handler, result must be in ST0 -
  //  the check is necessary to avoid potential Intel FPU overflow problems by saving/restoring 'empty' FPU registers)
  // It is safe to do this push because state is _thread_in_native and return address will be found
  // via _last_native_pc and not via _last_java_sp

  // NOTE: the order of these push(es) is known to frame::interpreter_frame_result.
  // If the order changes or anything else is added to the stack the code in
  // interpreter_frame_result will have to be changed.
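
  // (The block below decides whether ST0 needs to be spilled by comparing the
  //  result handler stored in the frame against the canonical T_FLOAT and
  //  T_DOUBLE handlers; a match means the native method returned an FP value
  //  in ST0.)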

  { Label L;
    Label push_double;
    ExternalAddress float_handler(AbstractInterpreter::result_handler(T_FLOAT));
    ExternalAddress double_handler(AbstractInterpreter::result_handler(T_DOUBLE));
    __ cmpptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset + 1)*wordSize),
              float_handler.addr());
    __ jcc(Assembler::equal, push_double);
    __ cmpptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset + 1)*wordSize),
              double_handler.addr());
    __ jcc(Assembler::notEqual, L);
    __ bind(push_double);
    __ push(dtos);
    __ bind(L);
  }
  __ push(ltos);

  // change thread state
  __ get_thread(thread);
  __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native_trans);
  if (os::is_MP()) {
    if (UseMembar) {
      // Force this write out before the read below
      __ membar(Assembler::Membar_mask_bits(
           Assembler::LoadLoad | Assembler::LoadStore |
           Assembler::StoreLoad | Assembler::StoreStore));
    } else {
      // Write serialization page so VM thread can do a pseudo remote membar.
      // We use the current thread pointer to calculate a thread specific
      // offset to write to within the page. This minimizes bus traffic
      // due to cache line collision.
      __ serialize_memory(thread, rcx);
    }
  }

  if (AlwaysRestoreFPU) {
    // Make sure the control word is correct.
    __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
  }

  // check for safepoint operation in progress and/or pending suspend requests
  { Label Continue;

    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
             SafepointSynchronize::_not_synchronized);

    Label L;
    __ jcc(Assembler::notEqual, L);
    __ cmpl(Address(thread, JavaThread::suspend_flags_offset()), 0);
    __ jcc(Assembler::equal, Continue);
    __ bind(L);

    // Don't use call_VM as it will see a possible pending exception and forward it
    // and never return here preventing us from clearing _last_native_pc down below.
    // Also can't use call_VM_leaf either as it will check to see if rsi & rdi are
    // preserved and correspond to the bcp/locals pointers. So we do a runtime call
    // by hand.
1279 // 1280 __ push(thread); 1281 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, 1282 JavaThread::check_special_condition_for_native_trans))); 1283 __ increment(rsp, wordSize); 1284 __ get_thread(thread); 1285 1286 __ bind(Continue); 1287 } 1288 1289 // change thread state 1290 __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_Java); 1291 1292 __ reset_last_Java_frame(thread, true, true); 1293 1294 // reset handle block 1295 __ movptr(t, Address(thread, JavaThread::active_handles_offset())); 1296 __ movptr(Address(t, JNIHandleBlock::top_offset_in_bytes()), NULL_WORD); 1297 1298 // If result was an oop then unbox and save it in the frame 1299 { Label L; 1300 Label no_oop, store_result; 1301 ExternalAddress handler(AbstractInterpreter::result_handler(T_OBJECT)); 1302 __ cmpptr(Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize), 1303 handler.addr()); 1304 __ jcc(Assembler::notEqual, no_oop); 1305 __ cmpptr(Address(rsp, 0), (int32_t)NULL_WORD); 1306 __ pop(ltos); 1307 __ testptr(rax, rax); 1308 __ jcc(Assembler::zero, store_result); 1309 // unbox 1310 __ movptr(rax, Address(rax, 0)); 1311 __ bind(store_result); 1312 __ movptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset)*wordSize), rax); 1313 // keep stack depth as expected by pushing oop which will eventually be discarded 1314 __ push(ltos); 1315 __ bind(no_oop); 1316 } 1317 1318 { 1319 Label no_reguard; 1320 __ cmpl(Address(thread, JavaThread::stack_guard_state_offset()), JavaThread::stack_guard_yellow_disabled); 1321 __ jcc(Assembler::notEqual, no_reguard); 1322 1323 __ pusha(); 1324 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages))); 1325 __ popa(); 1326 1327 __ bind(no_reguard); 1328 } 1329 1330 // restore rsi to have legal interpreter frame, 1331 // i.e., bci == 0 <=> rsi == code_base() 1332 // Can't call_VM until bcp is within reasonable. 1333 __ get_method(method); // method is junk from thread_in_native to now. 1334 __ movptr(rsi, Address(method,Method::const_offset())); // get ConstMethod* 1335 __ lea(rsi, Address(rsi,ConstMethod::codes_offset())); // get codebase 1336 1337 // handle exceptions (exception handling will handle unlocking!) 1338 { Label L; 1339 __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD); 1340 __ jcc(Assembler::zero, L); 1341 // Note: At some point we may want to unify this with the code used in call_VM_base(); 1342 // i.e., we should use the StubRoutines::forward_exception code. For now this 1343 // doesn't work here because the rsp is not correctly set at this point. 1344 __ MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception)); 1345 __ should_not_reach_here(); 1346 __ bind(L); 1347 } 1348 1349 // do unlocking if necessary 1350 { Label L; 1351 __ movl(t, Address(method, Method::access_flags_offset())); 1352 __ testl(t, JVM_ACC_SYNCHRONIZED); 1353 __ jcc(Assembler::zero, L); 1354 // the code below should be shared with interpreter macro assembler implementation 1355 { Label unlock; 1356 // BasicObjectLock will be first in list, since this is a synchronized method. However, need 1357 // to check that the object has not been unlocked by an explicit monitorexit bytecode. 
      const Address monitor(rbp, frame::interpreter_frame_initial_sp_offset * wordSize - (int)sizeof(BasicObjectLock));

      __ lea(rdx, monitor);                   // address of first monitor

      __ movptr(t, Address(rdx, BasicObjectLock::obj_offset_in_bytes()));
      __ testptr(t, t);
      __ jcc(Assembler::notZero, unlock);

      // Entry already unlocked, need to throw exception
      __ MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
      __ should_not_reach_here();

      __ bind(unlock);
      __ unlock_object(rdx);
    }
    __ bind(L);
  }

  // jvmti/dtrace support
  // Note: This must happen _after_ handling/throwing any exceptions since
  //       the exception handler code notifies the runtime of method exits
  //       too. If this happens before, method entry/exit notifications are
  //       not properly paired (was bug - gri 11/22/99).
  __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);

  // restore potential result in rdx:rax, call result handler to restore potential result in ST0 & handle result
  __ pop(ltos);
  __ movptr(t, Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize));
  __ call(t);

  // remove activation
  __ movptr(t, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp
  __ leave();                   // remove frame anchor
  __ pop(rdi);                  // get return address
  __ mov(rsp, t);               // set sp to sender sp
  __ jmp(rdi);

  if (inc_counter) {
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(&continue_after_compile);
  }

  return entry_point;
}

//
// Generic interpreted method entry to (asm) interpreter
//
address InterpreterGenerator::generate_normal_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls;

  // rbx: Method*
  // rsi: sender sp
  address entry_point = __ pc();

  const Address constMethod       (rbx, Method::const_offset());
  const Address access_flags      (rbx, Method::access_flags_offset());
  const Address size_of_parameters(rdx, ConstMethod::size_of_parameters_offset());
  const Address size_of_locals    (rdx, ConstMethod::size_of_locals_offset());

  // get parameter size (always needed)
  __ movptr(rdx, constMethod);
  __ load_unsigned_short(rcx, size_of_parameters);

  // rbx: Method*
  // rcx: size of parameters

  // rsi: sender_sp (could differ from sp+wordSize if we were called via c2i)

  __ load_unsigned_short(rdx, size_of_locals);  // get size of locals in words
  __ subl(rdx, rcx);                            // rdx = no. of additional locals

  // see if we've got enough room on the stack for locals plus overhead.
  generate_stack_overflow_check();

  // get return address
  __ pop(rax);

  // compute beginning of parameters (rdi)
  __ lea(rdi, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));

  // rdx - # of additional locals
  // allocate space for locals
  // explicitly initialize locals
  {
    Label exit, loop;
    __ testl(rdx, rdx);
    __ jcc(Assembler::lessEqual, exit);         // do nothing if rdx <= 0
    __ bind(loop);
    __ push((int32_t)NULL_WORD);                // initialize local variables
    __ decrement(rdx);                          // until everything initialized
    __ jcc(Assembler::greater, loop);
    __ bind(exit);
  }

  // initialize fixed part of activation frame
  generate_fixed_frame(false);

  // make sure method is not native & not abstract
#ifdef ASSERT
  __ movl(rax, access_flags);
  {
    Label L;
    __ testl(rax, JVM_ACC_NATIVE);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute native method as non-native");
    __ bind(L);
  }
  { Label L;
    __ testl(rax, JVM_ACC_ABSTRACT);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // Since at this point in the method invocation the exception handler
  // would try to exit the monitor of synchronized methods which hasn't
  // been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation will
  // check this flag.

  __ get_thread(rax);
  const Address do_not_unlock_if_synchronized(rax,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ movbool(do_not_unlock_if_synchronized, true);

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  Label profile_method;
  Label profile_method_continue;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, &profile_method, &profile_method_continue);
    if (ProfileInterpreter) {
      __ bind(profile_method_continue);
    }
  }
  Label continue_after_compile;
  __ bind(continue_after_compile);

  bang_stack_shadow_pages(false);

  // reset the _do_not_unlock_if_synchronized flag
  __ get_thread(rax);
  __ movbool(do_not_unlock_if_synchronized, false);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so method is not locked if overflows.
  //
  if (synchronized) {
    // Allocate monitor and lock method
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    { Label L;
      __ movl(rax, access_flags);
      __ testl(rax, JVM_ACC_SYNCHRONIZED);
      __ jcc(Assembler::zero, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  { Label L;
    const Address monitor_block_top (rbp,
                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ movptr(rax, monitor_block_top);
    __ cmpptr(rax, rsp);
    __ jcc(Assembler::equal, L);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif

  // jvmti support
  __ notify_method_entry();

  __ dispatch_next(vtos);

  // invocation counter overflow
  if (inc_counter) {
    if (ProfileInterpreter) {
      // We have decided to profile this method in the interpreter
      __ bind(profile_method);
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
      __ set_method_data_pointer_for_bcp();
      __ get_method(rbx);
      __ jmp(profile_method_continue);
    }
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(&continue_after_compile);
  }

  return entry_point;
}

//------------------------------------------------------------------------------------------------------------------------
// Entry points
//
// Here we generate the various kinds of entries into the interpreter.
// The two main entry types are generic bytecode methods and native call methods.
// These both come in synchronized and non-synchronized versions but the
// frame layout they create is very similar. The other method entry
// types are really just special purpose entries that are really entry
// and interpretation all in one. These are for trivial methods like
// accessor, empty, or special math methods.
//
// When control flow reaches any of the entry types for the interpreter
// the following holds ->
//
// Arguments:
//
// rbx: Method*
// rcx: receiver
//
//
// Stack layout immediately at entry
//
// [ return address     ] <--- rsp
// [ parameter n        ]
//   ...
// [ parameter 1        ]
// [ expression stack   ] (caller's java expression stack)

// Assuming that we don't go to one of the trivial specialized
// entries the stack will look like below when we are ready to execute
// the first bytecode (or call the native routine). The register usage
// will be as the template based interpreter expects (see interpreter_x86.hpp).
//
// local variables follow incoming parameters immediately (i.e., the
// return address is moved to the end of the locals).
//
// [ monitor entry      ] <--- rsp
//   ...
// [ monitor entry      ]
// [ expr. stack bottom ]
// [ saved rsi          ]
// [ current rdi        ]
// [ Method*            ]
// [ saved rbp          ] <--- rbp
// [ return address     ]
// [ local variable m   ]
//   ...
// [ local variable 1   ]
// [ parameter n        ]
//   ...

address AbstractInterpreterGenerator::generate_method_entry(AbstractInterpreter::MethodKind kind) {
  // determine code generation flags
  bool synchronized = false;
  address entry_point = NULL;
  InterpreterGenerator* ig_this = (InterpreterGenerator*)this;

  switch (kind) {
    case Interpreter::zerolocals             :                                                        break;
    case Interpreter::zerolocals_synchronized: synchronized = true;                                   break;
    case Interpreter::native                 : entry_point = ig_this->generate_native_entry(false);   break;
    case Interpreter::native_synchronized    : entry_point = ig_this->generate_native_entry(true);    break;
    case Interpreter::empty                  : entry_point = ig_this->generate_empty_entry();         break;
    case Interpreter::accessor               : entry_point = ig_this->generate_accessor_entry();      break;
    case Interpreter::abstract               : entry_point = ig_this->generate_abstract_entry();      break;

    case Interpreter::java_lang_math_sin     : // fall thru
    case Interpreter::java_lang_math_cos     : // fall thru
    case Interpreter::java_lang_math_tan     : // fall thru
    case Interpreter::java_lang_math_abs     : // fall thru
    case Interpreter::java_lang_math_log     : // fall thru
    case Interpreter::java_lang_math_log10   : // fall thru
    case Interpreter::java_lang_math_sqrt    : // fall thru
    case Interpreter::java_lang_math_pow     : // fall thru
    case Interpreter::java_lang_math_exp     : entry_point = ig_this->generate_math_entry(kind);      break;
    case Interpreter::java_lang_ref_reference_get
                                             : entry_point = ig_this->generate_Reference_get_entry(); break;
    case Interpreter::java_util_zip_CRC32_update
                                             : entry_point = ig_this->generate_CRC32_update_entry();  break;
    case Interpreter::java_util_zip_CRC32_updateBytes
                                             : // fall thru
    case Interpreter::java_util_zip_CRC32_updateByteBuffer
                                             : entry_point = ig_this->generate_CRC32_updateBytes_entry(kind); break;
    default:
      fatal(err_msg("unexpected method kind: %d", kind));
      break;
  }

  if (entry_point) {
    return entry_point;
  }

  return ig_this->generate_normal_entry(synchronized);
}

// These methods should never be compiled: if they were, the interpreter
// would prefer the compiled version to the intrinsic entry generated above.
bool AbstractInterpreter::can_be_compiled(methodHandle m) {
  switch (method_kind(m)) {
    case Interpreter::java_lang_math_sin     : // fall thru
    case Interpreter::java_lang_math_cos     : // fall thru
    case Interpreter::java_lang_math_tan     : // fall thru
    case Interpreter::java_lang_math_abs     : // fall thru
    case Interpreter::java_lang_math_log     : // fall thru
    case Interpreter::java_lang_math_log10   : // fall thru
    case Interpreter::java_lang_math_sqrt    : // fall thru
    case Interpreter::java_lang_math_pow     : // fall thru
    case Interpreter::java_lang_math_exp     :
      return false;
    default:
      return true;
  }
}
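
// For example (assuming the math intrinsics are enabled), the shared
// AbstractInterpreter::method_kind() classifies java.lang.Math.sqrt(D)D as
// java_lang_math_sqrt, so generate_method_entry() routes it to
// generate_math_entry() and can_be_compiled() keeps the compilers away from
// it; an ordinary method falls through to generate_normal_entry().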

// How much stack a method activation needs in words.
int AbstractInterpreter::size_top_interpreter_activation(Method* method) {

  const int stub_code = 4;  // see generate_call_stub
  // Save space for one monitor to get into the interpreted method in case
  // the method is synchronized
  int monitor_size = method->is_synchronized() ?
                       1 * frame::interpreter_frame_monitor_size() : 0;

  // total overhead size: entry_size + (saved rbp through expr stack bottom).
  // Be sure to change this if you add/subtract anything to/from the overhead area.
  const int overhead_size = -frame::interpreter_frame_initial_sp_offset;

  const int method_stack = (method->max_locals() + method->max_stack()) *
                           Interpreter::stackElementWords;
  return overhead_size + method_stack + stub_code;
}

// asm based interpreter deoptimization helpers

int AbstractInterpreter::layout_activation(Method* method,
                                           int tempcount,
                                           int popframe_extra_args,
                                           int moncount,
                                           int caller_actual_parameters,
                                           int callee_param_count,
                                           int callee_locals,
                                           frame* caller,
                                           frame* interpreter_frame,
                                           bool is_top_frame,
                                           bool is_bottom_frame) {
  // Note: This calculation must exactly parallel the frame setup
  // in AbstractInterpreterGenerator::generate_method_entry.
  // If interpreter_frame != NULL, set up the method, locals, and monitors.
  // The frame interpreter_frame, if not NULL, is guaranteed to be the right
  // size, as determined by a previous call to this method.
  // It is also guaranteed to be walkable even though it is in a skeletal state.
  // NOTE: the returned size is in words, not bytes.

  // fixed size of an interpreter frame:
  int max_locals = method->max_locals() * Interpreter::stackElementWords;
  int extra_locals = (method->max_locals() - method->size_of_parameters()) *
                     Interpreter::stackElementWords;

  int overhead = frame::sender_sp_offset - frame::interpreter_frame_initial_sp_offset;

  // Our locals were accounted for by the caller (or last_frame_adjust on the transition).
  // Since the callee parameters already account for the callee's params, we only need
  // to account for the extra locals.

  int size = overhead +
             ((callee_locals - callee_param_count) * Interpreter::stackElementWords) +
             (moncount * frame::interpreter_frame_monitor_size()) +
             tempcount * Interpreter::stackElementWords + popframe_extra_args;
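
  // Worked example (a sketch with assumed constants; the real values come
  // from frame_x86.hpp): with sender_sp_offset == 2 and
  // interpreter_frame_initial_sp_offset == -8, overhead == 10 words. For a
  // callee with three extra locals (callee_locals - callee_param_count == 3),
  // one monitor (interpreter_frame_monitor_size() == 2 words assumed), two
  // expression stack temps, no popframe args, and stackElementWords == 1:
  //
  //   size = 10 + 3*1 + 1*2 + 2*1 + 0 == 17 words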

  if (interpreter_frame != NULL) {
#ifdef ASSERT
    if (!EnableInvokeDynamic) {
      // @@@ FIXME: Should we correct interpreter_frame_sender_sp in the calling sequences?
      // Probably, since deoptimization doesn't work yet.
      assert(caller->unextended_sp() == interpreter_frame->interpreter_frame_sender_sp(), "Frame not properly walkable");
    }
    assert(caller->sp() == interpreter_frame->sender_sp(), "Frame not properly walkable(2)");
#endif

    interpreter_frame->interpreter_frame_set_method(method);
    // NOTE the difference between sender_sp and interpreter_frame_sender_sp:
    // interpreter_frame_sender_sp is the original sp of the caller (the
    // unextended_sp), while sender_sp is fp+8.
    intptr_t* locals = interpreter_frame->sender_sp() + max_locals - 1;

#ifdef ASSERT
    if (caller->is_interpreted_frame()) {
      assert(locals < caller->fp() + frame::interpreter_frame_initial_sp_offset, "bad placement");
    }
#endif

    interpreter_frame->interpreter_frame_set_locals(locals);
    BasicObjectLock* montop = interpreter_frame->interpreter_frame_monitor_begin();
    BasicObjectLock* monbot = montop - moncount;
    interpreter_frame->interpreter_frame_set_monitor_end(monbot);

    // Set last_sp
    intptr_t* rsp = (intptr_t*) monbot -
                    tempcount * Interpreter::stackElementWords -
                    popframe_extra_args;
    interpreter_frame->interpreter_frame_set_last_sp(rsp);

    // All frames but the initial (oldest) interpreter frame we fill in have a
    // value for sender_sp that allows walking the stack but isn't
    // truly correct. Correct the value here.

    if (extra_locals != 0 &&
        interpreter_frame->sender_sp() == interpreter_frame->interpreter_frame_sender_sp()) {
      interpreter_frame->set_interpreter_frame_sender_sp(caller->sp() + extra_locals);
    }
    *interpreter_frame->interpreter_frame_cache_addr() =
      method->constants()->cache();
  }
  return size;
}


//------------------------------------------------------------------------------------------------------------------------
// Exceptions

void TemplateInterpreterGenerator::generate_throw_exception() {
  // Entry point in previous activation (i.e., if the caller was interpreted)
  Interpreter::_rethrow_exception_entry = __ pc();
  const Register thread = rcx;

  // Restore sp to interpreter_frame_last_sp even though we are going
  // to empty the expression stack for the exception processing.
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
  // rax: exception
  // rdx: return address/pc that threw exception
  __ restore_bcp();  // rsi points to call/send
  __ restore_locals();

  // Entry point for exceptions thrown within interpreter code
  Interpreter::_throw_exception_entry = __ pc();
  // expression stack is undefined here
  // rax: exception
  // rsi: exception bcp
  __ verify_oop(rax);

  // expression stack must be empty before entering the VM in case of an exception
  __ empty_expression_stack();
  __ empty_FPU_stack();
  // find exception handler address and preserve exception oop
  __ call_VM(rdx, CAST_FROM_FN_PTR(address, InterpreterRuntime::exception_handler_for_exception), rax);
  // rax: exception handler entry point
  // rdx: preserved exception oop
  // rsi: bcp for exception handler
  __ push_ptr(rdx);  // push exception which is now the only value on the stack
  __ jmp(rax);       // jump to exception handler (may be _remove_activation_entry!)
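
  // The dispatch just emitted, in outline (a sketch of the control flow,
  // not generated code):
  //
  //   _throw_exception_entry
  //     -> InterpreterRuntime::exception_handler_for_exception(exception)
  //        // runtime lookup; returns a handler pc and sets the handler bcp
  //     -> handler found in this method: continue in the dispatch loop
  //     -> no handler in this method:    _remove_activation_entry, which
  //        unwinds one frame and continues the search in the caller
  //        (re-entering here via _rethrow_exception_entry if the caller
  //        is interpreted).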

  // If the exception is not handled in the current frame, the frame is
  // removed and the exception is rethrown (i.e., the exception continuation
  // is _rethrow_exception).
  //
  // Note: At this point the bci is still the bci for the instruction which
  // caused the exception, and the expression stack is empty. Thus, for any
  // VM calls at this point, GC will find a legal oop map (with empty
  // expression stack).

  // In current activation
  // tos: exception
  // rsi: exception bcp

  //
  // JVMTI PopFrame support
  //

  Interpreter::_remove_activation_preserving_args_entry = __ pc();
  __ empty_expression_stack();
  __ empty_FPU_stack();
  // Set the popframe_processing bit in pending_popframe_condition indicating that we are
  // currently handling popframe, so that call_VMs that may happen later do not trigger new
  // popframe handling cycles.
  __ get_thread(thread);
  __ movl(rdx, Address(thread, JavaThread::popframe_condition_offset()));
  __ orl(rdx, JavaThread::popframe_processing_bit);
  __ movl(Address(thread, JavaThread::popframe_condition_offset()), rdx);

  {
    // Check to see whether we are returning to a deoptimized frame.
    // (The PopFrame call ensures that the caller of the popped frame is
    // either interpreted or compiled and deoptimizes it if compiled.)
    // In this case, we can't call dispatch_next() after the frame is
    // popped, but instead must save the incoming arguments and restore
    // them after deoptimization has occurred.
    //
    // Note that we don't compare the return PC against the
    // deoptimization blob's unpack entry because of the presence of
    // adapter frames in C2.
    Label caller_not_deoptimized;
    __ movptr(rdx, Address(rbp, frame::return_addr_offset * wordSize));
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), rdx);
    __ testl(rax, rax);
    __ jcc(Assembler::notZero, caller_not_deoptimized);

    // Compute size of arguments for saving when returning to deoptimized caller
    __ get_method(rax);
    __ movptr(rax, Address(rax, Method::const_offset()));
    __ load_unsigned_short(rax, Address(rax, ConstMethod::size_of_parameters_offset()));
    __ shlptr(rax, Interpreter::logStackElementSize);
    __ restore_locals();
    __ subptr(rdi, rax);
    __ addptr(rdi, wordSize);
    // Save these arguments
    __ get_thread(thread);
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), thread, rax, rdi);

    __ remove_activation(vtos, rdx,
                         /* throw_monitor_exception */ false,
                         /* install_monitor_exception */ false,
                         /* notify_jvmdi */ false);

    // Inform deoptimization that it is responsible for restoring these arguments
    __ get_thread(thread);
    __ movl(Address(thread, JavaThread::popframe_condition_offset()), JavaThread::popframe_force_deopt_reexecution_bit);

    // Continue in deoptimization handler
    __ jmp(rdx);

    __ bind(caller_not_deoptimized);
  }

  __ remove_activation(vtos, rdx,
                       /* throw_monitor_exception */ false,
                       /* install_monitor_exception */ false,
                       /* notify_jvmdi */ false);
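
  // remove_activation() pops this interpreter frame and leaves the return
  // address in the register passed to it (rdx in both calls above). The
  // deoptimized-caller branch therefore reads, in outline (a sketch, not
  // generated code):
  //
  //   popframe_preserve_args(thread, size, start);  // save outgoing args
  //   remove_activation(...);                       // pop this frame
  //   popframe_condition = popframe_force_deopt_reexecution_bit;
  //   jmp rdx;  // lands in the deopt blob, which restores the saved args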

  // Finish with popframe handling.
  // A previous I2C adapter followed by a deoptimization might have moved
  // the outgoing arguments further up the stack. PopFrame expects the
  // mutations to those outgoing arguments to be preserved, and other
  // constraints basically require this frame to look exactly as
  // though it had previously invoked an interpreted activation with
  // no space between the top of the expression stack (current
  // last_sp) and the top of stack. Rather than force deopt to
  // maintain this kind of invariant all the time, we call a small
  // fixup routine that moves the mutated arguments onto the top of our
  // expression stack if necessary.
  __ mov(rax, rsp);
  __ movptr(rbx, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ get_thread(thread);
  // PC must point into interpreter here
  __ set_last_Java_frame(thread, noreg, rbp, __ pc());
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), thread, rax, rbx);
  __ get_thread(thread);
  __ reset_last_Java_frame(thread, true, true);
  // Restore the last_sp and null it out
  __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);

  __ restore_bcp();
  __ restore_locals();
  // The method data pointer was incremented already during
  // call profiling. We have to restore the mdp for the current bcp.
  if (ProfileInterpreter) {
    __ set_method_data_pointer_for_bcp();
  }

  // Clear the popframe condition flag
  __ get_thread(thread);
  __ movl(Address(thread, JavaThread::popframe_condition_offset()), JavaThread::popframe_inactive);
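
  // The popframe condition word manipulated above is a small bit-set
  // protocol between the interpreter and JVMTI; the values (as defined by
  // JavaThread::PopCondition in thread.hpp in this code base) are:
  //
  //   popframe_inactive                    = 0x00
  //   popframe_pending_bit                 = 0x01
  //   popframe_processing_bit              = 0x02
  //   popframe_force_deopt_reexecution_bit = 0x04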

  if (EnableInvokeDynamic) {
    Label L_done;
    const Register local0 = rdi;

    __ cmpb(Address(rsi, 0), Bytecodes::_invokestatic);
    __ jcc(Assembler::notEqual, L_done);

    // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
    // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.

    __ get_method(rdx);
    __ movptr(rax, Address(local0, 0));
    __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), rax, rdx, rsi);

    __ testptr(rax, rax);
    __ jcc(Assembler::zero, L_done);

    __ movptr(Address(local0, 0), rax);
    __ bind(L_done);
  }

  __ dispatch_next(vtos);
  // end of PopFrame support

  Interpreter::_remove_activation_entry = __ pc();

  // preserve exception over this code sequence
  __ pop_ptr(rax);
  __ get_thread(thread);
  __ movptr(Address(thread, JavaThread::vm_result_offset()), rax);
  // remove the activation (without doing throws on illegalMonitorExceptions)
  __ remove_activation(vtos, rdx, false, true, false);
  // restore exception
  __ get_thread(thread);
  __ get_vm_result(rax, thread);

  // In between activations - previous activation type unknown yet.
  // Compute continuation point - the continuation point expects
  // the following registers set up:
  //
  // rax: exception
  // rdx: return address/pc that threw exception
  // rsp: expression stack of caller
  // rbp: rbp of caller
  __ push(rax);  // save exception
  __ push(rdx);  // save return address
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), thread, rdx);
  __ mov(rbx, rax);  // save exception handler
  __ pop(rdx);       // restore return address
  __ pop(rax);       // restore exception
  // Note that an "issuing PC" is actually the next PC after the call
  __ jmp(rbx);       // jump to exception handler of caller
}


//
// JVMTI ForceEarlyReturn support
//
address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
  address entry = __ pc();
  const Register thread = rcx;

  __ restore_bcp();
  __ restore_locals();
  __ empty_expression_stack();
  __ empty_FPU_stack();
  __ load_earlyret_value(state);

  __ get_thread(thread);
  __ movptr(rcx, Address(thread, JavaThread::jvmti_thread_state_offset()));
  const Address cond_addr(rcx, JvmtiThreadState::earlyret_state_offset());

  // Clear the earlyret state
  __ movl(cond_addr, JvmtiThreadState::earlyret_inactive);

  __ remove_activation(state, rsi,
                       false, /* throw_monitor_exception */
                       false, /* install_monitor_exception */
                       true); /* notify_jvmdi */
  __ jmp(rsi);
  return entry;
} // end of ForceEarlyReturn support


//------------------------------------------------------------------------------------------------------------------------
// Helper for vtos entry point generation

void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
                                                         address& bep,
                                                         address& cep,
                                                         address& sep,
                                                         address& aep,
                                                         address& iep,
                                                         address& lep,
                                                         address& fep,
                                                         address& dep,
                                                         address& vep) {
  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
  Label L;
  fep = __ pc(); __ push(ftos); __ jmp(L);
  dep = __ pc(); __ push(dtos); __ jmp(L);
  lep = __ pc(); __ push(ltos); __ jmp(L);
  aep = __ pc(); __ push(atos); __ jmp(L);
  bep = cep = sep =             // fall through
  iep = __ pc(); __ push(itos); // fall through
  vep = __ pc(); __ bind(L);    // fall through
  generate_and_dispatch(t);
}
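
// Why bep == cep == sep == iep above: byte, char and short results are
// already widened to int and live in rax exactly as for itos, so the three
// subword states can share the itos entry point; only float, double, long
// and object values need a push of their own before falling into the
// common vtos path.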
//------------------------------------------------------------------------------------------------------------------------
// Generation of individual instructions

// helpers for generate_and_dispatch


InterpreterGenerator::InterpreterGenerator(StubQueue* code)
  : TemplateInterpreterGenerator(code) {
  generate_all(); // down here so it can be "virtual"
}

//------------------------------------------------------------------------------------------------------------------------

// Non-product code
#ifndef PRODUCT
address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
  address entry = __ pc();

  // prepare expression stack
  __ pop(rcx);     // pop return address so expression stack is 'pure'
  __ push(state);  // save tosca

  // pass tosca registers as arguments & call tracer
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode), rcx, rax, rdx);
  __ mov(rcx, rax);  // make sure return address is not destroyed by pop(state)
  __ pop(state);     // restore tosca

  // return
  __ jmp(rcx);

  return entry;
}


void TemplateInterpreterGenerator::count_bytecode() {
  __ incrementl(ExternalAddress((address) &BytecodeCounter::_counter_value));
}


void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
  __ incrementl(ExternalAddress((address) &BytecodeHistogram::_counters[t->bytecode()]));
}


void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
  // _index holds the previous pair index; shift its old current bytecode
  // down, or the new bytecode into the high bits, store the pair index
  // back, and bump the counter for the resulting (previous, current) pair.
  __ mov32(rbx, ExternalAddress((address) &BytecodePairHistogram::_index));
  __ shrl(rbx, BytecodePairHistogram::log2_number_of_codes);
  __ orl(rbx, ((int) t->bytecode()) << BytecodePairHistogram::log2_number_of_codes);
  __ mov32(ExternalAddress((address) &BytecodePairHistogram::_index), rbx);
  ExternalAddress table((address) BytecodePairHistogram::_counters);
  Address index(noreg, rbx, Address::times_4);
  __ incrementl(ArrayAddress(table, index));
}


void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
  // Call a little run-time stub to avoid blow-up for each bytecode.
  // The run-time stub saves the right registers, depending on
  // the tosca in-state for the given template.
  assert(Interpreter::trace_code(t->tos_in()) != NULL,
         "entry must have been generated");
  __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
}


void TemplateInterpreterGenerator::stop_interpreter_at() {
  Label L;
  __ cmp32(ExternalAddress((address) &BytecodeCounter::_counter_value),
           StopInterpreterAt);
  __ jcc(Assembler::notEqual, L);
  __ int3();
  __ bind(L);
}
#endif // !PRODUCT
#endif // CC_INTERP
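
// The non-product helpers above back develop-mode flags such as
// CountBytecodes, PrintBytecodeHistogram, PrintBytecodePairHistogram,
// TraceBytecodes and StopInterpreterAt; generate_and_dispatch() in the
// shared templateInterpreter.cpp decides which of them to emit (flag names
// as in runtime/globals.hpp for this code base).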