/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterGenerator.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateTable.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

#define __ _masm->


#ifndef CC_INTERP
const int method_offset = frame::interpreter_frame_method_offset * wordSize;
const int bci_offset    = frame::interpreter_frame_bcx_offset    * wordSize;
const int locals_offset = frame::interpreter_frame_locals_offset * wordSize;

//------------------------------------------------------------------------------------------------------------------------

address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();

  // Note: There should be a minimal interpreter frame set up when stack
  // overflow occurs since we check explicitly for it now.
  //
#ifdef ASSERT
  { Label L;
    __ lea(rax, Address(rbp,
                frame::interpreter_frame_monitor_block_top_offset * wordSize));
    __ cmpptr(rax, rsp);  // rax, = maximal rsp for current rbp,
                          //  (stack grows negative)
    __ jcc(Assembler::aboveEqual, L); // check if frame is complete
    __ stop ("interpreter frame not set up");
    __ bind(L);
  }
#endif // ASSERT
  // Restore bcp under the assumption that the current frame is still
  // interpreted
  __ restore_bcp();

  // expression stack must be empty before entering the VM if an exception
  // happened
  __ empty_expression_stack();
  __ empty_FPU_stack();
  // throw exception
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError));
  return entry;
}

address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(const char* name) {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();
  __ empty_FPU_stack();
  // setup parameters
  // ??? convention: expect aberrant index in register rbx,
  __ lea(rax, ExternalAddress((address)name));
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException), rax, rbx);
  return entry;
}

address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();
  // object is at TOS
  __ pop(rax);
  // expression stack must be empty before entering the VM if an exception
  // happened
  __ empty_expression_stack();
  __ empty_FPU_stack();
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::throw_ClassCastException),
             rax);
  return entry;
}

address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) {
  assert(!pass_oop || message == NULL, "either oop or message but not both");
  address entry = __ pc();
  if (pass_oop) {
    // object is at TOS
    __ pop(rbx);
  }
  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();
  __ empty_FPU_stack();
  // setup parameters
  __ lea(rax, ExternalAddress((address)name));
  if (pass_oop) {
    __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_klass_exception), rax, rbx);
  } else {
    if (message != NULL) {
      __ lea(rbx, ExternalAddress((address)message));
    } else {
      __ movptr(rbx, NULL_WORD);
    }
    __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), rax, rbx);
  }
  // throw exception
  __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
  return entry;
}


address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
  address entry = __ pc();
  // NULL last_sp until next java call
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
  __ dispatch_next(state);
  return entry;
}


address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
  address entry = __ pc();

#ifdef COMPILER2
  // The FPU stack is clean if UseSSE >= 2 but must be cleaned in other cases
  if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) {
    for (int i = 1; i < 8; i++) {
      __ ffree(i);
    }
  } else if (UseSSE < 2) {
    __ empty_FPU_stack();
  }
#endif
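  // Verify x87 stack depth: without SSE a float/double return should leave
  // exactly one value on the FPU stack; otherwise the stack must be empty.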
  if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) {
    __ MacroAssembler::verify_FPU(1, "generate_return_entry_for compiled");
  } else {
    __ MacroAssembler::verify_FPU(0, "generate_return_entry_for compiled");
  }

  // In SSE mode, interpreter returns FP results in xmm0 but they need
  // to end up back on the FPU so it can operate on them.
  if (state == ftos && UseSSE >= 1) {
    __ subptr(rsp, wordSize);
    __ movflt(Address(rsp, 0), xmm0);
    __ fld_s(Address(rsp, 0));
    __ addptr(rsp, wordSize);
  } else if (state == dtos && UseSSE >= 2) {
    __ subptr(rsp, 2*wordSize);
    __ movdbl(Address(rsp, 0), xmm0);
    __ fld_d(Address(rsp, 0));
    __ addptr(rsp, 2*wordSize);
  }

  __ MacroAssembler::verify_FPU(state == ftos || state == dtos ? 1 : 0, "generate_return_entry_for in interpreter");

  // Restore stack bottom in case i2c adjusted stack
  __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  // and NULL it as marker that rsp is now tos until next java call
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);

  __ restore_bcp();
  __ restore_locals();

  if (state == atos) {
    Register mdp = rbx;
    Register tmp = rcx;
    __ profile_return_type(mdp, rax, tmp);
  }

  const Register cache = rbx;
  const Register index = rcx;
  __ get_cache_and_index_at_bcp(cache, index, 1, index_size);

  const Register flags = cache;
  __ movl(flags, Address(cache, index, Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
  __ andl(flags, ConstantPoolCacheEntry::parameter_size_mask);
  __ lea(rsp, Address(rsp, flags, Interpreter::stackElementScale()));
  __ dispatch_next(state, step);

  return entry;
}


address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step) {
  address entry = __ pc();

  // In SSE mode, FP results are in xmm0
  if (state == ftos && UseSSE > 0) {
    __ subptr(rsp, wordSize);
    __ movflt(Address(rsp, 0), xmm0);
    __ fld_s(Address(rsp, 0));
    __ addptr(rsp, wordSize);
  } else if (state == dtos && UseSSE >= 2) {
    __ subptr(rsp, 2*wordSize);
    __ movdbl(Address(rsp, 0), xmm0);
    __ fld_d(Address(rsp, 0));
    __ addptr(rsp, 2*wordSize);
  }

  __ MacroAssembler::verify_FPU(state == ftos || state == dtos ? 1 : 0, "generate_deopt_entry_for in interpreter");

  // The stack is not extended by deopt but we must NULL last_sp as this
  // entry is like a "return".
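  // (A NULL last_sp marks rsp as the current tos until the next Java call,
  //  matching what the return entry above does.)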
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
  __ restore_bcp();
  __ restore_locals();
  // handle exceptions
  { Label L;
    const Register thread = rcx;
    __ get_thread(thread);
    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::zero, L);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }
  __ dispatch_next(state, step);
  return entry;
}


int AbstractInterpreter::BasicType_as_index(BasicType type) {
  int i = 0;
  switch (type) {
    case T_BOOLEAN: i = 0; break;
    case T_CHAR   : i = 1; break;
    case T_BYTE   : i = 2; break;
    case T_SHORT  : i = 3; break;
    case T_INT    : // fall through
    case T_LONG   : // fall through
    case T_VOID   : i = 4; break;
    case T_FLOAT  : i = 5; break;  // have to treat float and double separately for SSE
    case T_DOUBLE : i = 6; break;
    case T_OBJECT : // fall through
    case T_ARRAY  : i = 7; break;
    default       : ShouldNotReachHere();
  }
  assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers, "index out of bounds");
  return i;
}


address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type) {
  address entry = __ pc();
  switch (type) {
    case T_BOOLEAN: __ c2bool(rax);            break;
    case T_CHAR   : __ andptr(rax, 0xFFFF);    break;
    case T_BYTE   : __ sign_extend_byte (rax); break;
    case T_SHORT  : __ sign_extend_short(rax); break;
    case T_INT    : /* nothing to do */        break;
    case T_DOUBLE :
    case T_FLOAT  :
      { const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp();
        __ pop(t);                            // remove return address first
        // Must return a result for interpreter or compiler. In SSE
        // mode, results are returned in xmm0 and the FPU stack must
        // be empty.
        if (type == T_FLOAT && UseSSE >= 1) {
          // Load ST0
          __ fld_d(Address(rsp, 0));
          // Store as float and empty fpu stack
          __ fstp_s(Address(rsp, 0));
          // and reload
          __ movflt(xmm0, Address(rsp, 0));
        } else if (type == T_DOUBLE && UseSSE >= 2 ) {
          __ movdbl(xmm0, Address(rsp, 0));
        } else {
          // restore ST0
          __ fld_d(Address(rsp, 0));
        }
        // and pop the temp
        __ addptr(rsp, 2 * wordSize);
        __ push(t);                           // restore return address
      }
      break;
    case T_OBJECT :
      // retrieve result from frame
      __ movptr(rax, Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize));
      // and verify it
      __ verify_oop(rax);
      break;
    default       : ShouldNotReachHere();
  }
  __ ret(0);                                  // return from result handler
  return entry;
}

address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state, address runtime_entry) {
  address entry = __ pc();
  __ push(state);
  __ call_VM(noreg, runtime_entry);
  __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
  return entry;
}


// Helpers for commoning out cases in the various types of method entries.
//

// increment invocation count & check for overflow
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test
//
// rbx,: method
// rcx: invocation counter
//
void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
  Label done;
  // Note: In tiered we increment either counters in MethodCounters* or in MDO
  // depending on whether we're profiling or not.
  if (TieredCompilation) {
    int increment = InvocationCounter::count_increment;
    int mask = ((1 << Tier0InvokeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
    Label no_mdo;
    if (ProfileInterpreter) {
      // Are we profiling?
      __ movptr(rax, Address(rbx, Method::method_data_offset()));
      __ testptr(rax, rax);
      __ jccb(Assembler::zero, no_mdo);
      // Increment counter in the MDO
      const Address mdo_invocation_counter(rax, in_bytes(MethodData::invocation_counter_offset()) +
                                                in_bytes(InvocationCounter::counter_offset()));
      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rcx, false, Assembler::zero, overflow);
      __ jmp(done);
    }
    __ bind(no_mdo);
    // Increment counter in MethodCounters
    const Address invocation_counter(rax,
                  MethodCounters::invocation_counter_offset() +
                  InvocationCounter::counter_offset());

    __ get_method_counters(rbx, rax, done);
    __ increment_mask_and_jump(invocation_counter, increment, mask,
                               rcx, false, Assembler::zero, overflow);
    __ bind(done);
  } else {
    const Address backedge_counter  (rax,
                  MethodCounters::backedge_counter_offset() +
                  InvocationCounter::counter_offset());
    const Address invocation_counter(rax,
                  MethodCounters::invocation_counter_offset() +
                  InvocationCounter::counter_offset());

    __ get_method_counters(rbx, rax, done);

    if (ProfileInterpreter) {
      __ incrementl(Address(rax,
              MethodCounters::interpreter_invocation_counter_offset()));
    }

    // Update standard invocation counters
    __ movl(rcx, invocation_counter);
    __ incrementl(rcx, InvocationCounter::count_increment);
    __ movl(invocation_counter, rcx);                   // save invocation count

    __ movl(rax, backedge_counter);                     // load backedge counter
    __ andl(rax, InvocationCounter::count_mask_value);  // mask out the status bits

    __ addl(rcx, rax);                                  // add both counters

    // profile_method is non-null only for interpreted methods, so
    // profile_method != NULL == !native_call
    // BytecodeInterpreter only calls here for natives, so that code is elided.
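
    // (rcx now holds invocation + backedge counts, still shifted left by the
    //  status bits; the InterpreterProfileLimit / InterpreterInvocationLimit
    //  values compared against below are pre-shifted the same way.)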

    if (ProfileInterpreter && profile_method != NULL) {
      // Test to see if we should create a method data oop
      __ cmp32(rcx,
               ExternalAddress((address)&InvocationCounter::InterpreterProfileLimit));
      __ jcc(Assembler::less, *profile_method_continue);

      // if no method data exists, go to profile_method
      __ test_method_data_pointer(rax, *profile_method);
    }

    __ cmp32(rcx,
             ExternalAddress((address)&InvocationCounter::InterpreterInvocationLimit));
    __ jcc(Assembler::aboveEqual, *overflow);
    __ bind(done);
  }
}

void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {

  // Asm interpreter on entry
  // rdi - locals
  // rsi - bcp
  // rbx, - method
  // rdx - cpool
  // rbp, - interpreter frame

  // C++ interpreter on entry
  // rsi - new interpreter state pointer
  // rbp - interpreter frame pointer
  // rbx - method

  // On return (i.e. jump to entry_point) [ back to invocation of interpreter ]
  // rbx, - method
  // rcx - rcvr (assuming there is one)
  // top of stack return address of interpreter caller
  // rsp - sender_sp

  // C++ interpreter only
  // rsi - previous interpreter state pointer

  // InterpreterRuntime::frequency_counter_overflow takes one argument
  // indicating if the counter overflow occurs at a backwards branch (non-NULL bcp).
  // The call returns the address of the verified entry point for the method or NULL
  // if the compilation did not complete (either went background or bailed out).
  __ movptr(rax, (intptr_t)false);
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), rax);

  __ movptr(rbx, Address(rbp, method_offset));   // restore Method*

  // Preserve invariant that rsi/rdi contain bcp/locals of sender frame
  // and jump to the interpreted entry.
  __ jmp(*do_continue, relocInfo::none);

}

void InterpreterGenerator::generate_stack_overflow_check(void) {
  // see if we've got enough room on the stack for locals plus overhead.
  // the expression stack grows down incrementally, so the normal guard
  // page mechanism will work for that.
  //
  // Registers live on entry:
  //
  // Asm interpreter
  // rdx: number of additional locals this frame needs (what we must check)
  // rbx,: Method*

  // destroyed on exit
  // rax,

  // NOTE: the additional locals are also always pushed (this wasn't obvious in
  // generate_method_entry), so the guard should work for them too.
  //

  // monitor entry size: see picture of stack set (generate_method_entry) and frame_x86.hpp
  const int entry_size    = frame::interpreter_frame_monitor_size() * wordSize;

  // total overhead size: entry_size + (saved rbp, thru expr stack bottom).
  // be sure to change this if you add/subtract anything to/from the overhead area
  const int overhead_size = -(frame::interpreter_frame_initial_sp_offset*wordSize) + entry_size;

  const int page_size = os::vm_page_size();

  Label after_frame_check;

  // see if the frame is greater than one page in size. If so,
  // then we need to verify there is enough stack space remaining
  // for the additional locals.
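  // (rdx counts additional locals in stack elements, so the one-page byte
  //  budget is scaled down to elements for the comparison.)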
  __ cmpl(rdx, (page_size - overhead_size)/Interpreter::stackElementSize);
  __ jcc(Assembler::belowEqual, after_frame_check);

  // compute rsp as if this were going to be the last frame on
  // the stack before the red zone

  Label after_frame_check_pop;

  __ push(rsi);

  const Register thread = rsi;

  __ get_thread(thread);

  const Address stack_base(thread, Thread::stack_base_offset());
  const Address stack_size(thread, Thread::stack_size_offset());

  // locals + overhead, in bytes
  __ lea(rax, Address(noreg, rdx, Interpreter::stackElementScale(), overhead_size));

#ifdef ASSERT
  Label stack_base_okay, stack_size_okay;
  // verify that thread stack base is non-zero
  __ cmpptr(stack_base, (int32_t)NULL_WORD);
  __ jcc(Assembler::notEqual, stack_base_okay);
  __ stop("stack base is zero");
  __ bind(stack_base_okay);
  // verify that thread stack size is non-zero
  __ cmpptr(stack_size, 0);
  __ jcc(Assembler::notEqual, stack_size_okay);
  __ stop("stack size is zero");
  __ bind(stack_size_okay);
#endif

  // Add stack base to locals and subtract stack size
  __ addptr(rax, stack_base);
  __ subptr(rax, stack_size);

  // Use the maximum number of pages we might bang.
  const int max_pages = StackShadowPages > (StackRedPages+StackYellowPages) ? StackShadowPages :
                                                                              (StackRedPages+StackYellowPages);
  __ addptr(rax, max_pages * page_size);

  // check against the current stack bottom
  __ cmpptr(rsp, rax);
  __ jcc(Assembler::above, after_frame_check_pop);

  __ pop(rsi);  // get saved bcp / (c++ prev state ).

  // Restore sender's sp as SP. This is necessary if the sender's
  // frame is an extended compiled frame (see gen_c2i_adapter())
  // and safer anyway in case of JSR292 adaptations.

  __ pop(rax); // return address must be moved if SP is changed
  __ mov(rsp, rsi);
  __ push(rax);

  // Note: the restored frame is not necessarily interpreted.
  // Use the shared runtime version of the StackOverflowError.
  assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
  __ jump(ExternalAddress(StubRoutines::throw_StackOverflowError_entry()));
  // all done with frame size check
  __ bind(after_frame_check_pop);
  __ pop(rsi);

  __ bind(after_frame_check);
}

// Allocate monitor and lock method (asm interpreter)
// rbx, - Method*
//
void InterpreterGenerator::lock_method(void) {
  // synchronize method
  const Address access_flags      (rbx, Method::access_flags_offset());
  const Address monitor_block_top (rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
  const int entry_size            = frame::interpreter_frame_monitor_size() * wordSize;

#ifdef ASSERT
  { Label L;
    __ movl(rax, access_flags);
    __ testl(rax, JVM_ACC_SYNCHRONIZED);
    __ jcc(Assembler::notZero, L);
    __ stop("method doesn't need synchronization");
    __ bind(L);
  }
#endif // ASSERT
  // get synchronization object
  { Label done;
    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
    __ movl(rax, access_flags);
    __ testl(rax, JVM_ACC_STATIC);
    __ movptr(rax, Address(rdi, Interpreter::local_offset_in_bytes(0)));  // get receiver (assume this is frequent case)
    __ jcc(Assembler::zero, done);
    __ movptr(rax, Address(rbx, Method::const_offset()));
    __ movptr(rax, Address(rax, ConstMethod::constants_offset()));
    __ movptr(rax, Address(rax, ConstantPool::pool_holder_offset_in_bytes()));
    __ movptr(rax, Address(rax, mirror_offset));
    __ bind(done);
  }
  // add space for monitor & lock
  __ subptr(rsp, entry_size);                                           // add space for a monitor entry
  __ movptr(monitor_block_top, rsp);                                    // set new monitor block top
  __ movptr(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax); // store object
  __ mov(rdx, rsp);                                                     // object address
  __ lock_object(rdx);
}

//
// Generate a fixed interpreter frame. This is identical setup for interpreted methods
// and for native methods hence the shared code.
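//
// The pushes below produce, top to bottom (stack grows down):
//
//   [ expr. stack bottom ptr ] <--- rsp
//   [ bcp (0 for natives)    ]
//   [ locals pointer (rdi)   ]
//   [ ConstantPoolCache*     ]
//   [ mdp (0 if no MDO)      ]
//   [ Method*                ]
//   [ last_sp == NULL        ]
//   [ sender sp (rsi)        ]
//   [ saved rbp,             ] <--- rbp,
//   [ return address         ]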

void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  // initialize fixed part of activation frame
  __ push(rax);                                          // save return address
  __ enter();                                            // save old & set new rbp,


  __ push(rsi);                                          // set sender sp
  __ push((int32_t)NULL_WORD);                           // leave last_sp as null
  __ movptr(rsi, Address(rbx,Method::const_offset()));   // get ConstMethod*
  __ lea(rsi, Address(rsi,ConstMethod::codes_offset())); // get codebase
  __ push(rbx);                                          // save Method*
  if (ProfileInterpreter) {
    Label method_data_continue;
    __ movptr(rdx, Address(rbx, in_bytes(Method::method_data_offset())));
    __ testptr(rdx, rdx);
    __ jcc(Assembler::zero, method_data_continue);
    __ addptr(rdx, in_bytes(MethodData::data_offset()));
    __ bind(method_data_continue);
    __ push(rdx);                                        // set the mdp (method data pointer)
  } else {
    __ push(0);
  }

  __ movptr(rdx, Address(rbx, Method::const_offset()));
  __ movptr(rdx, Address(rdx, ConstMethod::constants_offset()));
  __ movptr(rdx, Address(rdx, ConstantPool::cache_offset_in_bytes()));
  __ push(rdx);                                          // set constant pool cache
  __ push(rdi);                                          // set locals pointer
  if (native_call) {
    __ push(0);                                          // no bcp
  } else {
    __ push(rsi);                                        // set bcp
  }
  __ push(0);                                            // reserve word for pointer to expression stack bottom
  __ movptr(Address(rsp, 0), rsp);                       // set expression stack bottom
}

// End of helpers

//
// Various method entries
//------------------------------------------------------------------------------------------------------------------------
//
//

// Call an accessor method (assuming it is resolved, otherwise drop into vanilla (slow path) entry)

address InterpreterGenerator::generate_accessor_entry(void) {

  // rbx,: Method*
  // rcx: receiver (preserve for slow entry into asm interpreter)

  // rsi: senderSP must be preserved for slow path, set SP to it on fast path

  address entry_point = __ pc();
  Label xreturn_path;

  // do fastpath for resolved accessor methods
  if (UseFastAccessorMethods) {
    Label slow_path;
    // If we need a safepoint check, generate full interpreter entry.
    ExternalAddress state(SafepointSynchronize::address_of_state());
    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
             SafepointSynchronize::_not_synchronized);

    __ jcc(Assembler::notEqual, slow_path);
    // ASM/C++ Interpreter
    // Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites thereof; parameter size = 1
    // Note: We can only use this code if the getfield has been resolved
    //       and if we don't have a null-pointer exception => check for
    //       these conditions first and use slow path if necessary.
    // rbx,: method
    // rcx: receiver
    __ movptr(rax, Address(rsp, wordSize));

    // check if local 0 != NULL and read field
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, slow_path);

    // read first instruction word and extract bytecode @ 1 and index @ 2
    __ movptr(rdx, Address(rbx, Method::const_offset()));
    __ movptr(rdi, Address(rdx, ConstMethod::constants_offset()));
    __ movl(rdx, Address(rdx, ConstMethod::codes_offset()));
    // Shift codes right to get the index on the right.
    // The bytecode fetched looks like <index><0xb4><0x2a>
    __ shrl(rdx, 2*BitsPerByte);
    __ shll(rdx, exact_log2(in_words(ConstantPoolCacheEntry::size())));
    __ movptr(rdi, Address(rdi, ConstantPool::cache_offset_in_bytes()));

    // rax,: local 0
    // rbx,: method
    // rcx: receiver - do not destroy since it is needed for slow path!
    // rcx: scratch
    // rdx: constant pool cache index
    // rdi: constant pool cache
    // rsi: sender sp

    // check if getfield has been resolved and read constant pool cache entry
    // check the validity of the cache entry by testing whether the _indices field
    // contains Bytecode::_getfield in the b1 byte.
    assert(in_words(ConstantPoolCacheEntry::size()) == 4, "adjust shift below");
    __ movl(rcx,
            Address(rdi,
                    rdx,
                    Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()));
    __ shrl(rcx, 2*BitsPerByte);
    __ andl(rcx, 0xFF);
    __ cmpl(rcx, Bytecodes::_getfield);
    __ jcc(Assembler::notEqual, slow_path);

    // Note: constant pool entry is not valid before bytecode is resolved
    __ movptr(rcx,
              Address(rdi,
                      rdx,
                      Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset()));
    __ movl(rdx,
            Address(rdi,
                    rdx,
                    Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));

    Label notByte, notShort, notChar;
    const Address field_address (rax, rcx, Address::times_1);

    // Need to differentiate between igetfield, agetfield, bgetfield etc.
    // because they are different sizes.
    // Use the type from the constant pool cache
    __ shrl(rdx, ConstantPoolCacheEntry::tos_state_shift);
    // Make sure we don't need to mask rdx after the above shift
    ConstantPoolCacheEntry::verify_tos_state_shift();
    __ cmpl(rdx, btos);
    __ jcc(Assembler::notEqual, notByte);
    __ load_signed_byte(rax, field_address);
    __ jmp(xreturn_path);

    __ bind(notByte);
    __ cmpl(rdx, stos);
    __ jcc(Assembler::notEqual, notShort);
    __ load_signed_short(rax, field_address);
    __ jmp(xreturn_path);

    __ bind(notShort);
    __ cmpl(rdx, ctos);
    __ jcc(Assembler::notEqual, notChar);
    __ load_unsigned_short(rax, field_address);
    __ jmp(xreturn_path);

    __ bind(notChar);
#ifdef ASSERT
    Label okay;
    __ cmpl(rdx, atos);
    __ jcc(Assembler::equal, okay);
    __ cmpl(rdx, itos);
    __ jcc(Assembler::equal, okay);
    __ stop("what type is this?");
    __ bind(okay);
#endif // ASSERT
    // All the rest are a 32-bit word size.
    // This is OK for now, since fast accessors should be going away.
    __ movptr(rax, field_address);

    __ bind(xreturn_path);

    // _ireturn/_areturn
    __ pop(rdi);      // get return address
    __ mov(rsp, rsi); // set sp to sender sp
    __ jmp(rdi);

    // generate a vanilla interpreter entry as the slow path
    __ bind(slow_path);

    (void) generate_normal_entry(false);
    return entry_point;
  }
  return NULL;

}

// Method entry for java.lang.ref.Reference.get.
address InterpreterGenerator::generate_Reference_get_entry(void) {
#if INCLUDE_ALL_GCS
  // Code: _aload_0, _getfield, _areturn
  // parameter size = 1
  //
  // The code that gets generated by this routine is split into 2 parts:
  //    1. The "intrinsified" code for G1 (or any SATB based GC),
  //    2. The slow path - which is an expansion of the regular method entry.
  //
  // Notes:-
  // * In the G1 code we do not check whether we need to block for
  //   a safepoint. If G1 is enabled then we must execute the specialized
  //   code for Reference.get (except when the Reference object is null)
  //   so that we can log the value in the referent field with an SATB
  //   update buffer.
  //   If the code for the getfield template is modified so that the
  //   G1 pre-barrier code is executed when the current method is
  //   Reference.get() then going through the normal method entry
  //   will be fine.
  // * The G1 code below can, however, check the receiver object (the instance
  //   of java.lang.Reference) and jump to the slow path if null. If the
  //   Reference object is null then we obviously cannot fetch the referent
  //   and so we don't need to call the G1 pre-barrier. Thus we can use the
  //   regular method entry code to generate the NPE.
  //
  // This code is based on generate_accessor_entry.

  // rbx,: Method*
  // rcx: receiver (preserve for slow entry into asm interpreter)

  // rsi: senderSP must be preserved for slow path, set SP to it on fast path

  address entry = __ pc();

  const int referent_offset = java_lang_ref_Reference::referent_offset;
  guarantee(referent_offset > 0, "referent offset not initialized");

  if (UseG1GC) {
    Label slow_path;

    // Check if local 0 != NULL
    // If the receiver is null then it is OK to jump to the slow path.
    __ movptr(rax, Address(rsp, wordSize));
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, slow_path);

    // rax: local 0 (must be preserved across the G1 barrier call)
    //
    // rbx: method (at this point it's scratch)
    // rcx: receiver (at this point it's scratch)
    // rdx: scratch
    // rdi: scratch
    //
    // rsi: sender sp

    // Preserve the sender sp in case the pre-barrier
    // calls the runtime
    __ push(rsi);

    // Load the value of the referent field.
    const Address field_address(rax, referent_offset);
    __ movptr(rax, field_address);

    // Generate the G1 pre-barrier code to log the value of
    // the referent field in an SATB buffer.
    __ get_thread(rcx);
    __ g1_write_barrier_pre(noreg /* obj */,
                            rax /* pre_val */,
                            rcx /* thread */,
                            rbx /* tmp */,
                            true /* tosca_live */,
                            true /* expand_call */);

    // _areturn
    __ pop(rsi);      // get sender sp
    __ pop(rdi);      // get return address
    __ mov(rsp, rsi); // set sp to sender sp
    __ jmp(rdi);

    __ bind(slow_path);
    (void) generate_normal_entry(false);

    return entry;
  }
#endif // INCLUDE_ALL_GCS

  // If G1 is not enabled then attempt to go through the accessor entry point
  // Reference.get is an accessor
  return generate_accessor_entry();
}

/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32.update(int crc, int b)
 */
address InterpreterGenerator::generate_CRC32_update_entry() {
  if (UseCRC32Intrinsics) {
    address entry = __ pc();

    // rbx,: Method*
    // rsi: senderSP must be preserved for slow path, set SP to it on fast path
    // rdx: scratch
    // rdi: scratch

    Label slow_path;
    // If we need a safepoint check, generate full interpreter entry.
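    // (SafepointSynchronize::_not_synchronized means no safepoint is in
    //  progress; anything else sends us to the full interpreter entry,
    //  which knows how to stop at a safepoint.)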
    ExternalAddress state(SafepointSynchronize::address_of_state());
    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
             SafepointSynchronize::_not_synchronized);
    __ jcc(Assembler::notEqual, slow_path);

    // We don't generate local frame and don't align stack because
    // we call stub code and there is no safepoint on this path.

    // Load parameters
    const Register crc = rax;  // crc
    const Register val = rdx;  // source java byte value
    const Register tbl = rdi;  // scratch

    // Arguments are reversed on java expression stack
    __ movl(val, Address(rsp,   wordSize)); // byte value
    __ movl(crc, Address(rsp, 2*wordSize)); // Initial CRC

    __ lea(tbl, ExternalAddress(StubRoutines::crc_table_addr()));
    __ notl(crc); // ~crc
    __ update_byte_crc32(crc, val, tbl);
    __ notl(crc); // ~crc
    // result in rax

    // _areturn
    __ pop(rdi);      // get return address
    __ mov(rsp, rsi); // set sp to sender sp
    __ jmp(rdi);

    // generate a vanilla native entry as the slow path
    __ bind(slow_path);

    (void) generate_native_entry(false);

    return entry;
  }
  return generate_native_entry(false);
}

/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
 *   int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
 */
address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
  if (UseCRC32Intrinsics) {
    address entry = __ pc();

    // rbx,: Method*
    // rsi: senderSP must be preserved for slow path, set SP to it on fast path
    // rdx: scratch
    // rdi: scratch

    Label slow_path;
    // If we need a safepoint check, generate full interpreter entry.
    ExternalAddress state(SafepointSynchronize::address_of_state());
    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
             SafepointSynchronize::_not_synchronized);
    __ jcc(Assembler::notEqual, slow_path);

    // We don't generate local frame and don't align stack because
    // we call stub code and there is no safepoint on this path.
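
    // Expression stack at this point (return address at rsp; arguments are
    // reversed, and a long takes two slots), updateBytes case:
    //   [ len ] <-- rsp +   wordSize
    //   [ off ] <-- rsp + 2*wordSize
    //   [ buf ] <-- rsp + 3*wordSize
    //   [ crc ] <-- rsp + 4*wordSize   (5*wordSize for updateByteBuffer)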

    // Load parameters
    const Register crc = rax;  // crc
    const Register buf = rdx;  // source java byte array address
    const Register len = rdi;  // length

    // Arguments are reversed on java expression stack
    __ movl(len, Address(rsp, wordSize)); // Length
    // Calculate address of start element
    if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) {
      __ movptr(buf, Address(rsp, 3*wordSize)); // long buf
      __ addptr(buf, Address(rsp, 2*wordSize)); // + offset
      __ movl(crc, Address(rsp, 5*wordSize));   // Initial CRC
    } else {
      __ movptr(buf, Address(rsp, 3*wordSize)); // byte[] array
      __ addptr(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size
      __ addptr(buf, Address(rsp, 2*wordSize)); // + offset
      __ movl(crc, Address(rsp, 4*wordSize));   // Initial CRC
    }

    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32()), crc, buf, len);
    // result in rax

    // _areturn
    __ pop(rdi);      // get return address
    __ mov(rsp, rsi); // set sp to sender sp
    __ jmp(rdi);

    // generate a vanilla native entry as the slow path
    __ bind(slow_path);

    (void) generate_native_entry(false);

    return entry;
  }
  return generate_native_entry(false);
}

//
// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the native method
// than the typical interpreter frame setup.
//

address InterpreterGenerator::generate_native_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter  = UseCompiler || CountCompiledCalls;

  // rbx,: Method*
  // rsi: sender sp
  // rsi: previous interpreter state (C++ interpreter) must be preserved
  address entry_point = __ pc();

  const Address constMethod       (rbx, Method::const_offset());
  const Address access_flags      (rbx, Method::access_flags_offset());
  const Address size_of_parameters(rcx, ConstMethod::size_of_parameters_offset());

  // get parameter size (always needed)
  __ movptr(rcx, constMethod);
  __ load_unsigned_short(rcx, size_of_parameters);

  // native calls don't need the stack size check since they have no expression stack
  // and the arguments are already on the stack and we only add a handful of words
  // to the stack

  // rbx,: Method*
  // rcx: size of parameters
  // rsi: sender sp

  __ pop(rax);  // get return address
  // for natives the size of locals is zero

  // compute beginning of parameters (rdi)
  __ lea(rdi, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));


  // add 2 zero-initialized slots for native calls
  // NULL result handler
  __ push((int32_t)NULL_WORD);
  // NULL oop temp (mirror or jni oop result)
  __ push((int32_t)NULL_WORD);

  // initialize fixed part of activation frame
  generate_fixed_frame(true);

  // make sure method is native & not abstract
#ifdef ASSERT
  __ movl(rax, access_flags);
  {
    Label L;
    __ testl(rax, JVM_ACC_NATIVE);
    __ jcc(Assembler::notZero, L);
    __ stop("tried to execute non-native method as native");
    __ bind(L);
  }
  { Label L;
    __ testl(rax, JVM_ACC_ABSTRACT);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // Since at this point in the method invocation the exception handler
  // would try to exit the monitor of synchronized methods which hasn't
  // been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation will
  // check this flag.

  __ get_thread(rax);
  const Address do_not_unlock_if_synchronized(rax,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ movbool(do_not_unlock_if_synchronized, true);

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  bang_stack_shadow_pages(true);

  // reset the _do_not_unlock_if_synchronized flag
  __ get_thread(rax);
  __ movbool(do_not_unlock_if_synchronized, false);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so the method is not locked if the counter overflows.
  //
  if (synchronized) {
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    { Label L;
      __ movl(rax, access_flags);
      __ testl(rax, JVM_ACC_SYNCHRONIZED);
      __ jcc(Assembler::zero, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  { Label L;
    const Address monitor_block_top (rbp,
                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ movptr(rax, monitor_block_top);
    __ cmpptr(rax, rsp);
    __ jcc(Assembler::equal, L);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif

  // jvmti/dtrace support
  __ notify_method_entry();

  // work registers
  const Register method = rbx;
  const Register thread = rdi;
  const Register t      = rcx;

  // allocate space for parameters
  __ get_method(method);
  __ movptr(t, Address(method, Method::const_offset()));
  __ load_unsigned_short(t, Address(t, ConstMethod::size_of_parameters_offset()));

  __ shlptr(t, Interpreter::logStackElementSize);
  __ addptr(t, 2*wordSize);    // allocate two more slots for JNIEnv and possible mirror
  __ subptr(rsp, t);
  __ andptr(rsp, -(StackAlignmentInBytes)); // gcc needs 16 byte aligned stacks to do XMM intrinsics

  // get signature handler
  { Label L;
    __ movptr(t, Address(method, Method::signature_handler_offset()));
    __ testptr(t, t);
    __ jcc(Assembler::notZero, L);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), method);
    __ get_method(method);
    __ movptr(t, Address(method, Method::signature_handler_offset()));
    __ bind(L);
  }

  // call signature handler
  assert(InterpreterRuntime::SignatureHandlerGenerator::from() == rdi, "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::to  () == rsp, "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == t  , "adjust this code");
  // The generated handlers do not touch RBX (the method oop).
  // However, large signatures cannot be cached and are generated
  // each time here. The slow-path generator will blow RBX
  // at some point, so we must reload it after the call.
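  // (The handler copies Java arguments from the locals area (rdi) to the
  //  native ABI area at rsp and leaves the address of the result handler
  //  in rax, which is stored into the frame right after the call.)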
  __ call(t);
  __ get_method(method);  // slow path call blows RBX on DevStudio 5.0

  // result handler is in rax,
  // set result handler
  __ movptr(Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize), rax);

  // pass mirror handle if static call
  { Label L;
    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
    __ movl(t, Address(method, Method::access_flags_offset()));
    __ testl(t, JVM_ACC_STATIC);
    __ jcc(Assembler::zero, L);
    // get mirror
    __ movptr(t, Address(method, Method:: const_offset()));
    __ movptr(t, Address(t, ConstMethod::constants_offset()));
    __ movptr(t, Address(t, ConstantPool::pool_holder_offset_in_bytes()));
    __ movptr(t, Address(t, mirror_offset));
    // copy mirror into activation frame
    __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize), t);
    // pass handle to mirror
    __ lea(t, Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
    __ movptr(Address(rsp, wordSize), t);
    __ bind(L);
  }

  // get native function entry point
  { Label L;
    __ movptr(rax, Address(method, Method::native_function_offset()));
    ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
    __ cmpptr(rax, unsatisfied.addr());
    __ jcc(Assembler::notEqual, L);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), method);
    __ get_method(method);
    __ movptr(rax, Address(method, Method::native_function_offset()));
    __ bind(L);
  }

  // pass JNIEnv
  __ get_thread(thread);
  __ lea(t, Address(thread, JavaThread::jni_environment_offset()));
  __ movptr(Address(rsp, 0), t);

  // set_last_Java_frame_before_call
  // It is enough that the pc()
  // points into the right code segment. It does not have to be the correct return pc.
  __ set_last_Java_frame(thread, noreg, rbp, __ pc());

  // change thread state
#ifdef ASSERT
  { Label L;
    __ movl(t, Address(thread, JavaThread::thread_state_offset()));
    __ cmpl(t, _thread_in_Java);
    __ jcc(Assembler::equal, L);
    __ stop("Wrong thread state in native stub");
    __ bind(L);
  }
#endif

  // Change state to native
  __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native);
  __ call(rax);

  // result potentially in rdx:rax or ST0

  // Verify or restore cpu control state after JNI call
  __ restore_cpu_control_state_after_jni();

  // save potential result in ST(0) & rdx:rax
  // (if result handler is the T_FLOAT or T_DOUBLE handler, result must be in ST0 -
  // the check is necessary to avoid potential Intel FPU overflow problems by saving/restoring 'empty' FPU registers)
  // It is safe to do this push because state is _thread_in_native and return address will be found
  // via _last_native_pc and not via _last_java_sp

  // NOTE: the order of these pushes is known to frame::interpreter_frame_result.
  // If the order changes or anything else is added to the stack the code in
  // interpreter_frame_result will have to be changed.

  { Label L;
    Label push_double;
    ExternalAddress float_handler(AbstractInterpreter::result_handler(T_FLOAT));
    ExternalAddress double_handler(AbstractInterpreter::result_handler(T_DOUBLE));
    __ cmpptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset + 1)*wordSize),
              float_handler.addr());
    __ jcc(Assembler::equal, push_double);
    __ cmpptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset + 1)*wordSize),
              double_handler.addr());
    __ jcc(Assembler::notEqual, L);
    __ bind(push_double);
    __ push(dtos);
    __ bind(L);
  }
  __ push(ltos);

  // change thread state
  __ get_thread(thread);
  __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native_trans);
  if (os::is_MP()) {
    if (UseMembar) {
      // Force this write out before the read below
      __ membar(Assembler::Membar_mask_bits(
           Assembler::LoadLoad | Assembler::LoadStore |
           Assembler::StoreLoad | Assembler::StoreStore));
    } else {
      // Write serialization page so VM thread can do a pseudo remote membar.
      // We use the current thread pointer to calculate a thread specific
      // offset to write to within the page. This minimizes bus traffic
      // due to cache line collision.
      __ serialize_memory(thread, rcx);
    }
  }

  if (AlwaysRestoreFPU) {
    // Make sure the control word is correct.
    __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
  }

  // check for safepoint operation in progress and/or pending suspend requests
  { Label Continue;

    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
             SafepointSynchronize::_not_synchronized);

    Label L;
    __ jcc(Assembler::notEqual, L);
    __ cmpl(Address(thread, JavaThread::suspend_flags_offset()), 0);
    __ jcc(Assembler::equal, Continue);
    __ bind(L);

    // Don't use call_VM, as it will see a possible pending exception and forward it
    // and never return here, preventing us from clearing _last_native_pc down below.
    // We can't use call_VM_leaf either, as it will check that rsi and rdi are
    // preserved and correspond to the bcp/locals pointers. So we do the runtime call
    // by hand.
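    //
    // (We are still _thread_in_native_trans here, so the VM thread will wait
    //  for us; check_special_condition_for_native_trans blocks if a safepoint
    //  is in progress and handles any pending suspend request.)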
    //
    __ push(thread);
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
                                            JavaThread::check_special_condition_for_native_trans)));
    __ increment(rsp, wordSize);
    __ get_thread(thread);

    __ bind(Continue);
  }

  // change thread state
  __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_Java);

  __ reset_last_Java_frame(thread, true, true);

  // reset handle block
  __ movptr(t, Address(thread, JavaThread::active_handles_offset()));
  __ movptr(Address(t, JNIHandleBlock::top_offset_in_bytes()), NULL_WORD);

  // If result was an oop then unbox and save it in the frame
  { Label L;
    Label no_oop, store_result;
    ExternalAddress handler(AbstractInterpreter::result_handler(T_OBJECT));
    __ cmpptr(Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize),
              handler.addr());
    __ jcc(Assembler::notEqual, no_oop);
    __ cmpptr(Address(rsp, 0), (int32_t)NULL_WORD);
    __ pop(ltos);
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, store_result);
    // unbox
    __ movptr(rax, Address(rax, 0));
    __ bind(store_result);
    __ movptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset)*wordSize), rax);
    // keep stack depth as expected by pushing oop which will eventually be discarded
    __ push(ltos);
    __ bind(no_oop);
  }

  {
    Label no_reguard;
    __ cmpl(Address(thread, JavaThread::stack_guard_state_offset()), JavaThread::stack_guard_yellow_disabled);
    __ jcc(Assembler::notEqual, no_reguard);

    __ pusha();
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
    __ popa();

    __ bind(no_reguard);
  }

  // restore rsi to have legal interpreter frame,
  // i.e., bci == 0 <=> rsi == code_base()
  // Can't call_VM until bcp is within a reasonable range.
  __ get_method(method);  // method is junk from thread_in_native to now.
  __ movptr(rsi, Address(method,Method::const_offset()));  // get ConstMethod*
  __ lea(rsi, Address(rsi,ConstMethod::codes_offset()));   // get codebase

  // handle exceptions (exception handling will handle unlocking!)
  { Label L;
    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::zero, L);
    // Note: At some point we may want to unify this with the code used in call_VM_base();
    //       i.e., we should use the StubRoutines::forward_exception code. For now this
    //       doesn't work here because the rsp is not correctly set at this point.
    __ MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  // do unlocking if necessary
  { Label L;
    __ movl(t, Address(method, Method::access_flags_offset()));
    __ testl(t, JVM_ACC_SYNCHRONIZED);
    __ jcc(Assembler::zero, L);
    // the code below should be shared with interpreter macro assembler implementation
    { Label unlock;
      // BasicObjectLock will be first in list, since this is a synchronized method. However, need
      // to check that the object has not been unlocked by an explicit monitorexit bytecode.
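      // (The method-entry monitor sits just below the fixed frame:
      //  initial_sp minus one BasicObjectLock, as allocated by lock_method().)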
      const Address monitor(rbp, frame::interpreter_frame_initial_sp_offset * wordSize - (int)sizeof(BasicObjectLock));

      __ lea(rdx, monitor);  // address of first monitor

      __ movptr(t, Address(rdx, BasicObjectLock::obj_offset_in_bytes()));
      __ testptr(t, t);
      __ jcc(Assembler::notZero, unlock);

      // Entry already unlocked, need to throw exception
      __ MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
      __ should_not_reach_here();

      __ bind(unlock);
      __ unlock_object(rdx);
    }
    __ bind(L);
  }

  // jvmti/dtrace support
  // Note: This must happen _after_ handling/throwing any exceptions since
  //       the exception handler code notifies the runtime of method exits
  //       too. If this happens before, method entry/exit notifications are
  //       not properly paired (was bug - gri 11/22/99).
  __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);

  // restore potential result in rdx:rax, call result handler to restore potential result in ST0 & handle result
  __ pop(ltos);
  __ movptr(t, Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize));
  __ call(t);

  // remove activation
  __ movptr(t, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp
  __ leave();       // remove frame anchor
  __ pop(rdi);      // get return address
  __ mov(rsp, t);   // set sp to sender sp
  __ jmp(rdi);

  if (inc_counter) {
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(&continue_after_compile);
  }

  return entry_point;
}

//
// Generic interpreted method entry to (asm) interpreter
//
address InterpreterGenerator::generate_normal_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter  = UseCompiler || CountCompiledCalls;

  // rbx,: Method*
  // rsi: sender sp
  address entry_point = __ pc();

  const Address constMethod       (rbx, Method::const_offset());
  const Address access_flags      (rbx, Method::access_flags_offset());
  const Address size_of_parameters(rdx, ConstMethod::size_of_parameters_offset());
  const Address size_of_locals    (rdx, ConstMethod::size_of_locals_offset());

  // get parameter size (always needed)
  __ movptr(rdx, constMethod);
  __ load_unsigned_short(rcx, size_of_parameters);

  // rbx,: Method*
  // rcx: size of parameters

  // rsi: sender_sp (could differ from sp+wordSize if we were called via c2i )

  __ load_unsigned_short(rdx, size_of_locals);  // get size of locals in words
  __ subl(rdx, rcx);                            // rdx = no. of additional locals

  // see if we've got enough room on the stack for locals plus overhead.
  generate_stack_overflow_check();

  // get return address
  __ pop(rax);

  // compute beginning of parameters (rdi)
  __ lea(rdi, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));

  // rdx - # of additional locals
  // allocate space for locals
  // explicitly initialize locals
  {
    Label exit, loop;
    __ testl(rdx, rdx);
    __ jcc(Assembler::lessEqual, exit);  // do nothing if rdx <= 0
    __ bind(loop);
    __ push((int32_t)NULL_WORD);         // initialize local variables
    __ decrement(rdx);                   // until everything initialized
    __ jcc(Assembler::greater, loop);
    __ bind(exit);
  }

  // initialize fixed part of activation frame
  generate_fixed_frame(false);

  // make sure method is not native & not abstract
#ifdef ASSERT
  __ movl(rax, access_flags);
  {
    Label L;
    __ testl(rax, JVM_ACC_NATIVE);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute native method as non-native");
    __ bind(L);
  }
  { Label L;
    __ testl(rax, JVM_ACC_ABSTRACT);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // Since at this point in the method invocation the exception handler
  // would try to exit the monitor of synchronized methods which hasn't
  // been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation will
  // check this flag.

  __ get_thread(rax);
  const Address do_not_unlock_if_synchronized(rax,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ movbool(do_not_unlock_if_synchronized, true);

  __ profile_parameters_type(rax, rcx, rdx);
  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  Label profile_method;
  Label profile_method_continue;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, &profile_method, &profile_method_continue);
    if (ProfileInterpreter) {
      __ bind(profile_method_continue);
    }
  }
  Label continue_after_compile;
  __ bind(continue_after_compile);

  bang_stack_shadow_pages(false);

  // reset the _do_not_unlock_if_synchronized flag
  __ get_thread(rax);
  __ movbool(do_not_unlock_if_synchronized, false);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so the method is not locked if the counter overflows.
  //
  if (synchronized) {
    // Allocate monitor and lock method
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    { Label L;
      __ movl(rax, access_flags);
      __ testl(rax, JVM_ACC_SYNCHRONIZED);
      __ jcc(Assembler::zero, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  { Label L;
    const Address monitor_block_top (rbp,
                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ movptr(rax, monitor_block_top);
    __ cmpptr(rax, rsp);
    __ jcc(Assembler::equal, L);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif

  // jvmti support
  __ notify_method_entry();

  __ dispatch_next(vtos);

  // invocation counter overflow
  if (inc_counter) {
    if (ProfileInterpreter) {
      // We have decided to profile this method in the interpreter
      __ bind(profile_method);
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
      __ set_method_data_pointer_for_bcp();
      __ get_method(rbx);
      __ jmp(profile_method_continue);
    }
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(&continue_after_compile);
  }

  return entry_point;
}

//------------------------------------------------------------------------------------------------------------------------
// Entry points
//
// Here we generate the various kinds of entries into the interpreter.
// The two main entry types are generic bytecode methods and native call methods.
// These both come in synchronized and non-synchronized versions but the
// frame layout they create is very similar. The other method entry
// types are really just special-purpose entries that combine entry
// and interpretation in one. These are for trivial methods like
// accessor, empty, or special math methods.
//
// When control flow reaches any of the entry types for the interpreter
// the following holds ->
//
// Arguments:
//
// rbx,: Method*
// rcx: receiver
//
//
// Stack layout immediately at entry
//
// [ return address     ] <--- rsp
// [ parameter n        ]
//   ...
// [ parameter 1        ]
// [ expression stack   ] (caller's java expression stack)

// Assuming that we don't go to one of the trivial specialized
// entries the stack will look like below when we are ready to execute
// the first bytecode (or call the native routine). The register usage
// will be as the template based interpreter expects (see interpreter_x86.hpp).
//
// local variables follow incoming parameters immediately; i.e.
// the return address is moved to the end of the locals.
//
// [ monitor entry      ] <--- rsp
//   ...
// [ monitor entry      ]
// [ expr. stack bottom ]
// [ saved rsi          ]
// [ current rdi        ]
// [ Method*            ]
// [ saved rbp,         ] <--- rbp,
// [ return address     ]
// [ local variable m   ]
//   ...
// [ local variable 1   ]
// [ parameter n        ]
//   ...
address AbstractInterpreterGenerator::generate_method_entry(AbstractInterpreter::MethodKind kind) {
  // determine code generation flags
  bool synchronized = false;
  address entry_point = NULL;
  InterpreterGenerator* ig_this = (InterpreterGenerator*)this;

  switch (kind) {
    case Interpreter::zerolocals             :                                                        break;
    case Interpreter::zerolocals_synchronized: synchronized = true;                                   break;
    case Interpreter::native                 : entry_point = ig_this->generate_native_entry(false);   break;
    case Interpreter::native_synchronized    : entry_point = ig_this->generate_native_entry(true);    break;
    case Interpreter::empty                  : entry_point = ig_this->generate_empty_entry();         break;
    case Interpreter::accessor               : entry_point = ig_this->generate_accessor_entry();      break;
    case Interpreter::abstract               : entry_point = ig_this->generate_abstract_entry();      break;

    case Interpreter::java_lang_math_sin     : // fall thru
    case Interpreter::java_lang_math_cos     : // fall thru
    case Interpreter::java_lang_math_tan     : // fall thru
    case Interpreter::java_lang_math_abs     : // fall thru
    case Interpreter::java_lang_math_log     : // fall thru
    case Interpreter::java_lang_math_log10   : // fall thru
    case Interpreter::java_lang_math_sqrt    : // fall thru
    case Interpreter::java_lang_math_pow     : // fall thru
    case Interpreter::java_lang_math_exp     : entry_point = ig_this->generate_math_entry(kind);      break;
    case Interpreter::java_lang_ref_reference_get
                                             : entry_point = ig_this->generate_Reference_get_entry(); break;
    case Interpreter::java_util_zip_CRC32_update
                                             : entry_point = ig_this->generate_CRC32_update_entry();  break;
    case Interpreter::java_util_zip_CRC32_updateBytes
                                             : // fall thru
    case Interpreter::java_util_zip_CRC32_updateByteBuffer
                                             : entry_point = ig_this->generate_CRC32_updateBytes_entry(kind); break;
    default:
      fatal(err_msg("unexpected method kind: %d", kind));
      break;
  }

  if (entry_point) return entry_point;

  return ig_this->generate_normal_entry(synchronized);
}

// These should never be compiled since the interpreter will prefer
// the compiled version to the intrinsic version.
bool AbstractInterpreter::can_be_compiled(methodHandle m) {
  switch (method_kind(m)) {
    case Interpreter::java_lang_math_sin     : // fall thru
    case Interpreter::java_lang_math_cos     : // fall thru
    case Interpreter::java_lang_math_tan     : // fall thru
    case Interpreter::java_lang_math_abs     : // fall thru
    case Interpreter::java_lang_math_log     : // fall thru
    case Interpreter::java_lang_math_log10   : // fall thru
    case Interpreter::java_lang_math_sqrt    : // fall thru
    case Interpreter::java_lang_math_pow     : // fall thru
    case Interpreter::java_lang_math_exp     :
      return false;
    default:
      return true;
  }
}
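// Note (sketch): the generator records each address returned by
// generate_method_entry above in the interpreter's entry table
// (Interpreter::_entry_table[kind], see templateInterpreter.cpp), so
// callers can later fetch it by kind, roughly:
//
//   address entry = Interpreter::entry_for_kind(kind);
//   // jump through 'entry' with rbx == Method* and rcx == receiver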
// How much stack a method activation needs in words.
int AbstractInterpreter::size_top_interpreter_activation(Method* method) {

  const int stub_code = 4;  // see generate_call_stub
  // Save space for one monitor to get into the interpreted method in case
  // the method is synchronized
  int monitor_size = method->is_synchronized() ?
                     1*frame::interpreter_frame_monitor_size() : 0;

  // total overhead size: entry_size + (saved rbp thru expr stack bottom).
  // be sure to change this if you add/subtract anything to/from the overhead area
  const int overhead_size = -frame::interpreter_frame_initial_sp_offset + monitor_size;

  const int method_stack = (method->max_locals() + method->max_stack()) *
                           Interpreter::stackElementWords;
  return overhead_size + method_stack + stub_code;
}
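// Illustrative sizing (hypothetical method): with max_locals == 4,
// max_stack == 6 and the method synchronized, the activation needs
//
//   (-frame::interpreter_frame_initial_sp_offset          // fixed overhead
//      + frame::interpreter_frame_monitor_size())         // one monitor
//   + (4 + 6) * Interpreter::stackElementWords            // locals + expr. stack
//   + 4                                                   // stub_code words
//
// words on the caller's stack.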
// asm based interpreter deoptimization helpers

int AbstractInterpreter::layout_activation(Method* method,
                                           int tempcount,
                                           int popframe_extra_args,
                                           int moncount,
                                           int caller_actual_parameters,
                                           int callee_param_count,
                                           int callee_locals,
                                           frame* caller,
                                           frame* interpreter_frame,
                                           bool is_top_frame,
                                           bool is_bottom_frame) {
  // Note: This calculation must exactly parallel the frame setup
  // in AbstractInterpreterGenerator::generate_method_entry.
  // If interpreter_frame!=NULL, set up the method, locals, and monitors.
  // The frame interpreter_frame, if not NULL, is guaranteed to be the right size,
  // as determined by a previous call to this method.
  // It is also guaranteed to be walkable even though it is in a skeletal state.
  // NOTE: return size is in words not bytes

  // fixed size of an interpreter frame:
  int max_locals = method->max_locals() * Interpreter::stackElementWords;
  int extra_locals = (method->max_locals() - method->size_of_parameters()) *
                     Interpreter::stackElementWords;

  int overhead = frame::sender_sp_offset - frame::interpreter_frame_initial_sp_offset;

  // Our locals were accounted for by the caller (or last_frame_adjust on the transition).
  // Since the callee parameters already account for the callee's params we only need to
  // account for the extra locals.

  int size = overhead +
             ((callee_locals - callee_param_count)*Interpreter::stackElementWords) +
             (moncount*frame::interpreter_frame_monitor_size()) +
             tempcount*Interpreter::stackElementWords + popframe_extra_args;

  if (interpreter_frame != NULL) {
#ifdef ASSERT
    if (!EnableInvokeDynamic)
      // @@@ FIXME: Should we correct interpreter_frame_sender_sp in the calling sequences?
      // Probably, since deoptimization doesn't work yet.
      assert(caller->unextended_sp() == interpreter_frame->interpreter_frame_sender_sp(), "Frame not properly walkable");
    assert(caller->sp() == interpreter_frame->sender_sp(), "Frame not properly walkable(2)");
#endif

    interpreter_frame->interpreter_frame_set_method(method);
    // NOTE the difference in using sender_sp and interpreter_frame_sender_sp:
    // interpreter_frame_sender_sp is the original sp of the caller (the unextended_sp)
    // and sender_sp is fp+8
    intptr_t* locals = interpreter_frame->sender_sp() + max_locals - 1;

#ifdef ASSERT
    if (caller->is_interpreted_frame()) {
      assert(locals < caller->fp() + frame::interpreter_frame_initial_sp_offset, "bad placement");
    }
#endif

    interpreter_frame->interpreter_frame_set_locals(locals);
    BasicObjectLock* montop = interpreter_frame->interpreter_frame_monitor_begin();
    BasicObjectLock* monbot = montop - moncount;
    interpreter_frame->interpreter_frame_set_monitor_end(monbot);

    // Set last_sp
    intptr_t* rsp = (intptr_t*) monbot -
                    tempcount*Interpreter::stackElementWords -
                    popframe_extra_args;
    interpreter_frame->interpreter_frame_set_last_sp(rsp);

    // All frames but the initial (oldest) interpreter frame we fill in have a
    // value for sender_sp that allows walking the stack but isn't
    // truly correct. Correct the value here.

    if (extra_locals != 0 &&
        interpreter_frame->sender_sp() == interpreter_frame->interpreter_frame_sender_sp()) {
      interpreter_frame->set_interpreter_frame_sender_sp(caller->sp() + extra_locals);
    }
    *interpreter_frame->interpreter_frame_cache_addr() =
      method->constants()->cache();
  }
  return size;
}
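// Deoptimization uses this helper in two passes, roughly (sketch; the
// real call sites live in the deoptimization code, see vframeArray.cpp):
//
//   int words = layout_activation(m, ..., NULL /* size query only */, ...);
//   // carve out 'words' of stack, build a skeletal frame there, then:
//   layout_activation(m, ..., &skeletal_frame, is_top, is_bottom);  // fill it in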
//------------------------------------------------------------------------------------------------------------------------
// Exceptions

void TemplateInterpreterGenerator::generate_throw_exception() {
  // Entry point in previous activation (i.e., if the caller was interpreted)
  Interpreter::_rethrow_exception_entry = __ pc();
  const Register thread = rcx;

  // Restore sp to interpreter_frame_last_sp even though we are going
  // to empty the expression stack for the exception processing.
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
  // rax: exception
  // rdx: return address/pc that threw exception
  __ restore_bcp();                              // rsi points to call/send
  __ restore_locals();

  // Entry point for exceptions thrown within interpreter code
  Interpreter::_throw_exception_entry = __ pc();
  // expression stack is undefined here
  // rax: exception
  // rsi: exception bcp
  __ verify_oop(rax);

  // expression stack must be empty before entering the VM in case of an exception
  __ empty_expression_stack();
  __ empty_FPU_stack();
  // find exception handler address and preserve exception oop
  __ call_VM(rdx, CAST_FROM_FN_PTR(address, InterpreterRuntime::exception_handler_for_exception), rax);
  // rax: exception handler entry point
  // rdx: preserved exception oop
  // rsi: bcp for exception handler
  __ push_ptr(rdx);                              // push exception which is now the only value on the stack
  __ jmp(rax);                                   // jump to exception handler (may be _remove_activation_entry!)

  // If the exception is not handled in the current frame the frame is removed and
  // the exception is rethrown (i.e. exception continuation is _rethrow_exception).
  //
  // Note: At this point the bci is still the bci for the instruction which caused
  //       the exception and the expression stack is empty. Thus, for any VM calls
  //       at this point, GC will find a legal oop map (with empty expression stack).

  // In current activation
  // tos: exception
  // rsi: exception bcp

  //
  // JVMTI PopFrame support
  //

  Interpreter::_remove_activation_preserving_args_entry = __ pc();
  __ empty_expression_stack();
  __ empty_FPU_stack();
  // Set the popframe_processing bit in pending_popframe_condition indicating that we are
  // currently handling popframe, so that call_VMs that may happen later do not trigger new
  // popframe handling cycles.
  __ get_thread(thread);
  __ movl(rdx, Address(thread, JavaThread::popframe_condition_offset()));
  __ orl(rdx, JavaThread::popframe_processing_bit);
  __ movl(Address(thread, JavaThread::popframe_condition_offset()), rdx);

  {
    // Check to see whether we are returning to a deoptimized frame.
    // (The PopFrame call ensures that the caller of the popped frame is
    // either interpreted or compiled and deoptimizes it if compiled.)
    // In this case, we can't call dispatch_next() after the frame is
    // popped, but instead must save the incoming arguments and restore
    // them after deoptimization has occurred.
    //
    // Note that we don't compare the return PC against the
    // deoptimization blob's unpack entry because of the presence of
    // adapter frames in C2.
    Label caller_not_deoptimized;
    __ movptr(rdx, Address(rbp, frame::return_addr_offset * wordSize));
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), rdx);
    __ testl(rax, rax);
    __ jcc(Assembler::notZero, caller_not_deoptimized);

    // Compute size of arguments for saving when returning to deoptimized caller
    __ get_method(rax);
    __ movptr(rax, Address(rax, Method::const_offset()));
    __ load_unsigned_short(rax, Address(rax, ConstMethod::size_of_parameters_offset()));
    __ shlptr(rax, Interpreter::logStackElementSize);
    __ restore_locals();
    __ subptr(rdi, rax);
    __ addptr(rdi, wordSize);
    // Save these arguments
    __ get_thread(thread);
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), thread, rax, rdi);

    __ remove_activation(vtos, rdx,
                         /* throw_monitor_exception */ false,
                         /* install_monitor_exception */ false,
                         /* notify_jvmdi */ false);

    // Inform deoptimization that it is responsible for restoring these arguments
    __ get_thread(thread);
    __ movl(Address(thread, JavaThread::popframe_condition_offset()), JavaThread::popframe_force_deopt_reexecution_bit);

    // Continue in deoptimization handler
    __ jmp(rdx);

    __ bind(caller_not_deoptimized);
  }

  __ remove_activation(vtos, rdx,
                       /* throw_monitor_exception */ false,
                       /* install_monitor_exception */ false,
                       /* notify_jvmdi */ false);

  // Finish with popframe handling
  // A previous I2C followed by a deoptimization might have moved the
  // outgoing arguments further up the stack. PopFrame expects the
  // mutations to those outgoing arguments to be preserved and other
  // constraints basically require this frame to look exactly as
  // though it had previously invoked an interpreted activation with
  // no space between the top of the expression stack (current
  // last_sp) and the top of stack. Rather than force deopt to
  // maintain this kind of invariant all the time we call a small
  // fixup routine to move the mutated arguments onto the top of our
  // expression stack if necessary.
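  // Schematically, the fixup amounts to (illustrative only):
  //
  //   void* src  = rsp;                          // where the args are now
  //   void* dest = interpreter_frame_last_sp;    // where this frame expects them
  //   if (src != dest)
  //     copy size_of_parameters words from src to dest;  // done in the VM by
  //                                                      // popframe_move_outgoing_args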
  __ mov(rax, rsp);
  __ movptr(rbx, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ get_thread(thread);
  // PC must point into interpreter here
  __ set_last_Java_frame(thread, noreg, rbp, __ pc());
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), thread, rax, rbx);
  __ get_thread(thread);
  __ reset_last_Java_frame(thread, true, true);
  // Restore the last_sp and null it out
  __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);

  __ restore_bcp();
  __ restore_locals();
  // The method data pointer was incremented already during
  // call profiling. We have to restore the mdp for the current bcp.
  if (ProfileInterpreter) {
    __ set_method_data_pointer_for_bcp();
  }

  // Clear the popframe condition flag
  __ get_thread(thread);
  __ movl(Address(thread, JavaThread::popframe_condition_offset()), JavaThread::popframe_inactive);
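  // (Sketch) set_method_data_pointer_for_bcp above recomputes, roughly:
  //
  //   int bci = bcp - method->code_base();
  //   mdp    = mdo->bci_to_dp(bci);   // MethodData* mdo, see methodData.hpp
  //
  // so subsequent profiling updates hit the record for the bytecode that
  // will be re-executed, not for the call that was popped.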
#if INCLUDE_JVMTI
  if (EnableInvokeDynamic) {
    Label L_done;
    const Register local0 = rdi;

    __ cmpb(Address(rsi, 0), Bytecodes::_invokestatic);
    __ jcc(Assembler::notEqual, L_done);

    // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
    // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.

    __ get_method(rdx);
    __ movptr(rax, Address(local0, 0));
    __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), rax, rdx, rsi);

    __ testptr(rax, rax);
    __ jcc(Assembler::zero, L_done);

    __ movptr(Address(rbx, 0), rax);
    __ bind(L_done);
  }
#endif // INCLUDE_JVMTI

  __ dispatch_next(vtos);
  // end of PopFrame support

  Interpreter::_remove_activation_entry = __ pc();

  // preserve exception over this code sequence
  __ pop_ptr(rax);
  __ get_thread(thread);
  __ movptr(Address(thread, JavaThread::vm_result_offset()), rax);
  // remove the activation (without doing throws on illegalMonitorExceptions)
  __ remove_activation(vtos, rdx, false, true, false);
  // restore exception
  __ get_thread(thread);
  __ get_vm_result(rax, thread);

  // In between activations - previous activation type unknown yet
  // compute continuation point - the continuation point expects
  // the following registers set up:
  //
  // rax: exception
  // rdx: return address/pc that threw exception
  // rsp: expression stack of caller
  // rbp: rbp of caller
  __ push(rax);                                  // save exception
  __ push(rdx);                                  // save return address
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), thread, rdx);
  __ mov(rbx, rax);                              // save exception handler
  __ pop(rdx);                                   // restore return address
  __ pop(rax);                                   // restore exception
  // Note that an "issuing PC" is actually the next PC after the call
  __ jmp(rbx);                                   // jump to exception handler of caller
}


//
// JVMTI ForceEarlyReturn support
//
address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
  address entry = __ pc();
  const Register thread = rcx;

  __ restore_bcp();
  __ restore_locals();
  __ empty_expression_stack();
  __ empty_FPU_stack();
  __ load_earlyret_value(state);

  __ get_thread(thread);
  __ movptr(rcx, Address(thread, JavaThread::jvmti_thread_state_offset()));
  const Address cond_addr(rcx, JvmtiThreadState::earlyret_state_offset());

  // Clear the earlyret state
  __ movl(cond_addr, JvmtiThreadState::earlyret_inactive);

  __ remove_activation(state, rsi,
                       false, /* throw_monitor_exception */
                       false, /* install_monitor_exception */
                       true); /* notify_jvmdi */
  __ jmp(rsi);
  return entry;
} // end of ForceEarlyReturn support


//------------------------------------------------------------------------------------------------------------------------
// Helper for vtos entry point generation

void TemplateInterpreterGenerator::set_vtos_entry_points (Template* t, address& bep, address& cep, address& sep, address& aep, address& iep, address& lep, address& fep, address& dep, address& vep) {
  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
  Label L;
  fep = __ pc(); __ push(ftos); __ jmp(L);
  dep = __ pc(); __ push(dtos); __ jmp(L);
  lep = __ pc(); __ push(ltos); __ jmp(L);
  aep = __ pc(); __ push(atos); __ jmp(L);
  bep = cep = sep =              // fall through
  iep = __ pc(); __ push(itos);  // fall through
  vep = __ pc(); __ bind(L);     // fall through
  generate_and_dispatch(t);
}
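// Sketch of the dispatch trick above: a template that expects vtos (no
// value cached in registers) can still be entered from code that has a
// value in the tos register(s). Each xep entry spills that value first:
//
//   fep: push(ftos); jmp L     // float in FPU/XMM -> spill to java stack
//   dep: push(dtos); jmp L
//   lep: push(ltos); jmp L
//   aep: push(atos); jmp L
//   iep: push(itos)            // byte/char/short share the int entry
//   vep: L:                    // nothing cached, nothing to spill
//   -> generate_and_dispatch(t)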
//------------------------------------------------------------------------------------------------------------------------
// Generation of individual instructions

// helpers for generate_and_dispatch


InterpreterGenerator::InterpreterGenerator(StubQueue* code)
 : TemplateInterpreterGenerator(code) {
   generate_all(); // down here so it can be "virtual"
}

//------------------------------------------------------------------------------------------------------------------------

// Non-product code
#ifndef PRODUCT
address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
  address entry = __ pc();

  // prepare expression stack
  __ pop(rcx);        // pop return address so expression stack is 'pure'
  __ push(state);     // save tosca

  // pass tosca registers as arguments & call tracer
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode), rcx, rax, rdx);
  __ mov(rcx, rax);   // make sure return address is not destroyed by pop(state)
  __ pop(state);      // restore tosca

  // return
  __ jmp(rcx);

  return entry;
}


void TemplateInterpreterGenerator::count_bytecode() {
  __ incrementl(ExternalAddress((address) &BytecodeCounter::_counter_value));
}


void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
  __ incrementl(ExternalAddress((address) &BytecodeHistogram::_counters[t->bytecode()]));
}


void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
  // Fold the previous and the current bytecode into a pair index, remember
  // it for the next pair, and bump the counter for this pair.
  __ mov32(rbx, ExternalAddress((address) &BytecodePairHistogram::_index));
  __ shrl(rbx, BytecodePairHistogram::log2_number_of_codes);
  __ orl(rbx, ((int)t->bytecode()) << BytecodePairHistogram::log2_number_of_codes);
  __ mov32(ExternalAddress((address) &BytecodePairHistogram::_index), rbx);
  ExternalAddress table((address) BytecodePairHistogram::_counters);
  Address index(noreg, rbx, Address::times_4);
  __ incrementl(ArrayAddress(table, index));
}


void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
  // Call a little run-time stub to avoid blow-up for each bytecode.
  // The run-time stub saves the right registers, depending on
  // the tosca in-state for the given template.
  assert(Interpreter::trace_code(t->tos_in()) != NULL,
         "entry must have been generated");
  __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
}


void TemplateInterpreterGenerator::stop_interpreter_at() {
  Label L;
  __ cmp32(ExternalAddress((address) &BytecodeCounter::_counter_value),
           StopInterpreterAt);
  __ jcc(Assembler::notEqual, L);
  __ int3();
  __ bind(L);
}
#endif // !PRODUCT
#endif // CC_INTERP