1 /* 2 * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "asm/assembler.hpp" 27 #include "interpreter/bytecodeHistogram.hpp" 28 #include "interpreter/interp_masm.hpp" 29 #include "interpreter/interpreter.hpp" 30 #include "interpreter/interpreterRuntime.hpp" 31 #include "interpreter/templateInterpreterGenerator.hpp" 32 #include "interpreter/templateTable.hpp" 33 #include "oops/arrayOop.hpp" 34 #include "oops/methodData.hpp" 35 #include "oops/method.hpp" 36 #include "oops/oop.inline.hpp" 37 #include "prims/jvmtiExport.hpp" 38 #include "prims/jvmtiThreadState.hpp" 39 #include "prims/methodHandles.hpp" 40 #include "runtime/arguments.hpp" 41 #include "runtime/deoptimization.hpp" 42 #include "runtime/frame.inline.hpp" 43 #include "runtime/sharedRuntime.hpp" 44 #include "runtime/stubRoutines.hpp" 45 #include "runtime/synchronizer.hpp" 46 #include "runtime/timer.hpp" 47 #include "runtime/vframeArray.hpp" 48 #include "utilities/align.hpp" 49 #include "utilities/debug.hpp" 50 #include "utilities/macros.hpp" 51 52 // Size of interpreter code. Increase if too small. Interpreter will 53 // fail with a guarantee ("not enough space for interpreter generation"); 54 // if too small. 55 // Run with +PrintInterpreter to get the VM to print out the size. 56 // Max size with JVMTI 57 int TemplateInterpreter::InterpreterCodeSize = 180 * 1024; 58 59 #define __ _masm-> 60 61 //------------------------------------------------------------------------------------------------------------------------ 62 63 address TemplateInterpreterGenerator::generate_slow_signature_handler() { 64 address entry = __ pc(); 65 66 // callee-save register for saving LR, shared with generate_native_entry 67 const Register Rsaved_ret_addr = AARCH64_ONLY(R21) NOT_AARCH64(Rtmp_save0); 68 69 __ mov(Rsaved_ret_addr, LR); 70 71 __ mov(R1, Rmethod); 72 __ mov(R2, Rlocals); 73 __ mov(R3, SP); 74 75 #ifdef AARCH64 76 // expand expr. stack and extended SP to avoid cutting SP in call_VM 77 __ mov(Rstack_top, SP); 78 __ str(Rstack_top, Address(FP, frame::interpreter_frame_extended_sp_offset * wordSize)); 79 __ check_stack_top(); 80 81 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::slow_signature_handler), R1, R2, R3, false); 82 83 __ ldp(ZR, c_rarg1, Address(SP, 2*wordSize, post_indexed)); 84 __ ldp(c_rarg2, c_rarg3, Address(SP, 2*wordSize, post_indexed)); 85 __ ldp(c_rarg4, c_rarg5, Address(SP, 2*wordSize, post_indexed)); 86 __ ldp(c_rarg6, c_rarg7, Address(SP, 2*wordSize, post_indexed)); 87 88 __ ldp_d(V0, V1, Address(SP, 2*wordSize, post_indexed)); 89 __ ldp_d(V2, V3, Address(SP, 2*wordSize, post_indexed)); 90 __ ldp_d(V4, V5, Address(SP, 2*wordSize, post_indexed)); 91 __ ldp_d(V6, V7, Address(SP, 2*wordSize, post_indexed)); 92 #else 93 94 // Safer to save R9 (when scratched) since callers may have been 95 // written assuming R9 survives. This is suboptimal but 96 // probably not important for this slow case call site. 97 // Note for R9 saving: slow_signature_handler may copy register 98 // arguments above the current SP (passed as R3). It is safe for 99 // call_VM to use push and pop to protect additional values on the 100 // stack if needed. 101 __ call_VM(CAST_FROM_FN_PTR(address, InterpreterRuntime::slow_signature_handler), true /* save R9 if needed*/); 102 __ add(SP, SP, wordSize); // Skip R0 103 __ pop(RegisterSet(R1, R3)); // Load arguments passed in registers 104 #ifdef __ABI_HARD__ 105 // Few alternatives to an always-load-FP-registers approach: 106 // - parse method signature to detect FP arguments 107 // - keep a counter/flag on a stack indicationg number of FP arguments in the method. 108 // The later has been originally implemented and tested but a conditional path could 109 // eliminate any gain imposed by avoiding 8 double word loads. 110 __ fldmiad(SP, FloatRegisterSet(D0, 8), writeback); 111 #endif // __ABI_HARD__ 112 #endif // AARCH64 113 114 __ ret(Rsaved_ret_addr); 115 116 return entry; 117 } 118 119 120 // 121 // Various method entries (that c++ and asm interpreter agree upon) 122 //------------------------------------------------------------------------------------------------------------------------ 123 // 124 // 125 126 // Abstract method entry 127 // Attempt to execute abstract method. Throw exception 128 address TemplateInterpreterGenerator::generate_abstract_entry(void) { 129 address entry_point = __ pc(); 130 131 #ifdef AARCH64 132 __ restore_sp_after_call(Rtemp); 133 __ restore_stack_top(); 134 #endif 135 136 __ empty_expression_stack(); 137 138 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError)); 139 140 DEBUG_ONLY(STOP("generate_abstract_entry");) // Should not reach here 141 return entry_point; 142 } 143 144 address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) { 145 if (!InlineIntrinsics) return NULL; // Generate a vanilla entry 146 147 // TODO: ARM 148 return NULL; 149 150 address entry_point = __ pc(); 151 STOP("generate_math_entry"); 152 return entry_point; 153 } 154 155 address TemplateInterpreterGenerator::generate_StackOverflowError_handler() { 156 address entry = __ pc(); 157 158 // Note: There should be a minimal interpreter frame set up when stack 159 // overflow occurs since we check explicitly for it now. 160 // 161 #ifdef ASSERT 162 { Label L; 163 __ sub(Rtemp, FP, - frame::interpreter_frame_monitor_block_top_offset * wordSize); 164 __ cmp(SP, Rtemp); // Rtemp = maximal SP for current FP, 165 // (stack grows negative) 166 __ b(L, ls); // check if frame is complete 167 __ stop ("interpreter frame not set up"); 168 __ bind(L); 169 } 170 #endif // ASSERT 171 172 // Restore bcp under the assumption that the current frame is still 173 // interpreted 174 __ restore_bcp(); 175 176 // expression stack must be empty before entering the VM if an exception 177 // happened 178 __ empty_expression_stack(); 179 180 // throw exception 181 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError)); 182 183 __ should_not_reach_here(); 184 185 return entry; 186 } 187 188 address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(const char* name) { 189 address entry = __ pc(); 190 191 // index is in R4_ArrayIndexOutOfBounds_index 192 193 InlinedString Lname(name); 194 195 // expression stack must be empty before entering the VM if an exception happened 196 __ empty_expression_stack(); 197 198 // setup parameters 199 __ ldr_literal(R1, Lname); 200 __ mov(R2, R4_ArrayIndexOutOfBounds_index); 201 202 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException), R1, R2); 203 204 __ nop(); // to avoid filling CPU pipeline with invalid instructions 205 __ nop(); 206 __ should_not_reach_here(); 207 __ bind_literal(Lname); 208 209 return entry; 210 } 211 212 address TemplateInterpreterGenerator::generate_ClassCastException_handler() { 213 address entry = __ pc(); 214 215 // object is in R2_ClassCastException_obj 216 217 // expression stack must be empty before entering the VM if an exception 218 // happened 219 __ empty_expression_stack(); 220 221 __ mov(R1, R2_ClassCastException_obj); 222 __ call_VM(noreg, 223 CAST_FROM_FN_PTR(address, 224 InterpreterRuntime::throw_ClassCastException), 225 R1); 226 227 __ should_not_reach_here(); 228 229 return entry; 230 } 231 232 address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) { 233 assert(!pass_oop || message == NULL, "either oop or message but not both"); 234 address entry = __ pc(); 235 236 InlinedString Lname(name); 237 InlinedString Lmessage(message); 238 239 if (pass_oop) { 240 // object is at TOS 241 __ pop_ptr(R2); 242 } 243 244 // expression stack must be empty before entering the VM if an exception happened 245 __ empty_expression_stack(); 246 247 // setup parameters 248 __ ldr_literal(R1, Lname); 249 250 if (pass_oop) { 251 __ call_VM(Rexception_obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_klass_exception), R1, R2); 252 } else { 253 if (message != NULL) { 254 __ ldr_literal(R2, Lmessage); 255 } else { 256 __ mov(R2, 0); 257 } 258 __ call_VM(Rexception_obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), R1, R2); 259 } 260 261 // throw exception 262 __ b(Interpreter::throw_exception_entry()); 263 264 __ nop(); // to avoid filling CPU pipeline with invalid instructions 265 __ nop(); 266 __ bind_literal(Lname); 267 if (!pass_oop && (message != NULL)) { 268 __ bind_literal(Lmessage); 269 } 270 271 return entry; 272 } 273 274 address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) { 275 address entry = __ pc(); 276 277 __ interp_verify_oop(R0_tos, state, __FILE__, __LINE__); 278 279 #ifdef AARCH64 280 __ restore_sp_after_call(Rtemp); // Restore SP to extended SP 281 __ restore_stack_top(); 282 #else 283 // Restore stack bottom in case i2c adjusted stack 284 __ ldr(SP, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize)); 285 // and NULL it as marker that SP is now tos until next java call 286 __ mov(Rtemp, (int)NULL_WORD); 287 __ str(Rtemp, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize)); 288 #endif // AARCH64 289 290 __ restore_method(); 291 __ restore_bcp(); 292 __ restore_dispatch(); 293 __ restore_locals(); 294 295 const Register Rcache = R2_tmp; 296 const Register Rindex = R3_tmp; 297 __ get_cache_and_index_at_bcp(Rcache, Rindex, 1, index_size); 298 299 __ add(Rtemp, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord)); 300 __ ldrb(Rtemp, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset())); 301 __ check_stack_top(); 302 __ add(Rstack_top, Rstack_top, AsmOperand(Rtemp, lsl, Interpreter::logStackElementSize)); 303 304 #ifndef AARCH64 305 __ convert_retval_to_tos(state); 306 #endif // !AARCH64 307 308 __ check_and_handle_popframe(); 309 __ check_and_handle_earlyret(); 310 311 __ dispatch_next(state, step); 312 313 return entry; 314 } 315 316 317 address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step, address continuation) { 318 address entry = __ pc(); 319 320 __ interp_verify_oop(R0_tos, state, __FILE__, __LINE__); 321 322 #ifdef AARCH64 323 __ restore_sp_after_call(Rtemp); // Restore SP to extended SP 324 __ restore_stack_top(); 325 #else 326 // The stack is not extended by deopt but we must NULL last_sp as this 327 // entry is like a "return". 328 __ mov(Rtemp, 0); 329 __ str(Rtemp, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize)); 330 #endif // AARCH64 331 332 __ restore_method(); 333 __ restore_bcp(); 334 __ restore_dispatch(); 335 __ restore_locals(); 336 337 // handle exceptions 338 { Label L; 339 __ ldr(Rtemp, Address(Rthread, Thread::pending_exception_offset())); 340 __ cbz(Rtemp, L); 341 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception)); 342 __ should_not_reach_here(); 343 __ bind(L); 344 } 345 346 if (continuation == NULL) { 347 __ dispatch_next(state, step); 348 } else { 349 __ jump_to_entry(continuation); 350 } 351 352 return entry; 353 } 354 355 address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type) { 356 #ifdef AARCH64 357 address entry = __ pc(); 358 switch (type) { 359 case T_BOOLEAN: 360 __ tst(R0, 0xff); 361 __ cset(R0, ne); 362 break; 363 case T_CHAR : __ zero_extend(R0, R0, 16); break; 364 case T_BYTE : __ sign_extend(R0, R0, 8); break; 365 case T_SHORT : __ sign_extend(R0, R0, 16); break; 366 case T_INT : // fall through 367 case T_LONG : // fall through 368 case T_VOID : // fall through 369 case T_FLOAT : // fall through 370 case T_DOUBLE : /* nothing to do */ break; 371 case T_OBJECT : 372 // retrieve result from frame 373 __ ldr(R0, Address(FP, frame::interpreter_frame_oop_temp_offset * wordSize)); 374 // and verify it 375 __ verify_oop(R0); 376 break; 377 default : ShouldNotReachHere(); 378 } 379 __ ret(); 380 return entry; 381 #else 382 // Result handlers are not used on 32-bit ARM 383 // since the returned value is already in appropriate format. 384 __ should_not_reach_here(); // to avoid empty code block 385 386 // The result handler non-zero indicates an object is returned and this is 387 // used in the native entry code. 388 return type == T_OBJECT ? (address)(-1) : NULL; 389 #endif // AARCH64 390 } 391 392 address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state, address runtime_entry) { 393 address entry = __ pc(); 394 __ push(state); 395 __ call_VM(noreg, runtime_entry); 396 397 // load current bytecode 398 __ ldrb(R3_bytecode, Address(Rbcp)); 399 __ dispatch_only_normal(vtos); 400 return entry; 401 } 402 403 404 // Helpers for commoning out cases in the various type of method entries. 405 // 406 407 // increment invocation count & check for overflow 408 // 409 // Note: checking for negative value instead of overflow 410 // so we have a 'sticky' overflow test 411 // 412 // In: Rmethod. 413 // 414 // Uses R0, R1, Rtemp. 415 // 416 void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow, 417 Label* profile_method, 418 Label* profile_method_continue) { 419 Label done; 420 const Register Rcounters = Rtemp; 421 const Address invocation_counter(Rcounters, 422 MethodCounters::invocation_counter_offset() + 423 InvocationCounter::counter_offset()); 424 425 // Note: In tiered we increment either counters in MethodCounters* or 426 // in MDO depending if we're profiling or not. 427 if (TieredCompilation) { 428 int increment = InvocationCounter::count_increment; 429 Label no_mdo; 430 if (ProfileInterpreter) { 431 // Are we profiling? 432 __ ldr(R1_tmp, Address(Rmethod, Method::method_data_offset())); 433 __ cbz(R1_tmp, no_mdo); 434 // Increment counter in the MDO 435 const Address mdo_invocation_counter(R1_tmp, 436 in_bytes(MethodData::invocation_counter_offset()) + 437 in_bytes(InvocationCounter::counter_offset())); 438 const Address mask(R1_tmp, in_bytes(MethodData::invoke_mask_offset())); 439 __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, R0_tmp, Rtemp, eq, overflow); 440 __ b(done); 441 } 442 __ bind(no_mdo); 443 __ get_method_counters(Rmethod, Rcounters, done); 444 const Address mask(Rcounters, in_bytes(MethodCounters::invoke_mask_offset())); 445 __ increment_mask_and_jump(invocation_counter, increment, mask, R0_tmp, R1_tmp, eq, overflow); 446 __ bind(done); 447 } else { // not TieredCompilation 448 const Address backedge_counter(Rcounters, 449 MethodCounters::backedge_counter_offset() + 450 InvocationCounter::counter_offset()); 451 452 const Register Ricnt = R0_tmp; // invocation counter 453 const Register Rbcnt = R1_tmp; // backedge counter 454 455 __ get_method_counters(Rmethod, Rcounters, done); 456 457 if (ProfileInterpreter) { 458 const Register Riic = R1_tmp; 459 __ ldr_s32(Riic, Address(Rcounters, MethodCounters::interpreter_invocation_counter_offset())); 460 __ add(Riic, Riic, 1); 461 __ str_32(Riic, Address(Rcounters, MethodCounters::interpreter_invocation_counter_offset())); 462 } 463 464 // Update standard invocation counters 465 466 __ ldr_u32(Ricnt, invocation_counter); 467 __ ldr_u32(Rbcnt, backedge_counter); 468 469 __ add(Ricnt, Ricnt, InvocationCounter::count_increment); 470 471 #ifdef AARCH64 472 __ andr(Rbcnt, Rbcnt, (unsigned int)InvocationCounter::count_mask_value); // mask out the status bits 473 #else 474 __ bic(Rbcnt, Rbcnt, ~InvocationCounter::count_mask_value); // mask out the status bits 475 #endif // AARCH64 476 477 __ str_32(Ricnt, invocation_counter); // save invocation count 478 __ add(Ricnt, Ricnt, Rbcnt); // add both counters 479 480 // profile_method is non-null only for interpreted method so 481 // profile_method != NULL == !native_call 482 // BytecodeInterpreter only calls for native so code is elided. 483 484 if (ProfileInterpreter && profile_method != NULL) { 485 assert(profile_method_continue != NULL, "should be non-null"); 486 487 // Test to see if we should create a method data oop 488 // Reuse R1_tmp as we don't need backedge counters anymore. 489 Address profile_limit(Rcounters, in_bytes(MethodCounters::interpreter_profile_limit_offset())); 490 __ ldr_s32(R1_tmp, profile_limit); 491 __ cmp_32(Ricnt, R1_tmp); 492 __ b(*profile_method_continue, lt); 493 494 // if no method data exists, go to profile_method 495 __ test_method_data_pointer(R1_tmp, *profile_method); 496 } 497 498 Address invoke_limit(Rcounters, in_bytes(MethodCounters::interpreter_invocation_limit_offset())); 499 __ ldr_s32(R1_tmp, invoke_limit); 500 __ cmp_32(Ricnt, R1_tmp); 501 __ b(*overflow, hs); 502 __ bind(done); 503 } 504 } 505 506 void TemplateInterpreterGenerator::generate_counter_overflow(Label& do_continue) { 507 // InterpreterRuntime::frequency_counter_overflow takes one argument 508 // indicating if the counter overflow occurs at a backwards branch (non-NULL bcp). 509 // The call returns the address of the verified entry point for the method or NULL 510 // if the compilation did not complete (either went background or bailed out). 511 __ mov(R1, (int)false); 512 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R1); 513 514 // jump to the interpreted entry. 515 __ b(do_continue); 516 } 517 518 void TemplateInterpreterGenerator::generate_stack_overflow_check(void) { 519 // Check if we've got enough room on the stack for 520 // - overhead; 521 // - locals; 522 // - expression stack. 523 // 524 // Registers on entry: 525 // 526 // R3 = number of additional locals 527 // R11 = max expression stack slots (AArch64 only) 528 // Rthread 529 // Rmethod 530 // Registers used: R0, R1, R2, Rtemp. 531 532 const Register Radditional_locals = R3; 533 const Register RmaxStack = AARCH64_ONLY(R11) NOT_AARCH64(R2); 534 535 // monitor entry size 536 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize; 537 538 // total overhead size: entry_size + (saved registers, thru expr stack bottom). 539 // be sure to change this if you add/subtract anything to/from the overhead area 540 const int overhead_size = (frame::sender_sp_offset - frame::interpreter_frame_initial_sp_offset)*wordSize + entry_size; 541 542 // Pages reserved for VM runtime calls and subsequent Java calls. 543 const int reserved_pages = JavaThread::stack_shadow_zone_size(); 544 545 // Thread::stack_size() includes guard pages, and they should not be touched. 546 const int guard_pages = JavaThread::stack_guard_zone_size(); 547 548 __ ldr(R0, Address(Rthread, Thread::stack_base_offset())); 549 __ ldr(R1, Address(Rthread, Thread::stack_size_offset())); 550 #ifndef AARCH64 551 __ ldr(Rtemp, Address(Rmethod, Method::const_offset())); 552 __ ldrh(RmaxStack, Address(Rtemp, ConstMethod::max_stack_offset())); 553 #endif // !AARCH64 554 __ sub_slow(Rtemp, SP, overhead_size + reserved_pages + guard_pages + Method::extra_stack_words()); 555 556 // reserve space for additional locals 557 __ sub(Rtemp, Rtemp, AsmOperand(Radditional_locals, lsl, Interpreter::logStackElementSize)); 558 559 // stack size 560 __ sub(R0, R0, R1); 561 562 // reserve space for expression stack 563 __ sub(Rtemp, Rtemp, AsmOperand(RmaxStack, lsl, Interpreter::logStackElementSize)); 564 565 __ cmp(Rtemp, R0); 566 567 #ifdef AARCH64 568 Label L; 569 __ b(L, hi); 570 __ mov(SP, Rsender_sp); // restore SP 571 __ b(StubRoutines::throw_StackOverflowError_entry()); 572 __ bind(L); 573 #else 574 __ mov(SP, Rsender_sp, ls); // restore SP 575 __ b(StubRoutines::throw_StackOverflowError_entry(), ls); 576 #endif // AARCH64 577 } 578 579 580 // Allocate monitor and lock method (asm interpreter) 581 // 582 void TemplateInterpreterGenerator::lock_method() { 583 // synchronize method 584 585 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize; 586 assert ((entry_size % StackAlignmentInBytes) == 0, "should keep stack alignment"); 587 588 #ifdef ASSERT 589 { Label L; 590 __ ldr_u32(Rtemp, Address(Rmethod, Method::access_flags_offset())); 591 __ tbnz(Rtemp, JVM_ACC_SYNCHRONIZED_BIT, L); 592 __ stop("method doesn't need synchronization"); 593 __ bind(L); 594 } 595 #endif // ASSERT 596 597 // get synchronization object 598 { Label done; 599 __ ldr_u32(Rtemp, Address(Rmethod, Method::access_flags_offset())); 600 #ifdef AARCH64 601 __ ldr(R0, Address(Rlocals, Interpreter::local_offset_in_bytes(0))); // get receiver (assume this is frequent case) 602 __ tbz(Rtemp, JVM_ACC_STATIC_BIT, done); 603 #else 604 __ tst(Rtemp, JVM_ACC_STATIC); 605 __ ldr(R0, Address(Rlocals, Interpreter::local_offset_in_bytes(0)), eq); // get receiver (assume this is frequent case) 606 __ b(done, eq); 607 #endif // AARCH64 608 __ load_mirror(R0, Rmethod, Rtemp); 609 __ bind(done); 610 } 611 612 // add space for monitor & lock 613 614 #ifdef AARCH64 615 __ check_extended_sp(Rtemp); 616 __ sub(SP, SP, entry_size); // adjust extended SP 617 __ mov(Rtemp, SP); 618 __ str(Rtemp, Address(FP, frame::interpreter_frame_extended_sp_offset * wordSize)); 619 #endif // AARCH64 620 621 __ sub(Rstack_top, Rstack_top, entry_size); 622 __ check_stack_top_on_expansion(); 623 // add space for a monitor entry 624 __ str(Rstack_top, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize)); 625 // set new monitor block top 626 __ str(R0, Address(Rstack_top, BasicObjectLock::obj_offset_in_bytes())); 627 // store object 628 __ mov(R1, Rstack_top); // monitor entry address 629 __ lock_object(R1); 630 } 631 632 #ifdef AARCH64 633 634 // 635 // Generate a fixed interpreter frame. This is identical setup for interpreted methods 636 // and for native methods hence the shared code. 637 // 638 // On entry: 639 // R10 = ConstMethod 640 // R11 = max expr. stack (in slots), if !native_call 641 // 642 // On exit: 643 // Rbcp, Rstack_top are initialized, SP is extended 644 // 645 void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) { 646 // Incoming registers 647 const Register RconstMethod = R10; 648 const Register RmaxStack = R11; 649 // Temporary registers 650 const Register RextendedSP = R0; 651 const Register Rcache = R1; 652 const Register Rmdp = ProfileInterpreter ? R2 : ZR; 653 654 // Generates the following stack layout (stack grows up in this picture): 655 // 656 // [ expr. stack bottom ] 657 // [ saved Rbcp ] 658 // [ current Rlocals ] 659 // [ cache ] 660 // [ mdx ] 661 // [ mirror ] 662 // [ Method* ] 663 // [ extended SP ] 664 // [ expr. stack top ] 665 // [ sender_sp ] 666 // [ saved FP ] <--- FP 667 // [ saved LR ] 668 669 // initialize fixed part of activation frame 670 __ stp(FP, LR, Address(SP, -2*wordSize, pre_indexed)); 671 __ mov(FP, SP); // establish new FP 672 673 // setup Rbcp 674 if (native_call) { 675 __ mov(Rbcp, ZR); // bcp = 0 for native calls 676 } else { 677 __ add(Rbcp, RconstMethod, in_bytes(ConstMethod::codes_offset())); // get codebase 678 } 679 680 // Rstack_top & RextendedSP 681 __ sub(Rstack_top, SP, 10*wordSize); 682 if (native_call) { 683 __ sub(RextendedSP, Rstack_top, align_up(wordSize, StackAlignmentInBytes)); // reserve 1 slot for exception handling 684 } else { 685 __ sub(RextendedSP, Rstack_top, AsmOperand(RmaxStack, lsl, Interpreter::logStackElementSize)); 686 __ align_reg(RextendedSP, RextendedSP, StackAlignmentInBytes); 687 } 688 __ mov(SP, RextendedSP); 689 __ check_stack_top(); 690 691 // Load Rmdp 692 if (ProfileInterpreter) { 693 __ ldr(Rtemp, Address(Rmethod, Method::method_data_offset())); 694 __ tst(Rtemp, Rtemp); 695 __ add(Rtemp, Rtemp, in_bytes(MethodData::data_offset())); 696 __ csel(Rmdp, ZR, Rtemp, eq); 697 } 698 699 // Load Rcache 700 __ ldr(Rtemp, Address(RconstMethod, ConstMethod::constants_offset())); 701 __ ldr(Rcache, Address(Rtemp, ConstantPool::cache_offset_in_bytes())); 702 // Get mirror and store it in the frame as GC root for this Method* 703 __ load_mirror(Rtemp, Rmethod, Rtemp); 704 705 // Build fixed frame 706 __ stp(Rstack_top, Rbcp, Address(FP, -10*wordSize)); 707 __ stp(Rlocals, Rcache, Address(FP, -8*wordSize)); 708 __ stp(Rmdp, Rtemp, Address(FP, -6*wordSize)); 709 __ stp(Rmethod, RextendedSP, Address(FP, -4*wordSize)); 710 __ stp(ZR, Rsender_sp, Address(FP, -2*wordSize)); 711 assert(frame::interpreter_frame_initial_sp_offset == -10, "interpreter frame broken"); 712 assert(frame::interpreter_frame_stack_top_offset == -2, "stack top broken"); 713 } 714 715 #else // AARCH64 716 717 // 718 // Generate a fixed interpreter frame. This is identical setup for interpreted methods 719 // and for native methods hence the shared code. 720 721 void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) { 722 // Generates the following stack layout: 723 // 724 // [ expr. stack bottom ] 725 // [ saved Rbcp ] 726 // [ current Rlocals ] 727 // [ cache ] 728 // [ mdx ] 729 // [ Method* ] 730 // [ last_sp ] 731 // [ sender_sp ] 732 // [ saved FP ] <--- FP 733 // [ saved LR ] 734 735 // initialize fixed part of activation frame 736 __ push(LR); // save return address 737 __ push(FP); // save FP 738 __ mov(FP, SP); // establish new FP 739 740 __ push(Rsender_sp); 741 742 __ mov(R0, 0); 743 __ push(R0); // leave last_sp as null 744 745 // setup Rbcp 746 if (native_call) { 747 __ mov(Rbcp, 0); // bcp = 0 for native calls 748 } else { 749 __ ldr(Rtemp, Address(Rmethod, Method::const_offset())); // get ConstMethod* 750 __ add(Rbcp, Rtemp, ConstMethod::codes_offset()); // get codebase 751 } 752 753 __ push(Rmethod); // save Method* 754 // Get mirror and store it in the frame as GC root for this Method* 755 __ load_mirror(Rtemp, Rmethod, Rtemp); 756 __ push(Rtemp); 757 758 if (ProfileInterpreter) { 759 __ ldr(Rtemp, Address(Rmethod, Method::method_data_offset())); 760 __ tst(Rtemp, Rtemp); 761 __ add(Rtemp, Rtemp, in_bytes(MethodData::data_offset()), ne); 762 __ push(Rtemp); // set the mdp (method data pointer) 763 } else { 764 __ push(R0); 765 } 766 767 __ ldr(Rtemp, Address(Rmethod, Method::const_offset())); 768 __ ldr(Rtemp, Address(Rtemp, ConstMethod::constants_offset())); 769 __ ldr(Rtemp, Address(Rtemp, ConstantPool::cache_offset_in_bytes())); 770 __ push(Rtemp); // set constant pool cache 771 __ push(Rlocals); // set locals pointer 772 __ push(Rbcp); // set bcp 773 __ push(R0); // reserve word for pointer to expression stack bottom 774 __ str(SP, Address(SP, 0)); // set expression stack bottom 775 } 776 777 #endif // AARCH64 778 779 // End of helpers 780 781 //------------------------------------------------------------------------------------------------------------------------ 782 // Entry points 783 // 784 // Here we generate the various kind of entries into the interpreter. 785 // The two main entry type are generic bytecode methods and native call method. 786 // These both come in synchronized and non-synchronized versions but the 787 // frame layout they create is very similar. The other method entry 788 // types are really just special purpose entries that are really entry 789 // and interpretation all in one. These are for trivial methods like 790 // accessor, empty, or special math methods. 791 // 792 // When control flow reaches any of the entry types for the interpreter 793 // the following holds -> 794 // 795 // Arguments: 796 // 797 // Rmethod: Method* 798 // Rthread: thread 799 // Rsender_sp: sender sp 800 // Rparams (SP on 32-bit ARM): pointer to method parameters 801 // 802 // LR: return address 803 // 804 // Stack layout immediately at entry 805 // 806 // [ optional padding(*)] <--- SP (AArch64) 807 // [ parameter n ] <--- Rparams (SP on 32-bit ARM) 808 // ... 809 // [ parameter 1 ] 810 // [ expression stack ] (caller's java expression stack) 811 812 // Assuming that we don't go to one of the trivial specialized 813 // entries the stack will look like below when we are ready to execute 814 // the first bytecode (or call the native routine). The register usage 815 // will be as the template based interpreter expects. 816 // 817 // local variables follow incoming parameters immediately; i.e. 818 // the return address is saved at the end of the locals. 819 // 820 // [ reserved stack (*) ] <--- SP (AArch64) 821 // [ expr. stack ] <--- Rstack_top (SP on 32-bit ARM) 822 // [ monitor entry ] 823 // ... 824 // [ monitor entry ] 825 // [ expr. stack bottom ] 826 // [ saved Rbcp ] 827 // [ current Rlocals ] 828 // [ cache ] 829 // [ mdx ] 830 // [ mirror ] 831 // [ Method* ] 832 // 833 // 32-bit ARM: 834 // [ last_sp ] 835 // 836 // AArch64: 837 // [ extended SP (*) ] 838 // [ stack top (*) ] 839 // 840 // [ sender_sp ] 841 // [ saved FP ] <--- FP 842 // [ saved LR ] 843 // [ optional padding(*)] 844 // [ local variable m ] 845 // ... 846 // [ local variable 1 ] 847 // [ parameter n ] 848 // ... 849 // [ parameter 1 ] <--- Rlocals 850 // 851 // (*) - AArch64 only 852 // 853 854 address TemplateInterpreterGenerator::generate_Reference_get_entry(void) { 855 #if INCLUDE_ALL_GCS 856 if (UseG1GC) { 857 // Code: _aload_0, _getfield, _areturn 858 // parameter size = 1 859 // 860 // The code that gets generated by this routine is split into 2 parts: 861 // 1. The "intrinsified" code for G1 (or any SATB based GC), 862 // 2. The slow path - which is an expansion of the regular method entry. 863 // 864 // Notes:- 865 // * In the G1 code we do not check whether we need to block for 866 // a safepoint. If G1 is enabled then we must execute the specialized 867 // code for Reference.get (except when the Reference object is null) 868 // so that we can log the value in the referent field with an SATB 869 // update buffer. 870 // If the code for the getfield template is modified so that the 871 // G1 pre-barrier code is executed when the current method is 872 // Reference.get() then going through the normal method entry 873 // will be fine. 874 // * The G1 code can, however, check the receiver object (the instance 875 // of java.lang.Reference) and jump to the slow path if null. If the 876 // Reference object is null then we obviously cannot fetch the referent 877 // and so we don't need to call the G1 pre-barrier. Thus we can use the 878 // regular method entry code to generate the NPE. 879 // 880 // This code is based on generate_accessor_enty. 881 // 882 // Rmethod: Method* 883 // Rthread: thread 884 // Rsender_sp: sender sp, must be preserved for slow path, set SP to it on fast path 885 // Rparams: parameters 886 887 address entry = __ pc(); 888 Label slow_path; 889 const Register Rthis = R0; 890 const Register Rret_addr = Rtmp_save1; 891 assert_different_registers(Rthis, Rret_addr, Rsender_sp); 892 893 const int referent_offset = java_lang_ref_Reference::referent_offset; 894 guarantee(referent_offset > 0, "referent offset not initialized"); 895 896 // Check if local 0 != NULL 897 // If the receiver is null then it is OK to jump to the slow path. 898 __ ldr(Rthis, Address(Rparams)); 899 __ cbz(Rthis, slow_path); 900 901 // Generate the G1 pre-barrier code to log the value of 902 // the referent field in an SATB buffer. 903 904 // Load the value of the referent field. 905 __ load_heap_oop(R0, Address(Rthis, referent_offset)); 906 907 // Preserve LR 908 __ mov(Rret_addr, LR); 909 910 __ g1_write_barrier_pre(noreg, // store_addr 911 noreg, // new_val 912 R0, // pre_val 913 Rtemp, // tmp1 914 R1_tmp); // tmp2 915 916 // _areturn 917 __ mov(SP, Rsender_sp); 918 __ ret(Rret_addr); 919 920 // generate a vanilla interpreter entry as the slow path 921 __ bind(slow_path); 922 __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals)); 923 return entry; 924 } 925 #endif // INCLUDE_ALL_GCS 926 927 // If G1 is not enabled then attempt to go through the normal entry point 928 return NULL; 929 } 930 931 // Not supported 932 address TemplateInterpreterGenerator::generate_CRC32_update_entry() { return NULL; } 933 address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) { return NULL; } 934 address TemplateInterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) { return NULL; } 935 936 // 937 // Interpreter stub for calling a native method. (asm interpreter) 938 // This sets up a somewhat different looking stack for calling the native method 939 // than the typical interpreter frame setup. 940 // 941 942 address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) { 943 // determine code generation flags 944 bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods; 945 946 // Incoming registers: 947 // 948 // Rmethod: Method* 949 // Rthread: thread 950 // Rsender_sp: sender sp 951 // Rparams: parameters 952 953 address entry_point = __ pc(); 954 955 // Register allocation 956 const Register Rsize_of_params = AARCH64_ONLY(R20) NOT_AARCH64(R6); 957 const Register Rsig_handler = AARCH64_ONLY(R21) NOT_AARCH64(Rtmp_save0 /* R4 */); 958 const Register Rnative_code = AARCH64_ONLY(R22) NOT_AARCH64(Rtmp_save1 /* R5 */); 959 const Register Rresult_handler = AARCH64_ONLY(Rsig_handler) NOT_AARCH64(R6); 960 961 #ifdef AARCH64 962 const Register RconstMethod = R10; // also used in generate_fixed_frame (should match) 963 const Register Rsaved_result = Rnative_code; 964 const FloatRegister Dsaved_result = V8; 965 #else 966 const Register Rsaved_result_lo = Rtmp_save0; // R4 967 const Register Rsaved_result_hi = Rtmp_save1; // R5 968 FloatRegister saved_result_fp; 969 #endif // AARCH64 970 971 972 #ifdef AARCH64 973 __ ldr(RconstMethod, Address(Rmethod, Method::const_offset())); 974 __ ldrh(Rsize_of_params, Address(RconstMethod, ConstMethod::size_of_parameters_offset())); 975 #else 976 __ ldr(Rsize_of_params, Address(Rmethod, Method::const_offset())); 977 __ ldrh(Rsize_of_params, Address(Rsize_of_params, ConstMethod::size_of_parameters_offset())); 978 #endif // AARCH64 979 980 // native calls don't need the stack size check since they have no expression stack 981 // and the arguments are already on the stack and we only add a handful of words 982 // to the stack 983 984 // compute beginning of parameters (Rlocals) 985 __ sub(Rlocals, Rparams, wordSize); 986 __ add(Rlocals, Rlocals, AsmOperand(Rsize_of_params, lsl, Interpreter::logStackElementSize)); 987 988 #ifdef AARCH64 989 int extra_stack_reserve = 2*wordSize; // extra space for oop_temp 990 if(__ can_post_interpreter_events()) { 991 // extra space for saved results 992 extra_stack_reserve += 2*wordSize; 993 } 994 // reserve extra stack space and nullify oop_temp slot 995 __ stp(ZR, ZR, Address(SP, -extra_stack_reserve, pre_indexed)); 996 #else 997 // reserve stack space for oop_temp 998 __ mov(R0, 0); 999 __ push(R0); 1000 #endif // AARCH64 1001 1002 generate_fixed_frame(true); // Note: R9 is now saved in the frame 1003 1004 // make sure method is native & not abstract 1005 #ifdef ASSERT 1006 __ ldr_u32(Rtemp, Address(Rmethod, Method::access_flags_offset())); 1007 { 1008 Label L; 1009 __ tbnz(Rtemp, JVM_ACC_NATIVE_BIT, L); 1010 __ stop("tried to execute non-native method as native"); 1011 __ bind(L); 1012 } 1013 { Label L; 1014 __ tbz(Rtemp, JVM_ACC_ABSTRACT_BIT, L); 1015 __ stop("tried to execute abstract method in interpreter"); 1016 __ bind(L); 1017 } 1018 #endif 1019 1020 // increment invocation count & check for overflow 1021 Label invocation_counter_overflow; 1022 if (inc_counter) { 1023 if (synchronized) { 1024 // Avoid unlocking method's monitor in case of exception, as it has not 1025 // been locked yet. 1026 __ set_do_not_unlock_if_synchronized(true, Rtemp); 1027 } 1028 generate_counter_incr(&invocation_counter_overflow, NULL, NULL); 1029 } 1030 1031 Label continue_after_compile; 1032 __ bind(continue_after_compile); 1033 1034 if (inc_counter && synchronized) { 1035 __ set_do_not_unlock_if_synchronized(false, Rtemp); 1036 } 1037 1038 // check for synchronized methods 1039 // Must happen AFTER invocation_counter check and stack overflow check, 1040 // so method is not locked if overflows. 1041 // 1042 if (synchronized) { 1043 lock_method(); 1044 } else { 1045 // no synchronization necessary 1046 #ifdef ASSERT 1047 { Label L; 1048 __ ldr_u32(Rtemp, Address(Rmethod, Method::access_flags_offset())); 1049 __ tbz(Rtemp, JVM_ACC_SYNCHRONIZED_BIT, L); 1050 __ stop("method needs synchronization"); 1051 __ bind(L); 1052 } 1053 #endif 1054 } 1055 1056 // start execution 1057 #ifdef ASSERT 1058 { Label L; 1059 __ ldr(Rtemp, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize)); 1060 __ cmp(Rtemp, Rstack_top); 1061 __ b(L, eq); 1062 __ stop("broken stack frame setup in interpreter"); 1063 __ bind(L); 1064 } 1065 #endif 1066 __ check_extended_sp(Rtemp); 1067 1068 // jvmti/dtrace support 1069 __ notify_method_entry(); 1070 #if R9_IS_SCRATCHED 1071 __ restore_method(); 1072 #endif 1073 1074 { 1075 Label L; 1076 __ ldr(Rsig_handler, Address(Rmethod, Method::signature_handler_offset())); 1077 __ cbnz(Rsig_handler, L); 1078 __ mov(R1, Rmethod); 1079 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), R1, true); 1080 __ ldr(Rsig_handler, Address(Rmethod, Method::signature_handler_offset())); 1081 __ bind(L); 1082 } 1083 1084 { 1085 Label L; 1086 __ ldr(Rnative_code, Address(Rmethod, Method::native_function_offset())); 1087 __ cbnz(Rnative_code, L); 1088 __ mov(R1, Rmethod); 1089 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), R1); 1090 __ ldr(Rnative_code, Address(Rmethod, Method::native_function_offset())); 1091 __ bind(L); 1092 } 1093 1094 // Allocate stack space for arguments 1095 1096 #ifdef AARCH64 1097 __ sub(Rtemp, SP, Rsize_of_params, ex_uxtw, LogBytesPerWord); 1098 __ align_reg(SP, Rtemp, StackAlignmentInBytes); 1099 1100 // Allocate more stack space to accomodate all arguments passed on GP and FP registers: 1101 // 8 * wordSize for GPRs 1102 // 8 * wordSize for FPRs 1103 int reg_arguments = align_up(8*wordSize + 8*wordSize, StackAlignmentInBytes); 1104 #else 1105 1106 // C functions need aligned stack 1107 __ bic(SP, SP, StackAlignmentInBytes - 1); 1108 // Multiply by BytesPerLong instead of BytesPerWord, because calling convention 1109 // may require empty slots due to long alignment, e.g. func(int, jlong, int, jlong) 1110 __ sub(SP, SP, AsmOperand(Rsize_of_params, lsl, LogBytesPerLong)); 1111 1112 #ifdef __ABI_HARD__ 1113 // Allocate more stack space to accomodate all GP as well as FP registers: 1114 // 4 * wordSize 1115 // 8 * BytesPerLong 1116 int reg_arguments = align_up((4*wordSize) + (8*BytesPerLong), StackAlignmentInBytes); 1117 #else 1118 // Reserve at least 4 words on the stack for loading 1119 // of parameters passed on registers (R0-R3). 1120 // See generate_slow_signature_handler(). 1121 // It is also used for JNIEnv & class additional parameters. 1122 int reg_arguments = 4 * wordSize; 1123 #endif // __ABI_HARD__ 1124 #endif // AARCH64 1125 1126 __ sub(SP, SP, reg_arguments); 1127 1128 1129 // Note: signature handler blows R4 (32-bit ARM) or R21 (AArch64) besides all scratch registers. 1130 // See AbstractInterpreterGenerator::generate_slow_signature_handler(). 1131 __ call(Rsig_handler); 1132 #if R9_IS_SCRATCHED 1133 __ restore_method(); 1134 #endif 1135 __ mov(Rresult_handler, R0); 1136 1137 // Pass JNIEnv and mirror for static methods 1138 { 1139 Label L; 1140 __ ldr_u32(Rtemp, Address(Rmethod, Method::access_flags_offset())); 1141 __ add(R0, Rthread, in_bytes(JavaThread::jni_environment_offset())); 1142 __ tbz(Rtemp, JVM_ACC_STATIC_BIT, L); 1143 __ load_mirror(Rtemp, Rmethod, Rtemp); 1144 __ add(R1, FP, frame::interpreter_frame_oop_temp_offset * wordSize); 1145 __ str(Rtemp, Address(R1, 0)); 1146 __ bind(L); 1147 } 1148 1149 __ set_last_Java_frame(SP, FP, true, Rtemp); 1150 1151 // Changing state to _thread_in_native must be the last thing to do 1152 // before the jump to native code. At this moment stack must be 1153 // safepoint-safe and completely prepared for stack walking. 1154 #ifdef ASSERT 1155 { 1156 Label L; 1157 __ ldr_u32(Rtemp, Address(Rthread, JavaThread::thread_state_offset())); 1158 __ cmp_32(Rtemp, _thread_in_Java); 1159 __ b(L, eq); 1160 __ stop("invalid thread state"); 1161 __ bind(L); 1162 } 1163 #endif 1164 1165 #ifdef AARCH64 1166 __ mov(Rtemp, _thread_in_native); 1167 __ add(Rtemp2, Rthread, in_bytes(JavaThread::thread_state_offset())); 1168 // STLR is used to force all preceding writes to be observed prior to thread state change 1169 __ stlr_w(Rtemp, Rtemp2); 1170 #else 1171 // Force all preceding writes to be observed prior to thread state change 1172 __ membar(MacroAssembler::StoreStore, Rtemp); 1173 1174 __ mov(Rtemp, _thread_in_native); 1175 __ str(Rtemp, Address(Rthread, JavaThread::thread_state_offset())); 1176 #endif // AARCH64 1177 1178 __ call(Rnative_code); 1179 #if R9_IS_SCRATCHED 1180 __ restore_method(); 1181 #endif 1182 1183 // Set FPSCR/FPCR to a known state 1184 if (AlwaysRestoreFPU) { 1185 __ restore_default_fp_mode(); 1186 } 1187 1188 // Do safepoint check 1189 __ mov(Rtemp, _thread_in_native_trans); 1190 __ str_32(Rtemp, Address(Rthread, JavaThread::thread_state_offset())); 1191 1192 // Force this write out before the read below 1193 __ membar(MacroAssembler::StoreLoad, Rtemp); 1194 1195 __ ldr_global_s32(Rtemp, SafepointSynchronize::address_of_state()); 1196 1197 // Protect the return value in the interleaved code: save it to callee-save registers. 1198 #ifdef AARCH64 1199 __ mov(Rsaved_result, R0); 1200 __ fmov_d(Dsaved_result, D0); 1201 #else 1202 __ mov(Rsaved_result_lo, R0); 1203 __ mov(Rsaved_result_hi, R1); 1204 #ifdef __ABI_HARD__ 1205 // preserve native FP result in a callee-saved register 1206 saved_result_fp = D8; 1207 __ fcpyd(saved_result_fp, D0); 1208 #else 1209 saved_result_fp = fnoreg; 1210 #endif // __ABI_HARD__ 1211 #endif // AARCH64 1212 1213 { 1214 __ ldr_u32(R3, Address(Rthread, JavaThread::suspend_flags_offset())); 1215 __ cmp(Rtemp, SafepointSynchronize::_not_synchronized); 1216 __ cond_cmp(R3, 0, eq); 1217 1218 #ifdef AARCH64 1219 Label L; 1220 __ b(L, eq); 1221 __ mov(R0, Rthread); 1222 __ call(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans), relocInfo::none); 1223 __ bind(L); 1224 #else 1225 __ mov(R0, Rthread, ne); 1226 __ call(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans), relocInfo::none, ne); 1227 #if R9_IS_SCRATCHED 1228 __ restore_method(); 1229 #endif 1230 #endif // AARCH64 1231 } 1232 1233 // Perform Native->Java thread transition 1234 __ mov(Rtemp, _thread_in_Java); 1235 __ str_32(Rtemp, Address(Rthread, JavaThread::thread_state_offset())); 1236 1237 // Zero handles and last_java_sp 1238 __ reset_last_Java_frame(Rtemp); 1239 __ ldr(R3, Address(Rthread, JavaThread::active_handles_offset())); 1240 __ str_32(__ zero_register(Rtemp), Address(R3, JNIHandleBlock::top_offset_in_bytes())); 1241 if (CheckJNICalls) { 1242 __ str(__ zero_register(Rtemp), Address(Rthread, JavaThread::pending_jni_exception_check_fn_offset())); 1243 } 1244 1245 // Unbox oop result, e.g. JNIHandles::resolve result if it's an oop. 1246 { 1247 Label Lnot_oop; 1248 #ifdef AARCH64 1249 __ mov_slow(Rtemp, AbstractInterpreter::result_handler(T_OBJECT)); 1250 __ cmp(Rresult_handler, Rtemp); 1251 __ b(Lnot_oop, ne); 1252 #else // !AARCH64 1253 // For ARM32, Rresult_handler is -1 for oop result, 0 otherwise. 1254 __ cbz(Rresult_handler, Lnot_oop); 1255 #endif // !AARCH64 1256 Register value = AARCH64_ONLY(Rsaved_result) NOT_AARCH64(Rsaved_result_lo); 1257 __ resolve_jobject(value, // value 1258 Rtemp, // tmp1 1259 R1_tmp); // tmp2 1260 // Store resolved result in frame for GC visibility. 1261 __ str(value, Address(FP, frame::interpreter_frame_oop_temp_offset * wordSize)); 1262 __ bind(Lnot_oop); 1263 } 1264 1265 #ifdef AARCH64 1266 // Restore SP (drop native parameters area), to keep SP in sync with extended_sp in frame 1267 __ restore_sp_after_call(Rtemp); 1268 __ check_stack_top(); 1269 #endif // AARCH64 1270 1271 // reguard stack if StackOverflow exception happened while in native. 1272 { 1273 __ ldr_u32(Rtemp, Address(Rthread, JavaThread::stack_guard_state_offset())); 1274 __ cmp_32(Rtemp, JavaThread::stack_guard_yellow_reserved_disabled); 1275 #ifdef AARCH64 1276 Label L; 1277 __ b(L, ne); 1278 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages), relocInfo::none); 1279 __ bind(L); 1280 #else 1281 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages), relocInfo::none, eq); 1282 #if R9_IS_SCRATCHED 1283 __ restore_method(); 1284 #endif 1285 #endif // AARCH64 1286 } 1287 1288 // check pending exceptions 1289 { 1290 __ ldr(Rtemp, Address(Rthread, Thread::pending_exception_offset())); 1291 #ifdef AARCH64 1292 Label L; 1293 __ cbz(Rtemp, L); 1294 __ mov_pc_to(Rexception_pc); 1295 __ b(StubRoutines::forward_exception_entry()); 1296 __ bind(L); 1297 #else 1298 __ cmp(Rtemp, 0); 1299 __ mov(Rexception_pc, PC, ne); 1300 __ b(StubRoutines::forward_exception_entry(), ne); 1301 #endif // AARCH64 1302 } 1303 1304 if (synchronized) { 1305 // address of first monitor 1306 __ sub(R1, FP, - (frame::interpreter_frame_monitor_block_bottom_offset - frame::interpreter_frame_monitor_size()) * wordSize); 1307 __ unlock_object(R1); 1308 } 1309 1310 // jvmti/dtrace support 1311 // Note: This must happen _after_ handling/throwing any exceptions since 1312 // the exception handler code notifies the runtime of method exits 1313 // too. If this happens before, method entry/exit notifications are 1314 // not properly paired (was bug - gri 11/22/99). 1315 #ifdef AARCH64 1316 __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI, true, Rsaved_result, noreg, Dsaved_result); 1317 #else 1318 __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI, true, Rsaved_result_lo, Rsaved_result_hi, saved_result_fp); 1319 #endif // AARCH64 1320 1321 // Restore the result. Oop result is restored from the stack. 1322 #ifdef AARCH64 1323 __ mov(R0, Rsaved_result); 1324 __ fmov_d(D0, Dsaved_result); 1325 1326 __ blr(Rresult_handler); 1327 #else 1328 __ cmp(Rresult_handler, 0); 1329 __ ldr(R0, Address(FP, frame::interpreter_frame_oop_temp_offset * wordSize), ne); 1330 __ mov(R0, Rsaved_result_lo, eq); 1331 __ mov(R1, Rsaved_result_hi); 1332 1333 #ifdef __ABI_HARD__ 1334 // reload native FP result 1335 __ fcpyd(D0, D8); 1336 #endif // __ABI_HARD__ 1337 1338 #ifdef ASSERT 1339 if (VerifyOops) { 1340 Label L; 1341 __ cmp(Rresult_handler, 0); 1342 __ b(L, eq); 1343 __ verify_oop(R0); 1344 __ bind(L); 1345 } 1346 #endif // ASSERT 1347 #endif // AARCH64 1348 1349 // Restore FP/LR, sender_sp and return 1350 #ifdef AARCH64 1351 __ ldr(Rtemp, Address(FP, frame::interpreter_frame_sender_sp_offset * wordSize)); 1352 __ ldp(FP, LR, Address(FP)); 1353 __ mov(SP, Rtemp); 1354 #else 1355 __ mov(Rtemp, FP); 1356 __ ldmia(FP, RegisterSet(FP) | RegisterSet(LR)); 1357 __ ldr(SP, Address(Rtemp, frame::interpreter_frame_sender_sp_offset * wordSize)); 1358 #endif // AARCH64 1359 1360 __ ret(); 1361 1362 if (inc_counter) { 1363 // Handle overflow of counter and compile method 1364 __ bind(invocation_counter_overflow); 1365 generate_counter_overflow(continue_after_compile); 1366 } 1367 1368 return entry_point; 1369 } 1370 1371 // 1372 // Generic interpreted method entry to (asm) interpreter 1373 // 1374 address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) { 1375 // determine code generation flags 1376 bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods; 1377 1378 // Rmethod: Method* 1379 // Rthread: thread 1380 // Rsender_sp: sender sp (could differ from SP if we were called via c2i) 1381 // Rparams: pointer to the last parameter in the stack 1382 1383 address entry_point = __ pc(); 1384 1385 const Register RconstMethod = AARCH64_ONLY(R10) NOT_AARCH64(R3); 1386 1387 #ifdef AARCH64 1388 const Register RmaxStack = R11; 1389 const Register RlocalsBase = R12; 1390 #endif // AARCH64 1391 1392 __ ldr(RconstMethod, Address(Rmethod, Method::const_offset())); 1393 1394 __ ldrh(R2, Address(RconstMethod, ConstMethod::size_of_parameters_offset())); 1395 __ ldrh(R3, Address(RconstMethod, ConstMethod::size_of_locals_offset())); 1396 1397 // setup Rlocals 1398 __ sub(Rlocals, Rparams, wordSize); 1399 __ add(Rlocals, Rlocals, AsmOperand(R2, lsl, Interpreter::logStackElementSize)); 1400 1401 __ sub(R3, R3, R2); // number of additional locals 1402 1403 #ifdef AARCH64 1404 // setup RmaxStack 1405 __ ldrh(RmaxStack, Address(RconstMethod, ConstMethod::max_stack_offset())); 1406 // We have to add extra reserved slots to max_stack. There are 3 users of the extra slots, 1407 // none of which are at the same time, so we just need to make sure there is enough room 1408 // for the biggest user: 1409 // -reserved slot for exception handler 1410 // -reserved slots for JSR292. Method::extra_stack_entries() is the size. 1411 // -3 reserved slots so get_method_counters() can save some registers before call_VM(). 1412 __ add(RmaxStack, RmaxStack, MAX2(3, Method::extra_stack_entries())); 1413 #endif // AARCH64 1414 1415 // see if we've got enough room on the stack for locals plus overhead. 1416 generate_stack_overflow_check(); 1417 1418 #ifdef AARCH64 1419 1420 // allocate space for locals 1421 { 1422 __ sub(RlocalsBase, Rparams, AsmOperand(R3, lsl, Interpreter::logStackElementSize)); 1423 __ align_reg(SP, RlocalsBase, StackAlignmentInBytes); 1424 } 1425 1426 // explicitly initialize locals 1427 { 1428 Label zero_loop, done; 1429 __ cbz(R3, done); 1430 1431 __ tbz(R3, 0, zero_loop); 1432 __ subs(R3, R3, 1); 1433 __ str(ZR, Address(RlocalsBase, wordSize, post_indexed)); 1434 __ b(done, eq); 1435 1436 __ bind(zero_loop); 1437 __ subs(R3, R3, 2); 1438 __ stp(ZR, ZR, Address(RlocalsBase, 2*wordSize, post_indexed)); 1439 __ b(zero_loop, ne); 1440 1441 __ bind(done); 1442 } 1443 1444 #else 1445 // allocate space for locals 1446 // explicitly initialize locals 1447 1448 // Loop is unrolled 4 times 1449 Label loop; 1450 __ mov(R0, 0); 1451 __ bind(loop); 1452 1453 // #1 1454 __ subs(R3, R3, 1); 1455 __ push(R0, ge); 1456 1457 // #2 1458 __ subs(R3, R3, 1, ge); 1459 __ push(R0, ge); 1460 1461 // #3 1462 __ subs(R3, R3, 1, ge); 1463 __ push(R0, ge); 1464 1465 // #4 1466 __ subs(R3, R3, 1, ge); 1467 __ push(R0, ge); 1468 1469 __ b(loop, gt); 1470 #endif // AARCH64 1471 1472 // initialize fixed part of activation frame 1473 generate_fixed_frame(false); 1474 1475 __ restore_dispatch(); 1476 1477 // make sure method is not native & not abstract 1478 #ifdef ASSERT 1479 __ ldr_u32(Rtemp, Address(Rmethod, Method::access_flags_offset())); 1480 { 1481 Label L; 1482 __ tbz(Rtemp, JVM_ACC_NATIVE_BIT, L); 1483 __ stop("tried to execute native method as non-native"); 1484 __ bind(L); 1485 } 1486 { Label L; 1487 __ tbz(Rtemp, JVM_ACC_ABSTRACT_BIT, L); 1488 __ stop("tried to execute abstract method in interpreter"); 1489 __ bind(L); 1490 } 1491 #endif 1492 1493 // increment invocation count & check for overflow 1494 Label invocation_counter_overflow; 1495 Label profile_method; 1496 Label profile_method_continue; 1497 if (inc_counter) { 1498 if (synchronized) { 1499 // Avoid unlocking method's monitor in case of exception, as it has not 1500 // been locked yet. 1501 __ set_do_not_unlock_if_synchronized(true, Rtemp); 1502 } 1503 generate_counter_incr(&invocation_counter_overflow, &profile_method, &profile_method_continue); 1504 if (ProfileInterpreter) { 1505 __ bind(profile_method_continue); 1506 } 1507 } 1508 Label continue_after_compile; 1509 __ bind(continue_after_compile); 1510 1511 if (inc_counter && synchronized) { 1512 __ set_do_not_unlock_if_synchronized(false, Rtemp); 1513 } 1514 #if R9_IS_SCRATCHED 1515 __ restore_method(); 1516 #endif 1517 1518 // check for synchronized methods 1519 // Must happen AFTER invocation_counter check and stack overflow check, 1520 // so method is not locked if overflows. 1521 // 1522 if (synchronized) { 1523 // Allocate monitor and lock method 1524 lock_method(); 1525 } else { 1526 // no synchronization necessary 1527 #ifdef ASSERT 1528 { Label L; 1529 __ ldr_u32(Rtemp, Address(Rmethod, Method::access_flags_offset())); 1530 __ tbz(Rtemp, JVM_ACC_SYNCHRONIZED_BIT, L); 1531 __ stop("method needs synchronization"); 1532 __ bind(L); 1533 } 1534 #endif 1535 } 1536 1537 // start execution 1538 #ifdef ASSERT 1539 { Label L; 1540 __ ldr(Rtemp, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize)); 1541 __ cmp(Rtemp, Rstack_top); 1542 __ b(L, eq); 1543 __ stop("broken stack frame setup in interpreter"); 1544 __ bind(L); 1545 } 1546 #endif 1547 __ check_extended_sp(Rtemp); 1548 1549 // jvmti support 1550 __ notify_method_entry(); 1551 #if R9_IS_SCRATCHED 1552 __ restore_method(); 1553 #endif 1554 1555 __ dispatch_next(vtos); 1556 1557 // invocation counter overflow 1558 if (inc_counter) { 1559 if (ProfileInterpreter) { 1560 // We have decided to profile this method in the interpreter 1561 __ bind(profile_method); 1562 1563 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method)); 1564 __ set_method_data_pointer_for_bcp(); 1565 1566 __ b(profile_method_continue); 1567 } 1568 1569 // Handle overflow of counter and compile method 1570 __ bind(invocation_counter_overflow); 1571 generate_counter_overflow(continue_after_compile); 1572 } 1573 1574 return entry_point; 1575 } 1576 1577 //------------------------------------------------------------------------------------------------------------------------ 1578 // Exceptions 1579 1580 void TemplateInterpreterGenerator::generate_throw_exception() { 1581 // Entry point in previous activation (i.e., if the caller was interpreted) 1582 Interpreter::_rethrow_exception_entry = __ pc(); 1583 // Rexception_obj: exception 1584 1585 #ifndef AARCH64 1586 // Clear interpreter_frame_last_sp. 1587 __ mov(Rtemp, 0); 1588 __ str(Rtemp, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize)); 1589 #endif // !AARCH64 1590 1591 #if R9_IS_SCRATCHED 1592 __ restore_method(); 1593 #endif 1594 __ restore_bcp(); 1595 __ restore_dispatch(); 1596 __ restore_locals(); 1597 1598 #ifdef AARCH64 1599 __ restore_sp_after_call(Rtemp); 1600 #endif // AARCH64 1601 1602 // Entry point for exceptions thrown within interpreter code 1603 Interpreter::_throw_exception_entry = __ pc(); 1604 1605 // expression stack is undefined here 1606 // Rexception_obj: exception 1607 // Rbcp: exception bcp 1608 __ verify_oop(Rexception_obj); 1609 1610 // expression stack must be empty before entering the VM in case of an exception 1611 __ empty_expression_stack(); 1612 // find exception handler address and preserve exception oop 1613 __ mov(R1, Rexception_obj); 1614 __ call_VM(Rexception_obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::exception_handler_for_exception), R1); 1615 // R0: exception handler entry point 1616 // Rexception_obj: preserved exception oop 1617 // Rbcp: bcp for exception handler 1618 __ push_ptr(Rexception_obj); // push exception which is now the only value on the stack 1619 __ jump(R0); // jump to exception handler (may be _remove_activation_entry!) 1620 1621 // If the exception is not handled in the current frame the frame is removed and 1622 // the exception is rethrown (i.e. exception continuation is _rethrow_exception). 1623 // 1624 // Note: At this point the bci is still the bxi for the instruction which caused 1625 // the exception and the expression stack is empty. Thus, for any VM calls 1626 // at this point, GC will find a legal oop map (with empty expression stack). 1627 1628 // In current activation 1629 // tos: exception 1630 // Rbcp: exception bcp 1631 1632 // 1633 // JVMTI PopFrame support 1634 // 1635 Interpreter::_remove_activation_preserving_args_entry = __ pc(); 1636 1637 #ifdef AARCH64 1638 __ restore_sp_after_call(Rtemp); // restore SP to extended SP 1639 #endif // AARCH64 1640 1641 __ empty_expression_stack(); 1642 1643 // Set the popframe_processing bit in _popframe_condition indicating that we are 1644 // currently handling popframe, so that call_VMs that may happen later do not trigger new 1645 // popframe handling cycles. 1646 1647 __ ldr_s32(Rtemp, Address(Rthread, JavaThread::popframe_condition_offset())); 1648 __ orr(Rtemp, Rtemp, (unsigned)JavaThread::popframe_processing_bit); 1649 __ str_32(Rtemp, Address(Rthread, JavaThread::popframe_condition_offset())); 1650 1651 { 1652 // Check to see whether we are returning to a deoptimized frame. 1653 // (The PopFrame call ensures that the caller of the popped frame is 1654 // either interpreted or compiled and deoptimizes it if compiled.) 1655 // In this case, we can't call dispatch_next() after the frame is 1656 // popped, but instead must save the incoming arguments and restore 1657 // them after deoptimization has occurred. 1658 // 1659 // Note that we don't compare the return PC against the 1660 // deoptimization blob's unpack entry because of the presence of 1661 // adapter frames in C2. 1662 Label caller_not_deoptimized; 1663 __ ldr(R0, Address(FP, frame::return_addr_offset * wordSize)); 1664 __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), R0); 1665 __ cbnz_32(R0, caller_not_deoptimized); 1666 #ifdef AARCH64 1667 __ NOT_TESTED(); 1668 #endif 1669 1670 // Compute size of arguments for saving when returning to deoptimized caller 1671 __ restore_method(); 1672 __ ldr(R0, Address(Rmethod, Method::const_offset())); 1673 __ ldrh(R0, Address(R0, ConstMethod::size_of_parameters_offset())); 1674 1675 __ logical_shift_left(R1, R0, Interpreter::logStackElementSize); 1676 // Save these arguments 1677 __ restore_locals(); 1678 __ sub(R2, Rlocals, R1); 1679 __ add(R2, R2, wordSize); 1680 __ mov(R0, Rthread); 1681 __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), R0, R1, R2); 1682 1683 __ remove_activation(vtos, LR, 1684 /* throw_monitor_exception */ false, 1685 /* install_monitor_exception */ false, 1686 /* notify_jvmdi */ false); 1687 1688 // Inform deoptimization that it is responsible for restoring these arguments 1689 __ mov(Rtemp, JavaThread::popframe_force_deopt_reexecution_bit); 1690 __ str_32(Rtemp, Address(Rthread, JavaThread::popframe_condition_offset())); 1691 1692 // Continue in deoptimization handler 1693 __ ret(); 1694 1695 __ bind(caller_not_deoptimized); 1696 } 1697 1698 __ remove_activation(vtos, R4, 1699 /* throw_monitor_exception */ false, 1700 /* install_monitor_exception */ false, 1701 /* notify_jvmdi */ false); 1702 1703 #ifndef AARCH64 1704 // Finish with popframe handling 1705 // A previous I2C followed by a deoptimization might have moved the 1706 // outgoing arguments further up the stack. PopFrame expects the 1707 // mutations to those outgoing arguments to be preserved and other 1708 // constraints basically require this frame to look exactly as 1709 // though it had previously invoked an interpreted activation with 1710 // no space between the top of the expression stack (current 1711 // last_sp) and the top of stack. Rather than force deopt to 1712 // maintain this kind of invariant all the time we call a small 1713 // fixup routine to move the mutated arguments onto the top of our 1714 // expression stack if necessary. 1715 __ mov(R1, SP); 1716 __ ldr(R2, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize)); 1717 // PC must point into interpreter here 1718 __ set_last_Java_frame(SP, FP, true, Rtemp); 1719 __ mov(R0, Rthread); 1720 __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), R0, R1, R2); 1721 __ reset_last_Java_frame(Rtemp); 1722 #endif // !AARCH64 1723 1724 #ifdef AARCH64 1725 __ restore_sp_after_call(Rtemp); 1726 __ restore_stack_top(); 1727 #else 1728 // Restore the last_sp and null it out 1729 __ ldr(SP, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize)); 1730 __ mov(Rtemp, (int)NULL_WORD); 1731 __ str(Rtemp, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize)); 1732 #endif // AARCH64 1733 1734 __ restore_bcp(); 1735 __ restore_dispatch(); 1736 __ restore_locals(); 1737 __ restore_method(); 1738 1739 // The method data pointer was incremented already during 1740 // call profiling. We have to restore the mdp for the current bcp. 1741 if (ProfileInterpreter) { 1742 __ set_method_data_pointer_for_bcp(); 1743 } 1744 1745 // Clear the popframe condition flag 1746 assert(JavaThread::popframe_inactive == 0, "adjust this code"); 1747 __ str_32(__ zero_register(Rtemp), Address(Rthread, JavaThread::popframe_condition_offset())); 1748 1749 #if INCLUDE_JVMTI 1750 { 1751 Label L_done; 1752 1753 __ ldrb(Rtemp, Address(Rbcp, 0)); 1754 __ cmp(Rtemp, Bytecodes::_invokestatic); 1755 __ b(L_done, ne); 1756 1757 // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call. 1758 // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL. 1759 1760 // get local0 1761 __ ldr(R1, Address(Rlocals, 0)); 1762 __ mov(R2, Rmethod); 1763 __ mov(R3, Rbcp); 1764 __ call_VM(R0, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), R1, R2, R3); 1765 1766 __ cbz(R0, L_done); 1767 1768 __ str(R0, Address(Rstack_top)); 1769 __ bind(L_done); 1770 } 1771 #endif // INCLUDE_JVMTI 1772 1773 __ dispatch_next(vtos); 1774 // end of PopFrame support 1775 1776 Interpreter::_remove_activation_entry = __ pc(); 1777 1778 // preserve exception over this code sequence 1779 __ pop_ptr(R0_tos); 1780 __ str(R0_tos, Address(Rthread, JavaThread::vm_result_offset())); 1781 // remove the activation (without doing throws on illegalMonitorExceptions) 1782 __ remove_activation(vtos, Rexception_pc, false, true, false); 1783 // restore exception 1784 __ get_vm_result(Rexception_obj, Rtemp); 1785 1786 // Inbetween activations - previous activation type unknown yet 1787 // compute continuation point - the continuation point expects 1788 // the following registers set up: 1789 // 1790 // Rexception_obj: exception 1791 // Rexception_pc: return address/pc that threw exception 1792 // SP: expression stack of caller 1793 // FP: frame pointer of caller 1794 __ mov(c_rarg0, Rthread); 1795 __ mov(c_rarg1, Rexception_pc); 1796 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), c_rarg0, c_rarg1); 1797 // Note that an "issuing PC" is actually the next PC after the call 1798 1799 __ jump(R0); // jump to exception handler of caller 1800 } 1801 1802 1803 // 1804 // JVMTI ForceEarlyReturn support 1805 // 1806 address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) { 1807 address entry = __ pc(); 1808 1809 #ifdef AARCH64 1810 __ restore_sp_after_call(Rtemp); // restore SP to extended SP 1811 #endif // AARCH64 1812 1813 __ restore_bcp(); 1814 __ restore_dispatch(); 1815 __ restore_locals(); 1816 1817 __ empty_expression_stack(); 1818 1819 __ load_earlyret_value(state); 1820 1821 // Clear the earlyret state 1822 __ ldr(Rtemp, Address(Rthread, JavaThread::jvmti_thread_state_offset())); 1823 1824 assert(JvmtiThreadState::earlyret_inactive == 0, "adjust this code"); 1825 __ str_32(__ zero_register(R2), Address(Rtemp, JvmtiThreadState::earlyret_state_offset())); 1826 1827 __ remove_activation(state, LR, 1828 false, /* throw_monitor_exception */ 1829 false, /* install_monitor_exception */ 1830 true); /* notify_jvmdi */ 1831 1832 #ifndef AARCH64 1833 // According to interpreter calling conventions, result is returned in R0/R1, 1834 // so ftos (S0) and dtos (D0) are moved to R0/R1. 1835 // This conversion should be done after remove_activation, as it uses 1836 // push(state) & pop(state) to preserve return value. 1837 __ convert_tos_to_retval(state); 1838 #endif // !AARCH64 1839 __ ret(); 1840 1841 return entry; 1842 } // end of ForceEarlyReturn support 1843 1844 1845 //------------------------------------------------------------------------------------------------------------------------ 1846 // Helper for vtos entry point generation 1847 1848 void TemplateInterpreterGenerator::set_vtos_entry_points (Template* t, address& bep, address& cep, address& sep, address& aep, address& iep, address& lep, address& fep, address& dep, address& vep) { 1849 assert(t->is_valid() && t->tos_in() == vtos, "illegal template"); 1850 Label L; 1851 1852 #ifdef __SOFTFP__ 1853 dep = __ pc(); // fall through 1854 #else 1855 fep = __ pc(); __ push(ftos); __ b(L); 1856 dep = __ pc(); __ push(dtos); __ b(L); 1857 #endif // __SOFTFP__ 1858 1859 lep = __ pc(); __ push(ltos); __ b(L); 1860 1861 if (AARCH64_ONLY(true) NOT_AARCH64(VerifyOops)) { // can't share atos entry with itos on AArch64 or if VerifyOops 1862 aep = __ pc(); __ push(atos); __ b(L); 1863 } else { 1864 aep = __ pc(); // fall through 1865 } 1866 1867 #ifdef __SOFTFP__ 1868 fep = __ pc(); // fall through 1869 #endif // __SOFTFP__ 1870 1871 bep = cep = sep = // fall through 1872 iep = __ pc(); __ push(itos); // fall through 1873 vep = __ pc(); __ bind(L); // fall through 1874 generate_and_dispatch(t); 1875 } 1876 1877 //------------------------------------------------------------------------------------------------------------------------ 1878 1879 // Non-product code 1880 #ifndef PRODUCT 1881 address TemplateInterpreterGenerator::generate_trace_code(TosState state) { 1882 address entry = __ pc(); 1883 1884 // prepare expression stack 1885 __ push(state); // save tosca 1886 1887 // pass tosca registers as arguments 1888 __ mov(R2, R0_tos); 1889 #ifdef AARCH64 1890 __ mov(R3, ZR); 1891 #else 1892 __ mov(R3, R1_tos_hi); 1893 #endif // AARCH64 1894 __ mov(R1, LR); // save return address 1895 1896 // call tracer 1897 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode), R1, R2, R3); 1898 1899 __ mov(LR, R0); // restore return address 1900 __ pop(state); // restore tosca 1901 1902 // return 1903 __ ret(); 1904 1905 return entry; 1906 } 1907 1908 1909 void TemplateInterpreterGenerator::count_bytecode() { 1910 __ inc_global_counter((address) &BytecodeCounter::_counter_value, 0, Rtemp, R2_tmp, true); 1911 } 1912 1913 1914 void TemplateInterpreterGenerator::histogram_bytecode(Template* t) { 1915 __ inc_global_counter((address)&BytecodeHistogram::_counters[0], sizeof(BytecodeHistogram::_counters[0]) * t->bytecode(), Rtemp, R2_tmp, true); 1916 } 1917 1918 1919 void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) { 1920 const Register Rindex_addr = R2_tmp; 1921 Label Lcontinue; 1922 InlinedAddress Lcounters((address)BytecodePairHistogram::_counters); 1923 InlinedAddress Lindex((address)&BytecodePairHistogram::_index); 1924 const Register Rcounters_addr = R2_tmp; 1925 const Register Rindex = R4_tmp; 1926 1927 // calculate new index for counter: 1928 // index = (_index >> log2_number_of_codes) | (bytecode << log2_number_of_codes). 1929 // (_index >> log2_number_of_codes) is previous bytecode 1930 1931 __ ldr_literal(Rindex_addr, Lindex); 1932 __ ldr_s32(Rindex, Address(Rindex_addr)); 1933 __ mov_slow(Rtemp, ((int)t->bytecode()) << BytecodePairHistogram::log2_number_of_codes); 1934 __ orr(Rindex, Rtemp, AsmOperand(Rindex, lsr, BytecodePairHistogram::log2_number_of_codes)); 1935 __ str_32(Rindex, Address(Rindex_addr)); 1936 1937 // Rindex (R4) contains index of counter 1938 1939 __ ldr_literal(Rcounters_addr, Lcounters); 1940 __ ldr_s32(Rtemp, Address::indexed_32(Rcounters_addr, Rindex)); 1941 __ adds_32(Rtemp, Rtemp, 1); 1942 __ b(Lcontinue, mi); // avoid overflow 1943 __ str_32(Rtemp, Address::indexed_32(Rcounters_addr, Rindex)); 1944 1945 __ b(Lcontinue); 1946 1947 __ bind_literal(Lindex); 1948 __ bind_literal(Lcounters); 1949 1950 __ bind(Lcontinue); 1951 } 1952 1953 1954 void TemplateInterpreterGenerator::trace_bytecode(Template* t) { 1955 // Call a little run-time stub to avoid blow-up for each bytecode. 1956 // The run-time runtime saves the right registers, depending on 1957 // the tosca in-state for the given template. 1958 assert(Interpreter::trace_code(t->tos_in()) != NULL, 1959 "entry must have been generated"); 1960 address trace_entry = Interpreter::trace_code(t->tos_in()); 1961 __ call(trace_entry, relocInfo::none); 1962 } 1963 1964 1965 void TemplateInterpreterGenerator::stop_interpreter_at() { 1966 Label Lcontinue; 1967 const Register stop_at = R2_tmp; 1968 1969 __ ldr_global_s32(Rtemp, (address) &BytecodeCounter::_counter_value); 1970 __ mov_slow(stop_at, StopInterpreterAt); 1971 1972 // test bytecode counter 1973 __ cmp(Rtemp, stop_at); 1974 __ b(Lcontinue, ne); 1975 1976 __ trace_state("stop_interpreter_at"); 1977 __ breakpoint(); 1978 1979 __ bind(Lcontinue); 1980 } 1981 #endif // !PRODUCT